author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-05-18 13:18:31 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-05-18 13:42:15 +0300
commit     437fd90c0250dee670290f9b714253671a990160 (patch)
tree       b871786c360704244a07411c69fb58da9ead4a06 /qemu/target-i386
parent     5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.
Collisions happened in the following patches:

  migration: do cleanup operation after completion (738df5b9)
  Bug fix. (1750c932f86)
  kvmclock: add a new function to update env->tsc. (b52baab2)

The code provided by those patches was already in the upstreamed version.

Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/target-i386')
-rw-r--r--  qemu/target-i386/Makefile.objs          |    8
-rw-r--r--  qemu/target-i386/arch_dump.c            |    1
-rw-r--r--  qemu/target-i386/arch_memory_mapping.c  |    1
-rw-r--r--  qemu/target-i386/bpt_helper.c           |  327
-rw-r--r--  qemu/target-i386/cc_helper.c            |   13
-rw-r--r--  qemu/target-i386/cpu-qom.h              |    8
-rw-r--r--  qemu/target-i386/cpu.c                  |  473
-rw-r--r--  qemu/target-i386/cpu.h                  |  226
-rw-r--r--  qemu/target-i386/excp_helper.c          |   31
-rw-r--r--  qemu/target-i386/fpu_helper.c           |  525
-rw-r--r--  qemu/target-i386/gdbstub.c              |   10
-rw-r--r--  qemu/target-i386/helper.c               |  429
-rw-r--r--  qemu/target-i386/helper.h               |   33
-rw-r--r--  qemu/target-i386/hyperv.c               |  140
-rw-r--r--  qemu/target-i386/hyperv.h               |   42
-rw-r--r--  qemu/target-i386/int_helper.c           |   43
-rw-r--r--  qemu/target-i386/kvm-stub.c             |    1
-rw-r--r--  qemu/target-i386/kvm.c                  |  514
-rw-r--r--  qemu/target-i386/kvm_i386.h             |    2
-rw-r--r--  qemu/target-i386/machine.c              |  218
-rw-r--r--  qemu/target-i386/mem_helper.c           |   71
-rw-r--r--  qemu/target-i386/misc_helper.c          |   79
-rw-r--r--  qemu/target-i386/monitor.c              |  512
-rw-r--r--  qemu/target-i386/mpx_helper.c           |  167
-rw-r--r--  qemu/target-i386/ops_sse.h              |  380
-rw-r--r--  qemu/target-i386/ops_sse_header.h       |   80
-rw-r--r--  qemu/target-i386/seg_helper.c           |  671
-rw-r--r--  qemu/target-i386/smm_helper.c           |   10
-rw-r--r--  qemu/target-i386/svm_helper.c           |    1
-rw-r--r--  qemu/target-i386/translate.c            | 3442
30 files changed, 5464 insertions(+), 2994 deletions(-)
diff --git a/qemu/target-i386/Makefile.objs b/qemu/target-i386/Makefile.objs
index 7a1df2c98..b223d7932 100644
--- a/qemu/target-i386/Makefile.objs
+++ b/qemu/target-i386/Makefile.objs
@@ -1,7 +1,7 @@
-obj-y += translate.o helper.o cpu.o
+obj-y += translate.o helper.o cpu.o bpt_helper.o
obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
-obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o
+obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o mpx_helper.o
obj-y += gdbstub.o
-obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o
-obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
+obj-$(CONFIG_KVM) += kvm.o hyperv.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
diff --git a/qemu/target-i386/arch_dump.c b/qemu/target-i386/arch_dump.c
index eccd8031a..5a2e4be5d 100644
--- a/qemu/target-i386/arch_dump.c
+++ b/qemu/target-i386/arch_dump.c
@@ -11,6 +11,7 @@
*
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/dump.h"
diff --git a/qemu/target-i386/arch_memory_mapping.c b/qemu/target-i386/arch_memory_mapping.c
index 01563fecc..88f341e1b 100644
--- a/qemu/target-i386/arch_memory_mapping.c
+++ b/qemu/target-i386/arch_memory_mapping.c
@@ -11,6 +11,7 @@
*
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/memory_mapping.h"
diff --git a/qemu/target-i386/bpt_helper.c b/qemu/target-i386/bpt_helper.c
new file mode 100644
index 000000000..f47df1998
--- /dev/null
+++ b/qemu/target-i386/bpt_helper.c
@@ -0,0 +1,327 @@
+/*
+ * i386 breakpoint helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+
+#ifndef CONFIG_USER_ONLY
+static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return (dr7 >> (index * 2)) & 1;
+}
+
+static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return (dr7 >> (index * 2)) & 2;
+
+}
+static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return hw_global_breakpoint_enabled(dr7, index) ||
+ hw_local_breakpoint_enabled(dr7, index);
+}
+
+static inline int hw_breakpoint_type(unsigned long dr7, int index)
+{
+ return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
+}
+
+static inline int hw_breakpoint_len(unsigned long dr7, int index)
+{
+ int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
+ return (len == 2) ? 8 : len + 1;
+}
+
+static int hw_breakpoint_insert(CPUX86State *env, int index)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ target_ulong dr7 = env->dr[7];
+ target_ulong drN = env->dr[index];
+ int err = 0;
+
+ switch (hw_breakpoint_type(dr7, index)) {
+ case DR7_TYPE_BP_INST:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_breakpoint_insert(cs, drN, BP_CPU,
+ &env->cpu_breakpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* Notice when we should enable calls to bpt_io. */
+ return hw_breakpoint_enabled(env->dr[7], index)
+ ? HF_IOBPT_MASK : 0;
+
+ case DR7_TYPE_DATA_WR:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_WRITE,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_DATA_RW:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_ACCESS,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+ }
+ if (err) {
+ env->cpu_breakpoint[index] = NULL;
+ }
+ return 0;
+}
+
+static void hw_breakpoint_remove(CPUX86State *env, int index)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ switch (hw_breakpoint_type(env->dr[7], index)) {
+ case DR7_TYPE_BP_INST:
+ if (env->cpu_breakpoint[index]) {
+ cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
+ env->cpu_breakpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_breakpoint[index]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
+ env->cpu_breakpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* HF_IOBPT_MASK cleared elsewhere. */
+ break;
+ }
+}
+
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
+{
+ target_ulong old_dr7 = env->dr[7];
+ int iobpt = 0;
+ int i;
+
+ new_dr7 |= DR7_FIXED_1;
+
+ /* If nothing is changing except the global/local enable bits,
+ then we can make the change more efficient. */
+ if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
+ /* Fold the global and local enable bits together into the
+ global fields, then xor to show which registers have
+ changed collective enable state. */
+ int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
+
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
+ hw_breakpoint_remove(env, i);
+ }
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= HF_IOBPT_MASK;
+ }
+ }
+ } else {
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ hw_breakpoint_remove(env, i);
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ }
+ }
+
+ env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
+}
+
+static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
+{
+ target_ulong dr6;
+ int reg;
+ bool hit_enabled = false;
+
+ dr6 = env->dr[6] & ~0xf;
+ for (reg = 0; reg < DR7_MAX_BP; reg++) {
+ bool bp_match = false;
+ bool wp_match = false;
+
+ switch (hw_breakpoint_type(env->dr[7], reg)) {
+ case DR7_TYPE_BP_INST:
+ if (env->dr[reg] == env->eip) {
+ bp_match = true;
+ }
+ break;
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_watchpoint[reg] &&
+ env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
+ wp_match = true;
+ }
+ break;
+ case DR7_TYPE_IO_RW:
+ break;
+ }
+ if (bp_match || wp_match) {
+ dr6 |= 1 << reg;
+ if (hw_breakpoint_enabled(env->dr[7], reg)) {
+ hit_enabled = true;
+ }
+ }
+ }
+
+ if (hit_enabled || force_dr6_update) {
+ env->dr[6] = dr6;
+ }
+
+ return hit_enabled;
+}
+
+void breakpoint_handler(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ CPUBreakpoint *bp;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
+ if (check_hw_breakpoints(env, false)) {
+ raise_exception(env, EXCP01_DB);
+ } else {
+ cpu_resume_from_signal(cs, NULL);
+ }
+ }
+ } else {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ if (bp->pc == env->eip) {
+ if (bp->flags & BP_CPU) {
+ check_hw_breakpoints(env, true);
+ raise_exception(env, EXCP01_DB);
+ }
+ break;
+ }
+ }
+ }
+}
+#endif
+
+void helper_single_step(CPUX86State *env)
+{
+#ifndef CONFIG_USER_ONLY
+ check_hw_breakpoints(env, true);
+ env->dr[6] |= DR6_BS;
+#endif
+ raise_exception(env, EXCP01_DB);
+}
+
+void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
+{
+#ifndef CONFIG_USER_ONLY
+ switch (reg) {
+ case 0: case 1: case 2: case 3:
+ if (hw_breakpoint_enabled(env->dr[7], reg)
+ && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
+ hw_breakpoint_remove(env, reg);
+ env->dr[reg] = t0;
+ hw_breakpoint_insert(env, reg);
+ } else {
+ env->dr[reg] = t0;
+ }
+ return;
+ case 4:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ }
+ /* fallthru */
+ case 6:
+ env->dr[6] = t0 | DR6_FIXED_1;
+ return;
+ case 5:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ }
+ /* fallthru */
+ case 7:
+ cpu_x86_update_dr7(env, t0);
+ return;
+ }
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+#endif
+}
+
+target_ulong helper_get_dr(CPUX86State *env, int reg)
+{
+ switch (reg) {
+ case 0: case 1: case 2: case 3: case 6: case 7:
+ return env->dr[reg];
+ case 4:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ } else {
+ return env->dr[6];
+ }
+ case 5:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ } else {
+ return env->dr[7];
+ }
+ }
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+}
+
+/* Check if Port I/O is trapped by a breakpoint. */
+void helper_bpt_io(CPUX86State *env, uint32_t port,
+ uint32_t size, target_ulong next_eip)
+{
+#ifndef CONFIG_USER_ONLY
+ target_ulong dr7 = env->dr[7];
+ int i, hit = 0;
+
+ for (i = 0; i < DR7_MAX_BP; ++i) {
+ if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(dr7, i)) {
+ int bpt_len = hw_breakpoint_len(dr7, i);
+ if (port + size - 1 >= env->dr[i]
+ && port <= env->dr[i] + bpt_len - 1) {
+ hit |= 1 << i;
+ }
+ }
+ }
+
+ if (hit) {
+ env->dr[6] = (env->dr[6] & ~0xf) | hit;
+ env->eip = next_eip;
+ raise_exception(env, EXCP01_DB);
+ }
+#endif
+}
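Two details of the new file are worth calling out. First, hw_breakpoint_type()/hw_breakpoint_len() decode DR7's per-register fields: each of the four debug registers owns a local/global enable pair in the low byte, plus a 2-bit type and a 2-bit length field starting at bit 16. Second, the fast path in cpu_x86_update_dr7() folds each local/global pair into one bit by OR-ing dr7 with dr7 * 2 (the multiply shifts every local-enable bit onto its global-enable position), so a single XOR reveals which breakpoints changed enable state. Below is a stand-alone sketch of the field decoding; it reuses the cpu.h shift constants for illustration and is not QEMU code:

/* dr7_decode.c -- stand-alone illustration of the DR7 decoding above.
 * Assumes the layout from target-i386/cpu.h: enable pairs in bits 0-7,
 * type at bits 16+4*index, length at bits 18+4*index. */
#include <stdio.h>
#include <stdbool.h>

#define DR7_TYPE_SHIFT 16
#define DR7_LEN_SHIFT  18

static bool bp_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 3;    /* local bit or global bit set */
}

static int bp_type(unsigned long dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + index * 4)) & 3;
}

static int bp_len(unsigned long dr7, int index)
{
    int len = (dr7 >> (DR7_LEN_SHIFT + index * 4)) & 3;
    return (len == 2) ? 8 : len + 1;    /* 0->1, 1->2, 2->8, 3->4 bytes */
}

int main(void)
{
    /* Breakpoint 0: global enable (bit 1), type 01 = data write,
     * length 11 = 4 bytes. */
    unsigned long dr7 = (1UL << 1) | (1UL << DR7_TYPE_SHIFT)
                      | (3UL << DR7_LEN_SHIFT);
    printf("bp0 enabled=%d type=%d len=%d\n",
           bp_enabled(dr7, 0), bp_type(dr7, 0), bp_len(dr7, 0));
    return 0;   /* prints: bp0 enabled=1 type=1 len=4 */
}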
diff --git a/qemu/target-i386/cc_helper.c b/qemu/target-i386/cc_helper.c
index ecbf0ec09..83af223c9 100644
--- a/qemu/target-i386/cc_helper.c
+++ b/qemu/target-i386/cc_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
@@ -378,17 +379,7 @@ void helper_sti_vm(CPUX86State *env)
{
env->eflags |= VIF_MASK;
if (env->eflags & VIP_MASK) {
- raise_exception(env, EXCP0D_GPF);
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
}
#endif
-
-void helper_set_inhibit_irq(CPUX86State *env)
-{
- env->hflags |= HF_INHIBIT_IRQ_MASK;
-}
-
-void helper_reset_inhibit_irq(CPUX86State *env)
-{
- env->hflags &= ~HF_INHIBIT_IRQ_MASK;
-}
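The helper_sti_vm() hunk is part of a wider 2.6 conversion from raise_exception() to raise_exception_ra(..., GETPC()): the helper passes its own host return address so the exception path can unwind to the exact guest instruction before delivering #GP. QEMU's GETPC() is built on __builtin_return_address(0); as a rough stand-alone analogy (plain setjmp/longjmp, not QEMU's restore machinery), raising an exception tagged with its raise site looks like this:

/* getpc_analogy.c -- toy model of raising an exception that records
 * where it was raised from (GCC/Clang: __builtin_return_address). */
#include <setjmp.h>
#include <stdio.h>
#include <stdint.h>

static jmp_buf unwind_buf;
static uintptr_t fault_ra;        /* raise site, for precise reporting */

static void raise_exc_ra(int excp, uintptr_t ra)
{
    fault_ra = ra;
    longjmp(unwind_buf, excp);    /* never returns */
}

static void helper(void)
{
    /* analogous to GETPC(): capture this helper's return address */
    raise_exc_ra(13, (uintptr_t)__builtin_return_address(0));
}

int main(void)
{
    int excp = setjmp(unwind_buf);
    if (excp) {
        printf("exception %d raised from %#lx\n",
               excp, (unsigned long)fault_ra);
        return 0;
    }
    helper();
    return 1;                     /* not reached */
}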
diff --git a/qemu/target-i386/cpu-qom.h b/qemu/target-i386/cpu-qom.h
index 7a4fddd85..cb750176c 100644
--- a/qemu/target-i386/cpu-qom.h
+++ b/qemu/target-i386/cpu-qom.h
@@ -22,7 +22,6 @@
#include "qom/cpu.h"
#include "cpu.h"
-#include "qapi/error.h"
#include "qemu/notify.h"
#ifdef TARGET_X86_64
@@ -88,7 +87,14 @@ typedef struct X86CPU {
bool hyperv_vapic;
bool hyperv_relaxed_timing;
int hyperv_spinlock_attempts;
+ char *hyperv_vendor_id;
bool hyperv_time;
+ bool hyperv_crash;
+ bool hyperv_reset;
+ bool hyperv_vpindex;
+ bool hyperv_runtime;
+ bool hyperv_synic;
+ bool hyperv_stimer;
bool check_cpuid;
bool enforce_cpuid;
bool expose_kvm;
diff --git a/qemu/target-i386/cpu.c b/qemu/target-i386/cpu.c
index 7a779b165..d0b5b6915 100644
--- a/qemu/target-i386/cpu.c
+++ b/qemu/target-i386/cpu.c
@@ -16,10 +16,8 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
#include "cpu.h"
#include "sysemu/kvm.h"
@@ -43,7 +41,6 @@
#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
-#include "hw/cpu/icc_bus.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
@@ -260,8 +257,19 @@ static const char *svm_feature_name[] = {
static const char *cpuid_7_0_ebx_feature_name[] = {
"fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
"bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
- "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
- NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
+ "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
+ "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
+};
+
+static const char *cpuid_7_0_ecx_feature_name[] = {
+ NULL, NULL, NULL, "pku",
+ "ospke", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
};
static const char *cpuid_apm_edx_feature_name[] = {
@@ -313,7 +321,7 @@ static const char *cpuid_6_feature_name[] = {
CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
- CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
@@ -321,14 +329,14 @@ static const char *cpuid_6_feature_name[] = {
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
+ CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
- CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
- CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
- CPUID_EXT_RDRAND */
+ CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
+ CPUID_EXT_F16C, CPUID_EXT_RDRAND */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
@@ -346,14 +354,19 @@ static const char *cpuid_6_feature_name[] = {
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
- CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
+ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
+ CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
/* missing:
- CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
+ CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
CPUID_7_0_EBX_RDSEED */
+#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
-
+#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
+ /* missing:
+ CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
const char **feat_names;
@@ -408,6 +421,13 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.cpuid_reg = R_EBX,
.tcg_features = TCG_7_0_EBX_FEATURES,
},
+ [FEAT_7_0_ECX] = {
+ .feat_names = cpuid_7_0_ecx_feature_name,
+ .cpuid_eax = 7,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_ECX,
+ .tcg_features = TCG_7_0_ECX_FEATURES,
+ },
[FEAT_8000_0007_EDX] = {
.feat_names = cpuid_apm_edx_feature_name,
.cpuid_eax = 0x80000007,
@@ -420,7 +440,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.cpuid_eax = 0xd,
.cpuid_needs_ecx = true, .cpuid_ecx = 1,
.cpuid_reg = R_EAX,
- .tcg_features = 0,
+ .tcg_features = TCG_XSAVE_FEATURES,
},
[FEAT_6_EAX] = {
.feat_names = cpuid_6_feature_name,
@@ -450,24 +470,28 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
-typedef struct ExtSaveArea {
- uint32_t feature, bits;
- uint32_t offset, size;
-} ExtSaveArea;
-
-static const ExtSaveArea ext_save_areas[] = {
- [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
+const ExtSaveArea x86_ext_save_areas[] = {
+ [XSTATE_YMM_BIT] =
+ { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
.offset = 0x240, .size = 0x100 },
- [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+ [XSTATE_BNDREGS_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
.offset = 0x3c0, .size = 0x40 },
- [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+ [XSTATE_BNDCSR_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
.offset = 0x400, .size = 0x40 },
- [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ [XSTATE_OPMASK_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
.offset = 0x440, .size = 0x40 },
- [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ [XSTATE_ZMM_Hi256_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
.offset = 0x480, .size = 0x200 },
- [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ [XSTATE_Hi16_ZMM_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
.offset = 0x680, .size = 0x400 },
+ [XSTATE_PKRU_BIT] =
+ { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
+ .offset = 0xA80, .size = 0x8 },
};
const char *get_register_name_32(unsigned int reg)
@@ -478,38 +502,6 @@ const char *get_register_name_32(unsigned int reg)
return x86_reg_info_32[reg].name;
}
-/* KVM-specific features that are automatically added to all CPU models
- * when KVM is enabled.
- */
-static uint32_t kvm_default_features[FEATURE_WORDS] = {
- [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
- (1 << KVM_FEATURE_NOP_IO_DELAY) |
- (1 << KVM_FEATURE_CLOCKSOURCE2) |
- (1 << KVM_FEATURE_ASYNC_PF) |
- (1 << KVM_FEATURE_STEAL_TIME) |
- (1 << KVM_FEATURE_PV_EOI) |
- (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
- [FEAT_1_ECX] = CPUID_EXT_X2APIC,
-};
-
-/* Features that are not added by default to any CPU model when KVM is enabled.
- */
-static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
- [FEAT_1_EDX] = CPUID_ACPI,
- [FEAT_1_ECX] = CPUID_EXT_MONITOR,
- [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
-};
-
-void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
-{
- kvm_default_features[w] &= ~features;
-}
-
-void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
-{
- kvm_default_unset_features[w] &= ~features;
-}
-
/*
* Returns the set of feature flags that are supported and migratable by
* QEMU, for a given FeatureWord.
@@ -689,7 +681,6 @@ struct X86CPUDefinition {
int stepping;
FeatureWordArray features;
char model_id[48];
- bool cache_info_passthrough;
};
static X86CPUDefinition builtin_x86_defs[] = {
@@ -705,12 +696,11 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36,
.features[FEAT_1_ECX] =
- CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
+ CPUID_EXT_SSE3 | CPUID_EXT_CX16,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
.features[FEAT_8000_0001_ECX] =
- CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
- CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
.xlevel = 0x8000000A,
},
{
@@ -806,7 +796,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.features[FEAT_1_EDX] =
PPRO_FEATURES,
.features[FEAT_1_ECX] =
- CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
+ CPUID_EXT_SSE3,
.xlevel = 0x80000004,
},
{
@@ -1113,7 +1103,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
- CPUID_EXT3_LAHF_LM,
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
.features[FEAT_7_0_EBX] =
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
@@ -1148,7 +1138,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
- CPUID_EXT3_LAHF_LM,
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
.features[FEAT_7_0_EBX] =
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
@@ -1185,7 +1175,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
- CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
.features[FEAT_7_0_EBX] =
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
@@ -1223,7 +1213,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
- CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
.features[FEAT_7_0_EBX] =
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
@@ -1277,8 +1267,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_DE | CPUID_FP87,
.features[FEAT_1_ECX] =
CPUID_EXT_CX16 | CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
+ CPUID_EXT2_LM | CPUID_EXT2_FXSR |
CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
@@ -1306,8 +1297,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
.features[FEAT_1_ECX] =
CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
+ CPUID_EXT2_LM | CPUID_EXT2_FXSR |
CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
@@ -1338,8 +1330,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_LM |
CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
@@ -1373,8 +1366,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_LM |
CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
@@ -1392,30 +1386,41 @@ static X86CPUDefinition builtin_x86_defs[] = {
},
};
-/**
- * x86_cpu_compat_set_features:
- * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
- * @w: Identifies the feature word to be changed.
- * @feat_add: Feature bits to be added to feature word
- * @feat_remove: Feature bits to be removed from feature word
- *
- * Change CPU model feature bits for compatibility.
- *
- * This function may be used by machine-type compatibility functions
- * to enable or disable feature bits on specific CPU models.
+typedef struct PropValue {
+ const char *prop, *value;
+} PropValue;
+
+/* KVM-specific features that are automatically added/removed
+ * from all CPU models when KVM is enabled.
*/
-void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
- uint32_t feat_add, uint32_t feat_remove)
+static PropValue kvm_default_props[] = {
+ { "kvmclock", "on" },
+ { "kvm-nopiodelay", "on" },
+ { "kvm-asyncpf", "on" },
+ { "kvm-steal-time", "on" },
+ { "kvm-pv-eoi", "on" },
+ { "kvmclock-stable-bit", "on" },
+ { "x2apic", "on" },
+ { "acpi", "off" },
+ { "monitor", "off" },
+ { "svm", "off" },
+ { NULL, NULL },
+};
+
+void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
- X86CPUDefinition *def;
- int i;
- for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
- def = &builtin_x86_defs[i];
- if (!cpu_model || !strcmp(cpu_model, def->name)) {
- def->features[w] |= feat_add;
- def->features[w] &= ~feat_remove;
+ PropValue *pv;
+ for (pv = kvm_default_props; pv->prop; pv++) {
+ if (!strcmp(pv->prop, prop)) {
+ pv->value = value;
+ break;
}
}
+
+ /* It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+ assert(pv->prop);
}
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
@@ -1442,6 +1447,7 @@ static X86CPUDefinition host_cpudef;
static Property host_x86_cpu_properties[] = {
DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
+ DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
DEFINE_PROP_END_OF_LIST()
};
@@ -1468,13 +1474,14 @@ static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
cpu_x86_fill_model_id(host_cpudef.model_id);
xcc->cpu_def = &host_cpudef;
- host_cpudef.cache_info_passthrough = true;
/* level, xlevel, xlevel2, and the feature words are initialized on
* instance_init, because they require KVM to be initialized.
*/
dc->props = host_x86_cpu_properties;
+ /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
+ dc->cannot_destroy_with_object_finalize_yet = true;
}
static void host_x86_cpu_initfn(Object *obj)
@@ -1512,7 +1519,7 @@ static void report_unavailable_features(FeatureWord w, uint32_t mask)
int i;
for (i = 0; i < 32; ++i) {
- if (1 << i & mask) {
+ if ((1UL << i) & mask) {
const char *reg = get_register_name_32(f->cpuid_reg);
assert(reg);
fprintf(stderr, "warning: %s doesn't support requested feature: "
@@ -1525,8 +1532,9 @@ static void report_unavailable_features(FeatureWord w, uint32_t mask)
}
}
-static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
@@ -1536,11 +1544,12 @@ static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
if (value == 0xf) {
value += (env->cpuid_version >> 20) & 0xff;
}
- visit_type_int(v, &value, name, errp);
+ visit_type_int(v, name, &value, errp);
}
-static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
@@ -1549,7 +1558,7 @@ static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
Error *local_err = NULL;
int64_t value;
- visit_type_int(v, &value, name, &local_err);
+ visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -1568,8 +1577,9 @@ static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
}
}
-static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
@@ -1577,11 +1587,12 @@ static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
value = (env->cpuid_version >> 4) & 0xf;
value |= ((env->cpuid_version >> 16) & 0xf) << 4;
- visit_type_int(v, &value, name, errp);
+ visit_type_int(v, name, &value, errp);
}
-static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
@@ -1590,7 +1601,7 @@ static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
Error *local_err = NULL;
int64_t value;
- visit_type_int(v, &value, name, &local_err);
+ visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -1606,7 +1617,7 @@ static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
}
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
- void *opaque, const char *name,
+ const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
@@ -1614,11 +1625,11 @@ static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
int64_t value;
value = env->cpuid_version & 0xf;
- visit_type_int(v, &value, name, errp);
+ visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
- void *opaque, const char *name,
+ const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
@@ -1628,7 +1639,7 @@ static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
Error *local_err = NULL;
int64_t value;
- visit_type_int(v, &value, name, &local_err);
+ visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -1714,18 +1725,18 @@ static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
}
}
-static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
int64_t value;
value = cpu->env.tsc_khz * 1000;
- visit_type_int(v, &value, name, errp);
+ visit_type_int(v, name, &value, errp);
}
-static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
const int64_t min = 0;
@@ -1733,7 +1744,7 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
Error *local_err = NULL;
int64_t value;
- visit_type_int(v, &value, name, &local_err);
+ visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -1744,20 +1755,20 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
return;
}
- cpu->env.tsc_khz = value / 1000;
+ cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
-static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
int64_t value = cpu->apic_id;
- visit_type_int(v, &value, name, errp);
+ visit_type_int(v, name, &value, errp);
}
-static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
DeviceState *dev = DEVICE(obj);
@@ -1772,7 +1783,7 @@ static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
return;
}
- visit_type_int(v, &value, name, &error);
+ visit_type_int(v, name, &value, &error);
if (error) {
error_propagate(errp, error);
return;
@@ -1792,8 +1803,9 @@ static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
}
/* Generic getter for "feature-words" and "filtered-features" properties */
-static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
{
uint32_t *array = (uint32_t *)opaque;
FeatureWord w;
@@ -1817,21 +1829,21 @@ static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
list = &list_entries[w];
}
- visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
+ visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
error_propagate(errp, err);
}
-static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
int64_t value = cpu->hyperv_spinlock_attempts;
- visit_type_int(v, &value, name, errp);
+ visit_type_int(v, name, &value, errp);
}
-static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
const int64_t min = 0xFFF;
const int64_t max = UINT_MAX;
@@ -1839,7 +1851,7 @@ static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
Error *err = NULL;
int64_t value;
- visit_type_int(v, &value, name, &err);
+ visit_type_int(v, name, &value, &err);
if (err) {
error_propagate(errp, err);
return;
@@ -1919,8 +1931,8 @@ static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
char *err;
char num[32];
- tsc_freq = strtosz_suffix_unit(val, &err,
- STRTOSZ_DEFSUFFIX_B, 1000);
+ tsc_freq = qemu_strtosz_suffix_unit(val, &err,
+ QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
if (tsc_freq < 0 || *err) {
error_setg(errp, "bad numerical value %s", val);
return;
@@ -2087,6 +2099,18 @@ static int x86_cpu_filter_features(X86CPU *cpu)
return rv;
}
+static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
+{
+ PropValue *pv;
+ for (pv = props; pv->prop; pv++) {
+ if (!pv->value) {
+ continue;
+ }
+ object_property_parse(OBJECT(cpu), pv->value, pv->prop,
+ &error_abort);
+ }
+}
+
/* Load data from X86CPUDefinition
*/
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
@@ -2102,7 +2126,6 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
- cpu->cache_info_passthrough = def->cache_info_passthrough;
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] = def->features[w];
@@ -2110,11 +2133,11 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
/* Special cases not set in the X86CPUDefinition structs: */
if (kvm_enabled()) {
- FeatureWord w;
- for (w = 0; w < FEATURE_WORDS; w++) {
- env->features[w] |= kvm_default_features[w];
- env->features[w] &= ~kvm_default_unset_features[w];
+ if (!kvm_irqchip_in_kernel()) {
+ x86_cpu_change_kvm_default("x2apic", "off");
}
+
+ x86_cpu_apply_props(cpu, kvm_default_props);
}
env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
@@ -2257,7 +2280,7 @@ void x86_cpudef_setup(void)
pstrcpy(def->model_id, sizeof(def->model_id),
"QEMU Virtual CPU version ");
pstrcat(def->model_id, sizeof(def->model_id),
- qemu_get_version());
+ qemu_hw_version());
break;
}
}
@@ -2306,10 +2329,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx = (cpu->apic_id << 24) |
8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
*ecx = env->features[FEAT_1_ECX];
+ if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
+ *ecx |= CPUID_EXT_OSXSAVE;
+ }
*edx = env->features[FEAT_1_EDX];
if (cs->nr_cores * cs->nr_threads > 1) {
*ebx |= (cs->nr_cores * cs->nr_threads) << 16;
- *edx |= 1 << 28; /* HTT bit */
+ *edx |= CPUID_HT;
}
break;
case 2:
@@ -2399,7 +2425,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
if (count == 0) {
*eax = 0; /* Maximum ECX value for sub-leaves */
*ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
- *ecx = 0; /* Reserved */
+ *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
+ if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
+ *ecx |= CPUID_7_0_ECX_OSPKE;
+ }
*edx = 0; /* Reserved */
} else {
*eax = 0;
@@ -2433,7 +2462,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0xD: {
KVMState *s = cs->kvm_state;
- uint64_t kvm_mask;
+ uint64_t ena_mask;
int i;
/* Processor Extended State */
@@ -2441,35 +2470,39 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx = 0;
*ecx = 0;
*edx = 0;
- if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
break;
}
- kvm_mask =
- kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
- ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
+ if (kvm_enabled()) {
+ ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
+ ena_mask <<= 32;
+ ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
+ } else {
+ ena_mask = -1;
+ }
if (count == 0) {
*ecx = 0x240;
- for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
- const ExtSaveArea *esa = &ext_save_areas[i];
- if ((env->features[esa->feature] & esa->bits) == esa->bits &&
- (kvm_mask & (1 << i)) != 0) {
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if ((env->features[esa->feature] & esa->bits) == esa->bits
+ && ((ena_mask >> i) & 1) != 0) {
if (i < 32) {
- *eax |= 1 << i;
+ *eax |= 1u << i;
} else {
- *edx |= 1 << (i - 32);
+ *edx |= 1u << (i - 32);
}
*ecx = MAX(*ecx, esa->offset + esa->size);
}
}
- *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
+ *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
*ebx = *ecx;
} else if (count == 1) {
*eax = env->features[FEAT_XSAVE];
- } else if (count < ARRAY_SIZE(ext_save_areas)) {
- const ExtSaveArea *esa = &ext_save_areas[count];
- if ((env->features[esa->feature] & esa->bits) == esa->bits &&
- (kvm_mask & (1 << count)) != 0) {
+ } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[count];
+ if ((env->features[esa->feature] & esa->bits) == esa->bits
+ && ((ena_mask >> count) & 1) != 0) {
*eax = esa->size;
*ebx = esa->offset;
}
@@ -2622,6 +2655,8 @@ static void x86_cpu_reset(CPUState *s)
X86CPU *cpu = X86_CPU(s);
X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
CPUX86State *env = &cpu->env;
+ target_ulong cr4;
+ uint64_t xcr0;
int i;
xcc->parent_reset(s);
@@ -2681,7 +2716,8 @@ static void x86_cpu_reset(CPUState *s)
cpu_set_fpuc(env, 0x37f);
env->mxcsr = 0x1f80;
- env->xstate_bv = XSTATE_FP | XSTATE_SSE;
+ /* All units are in INIT state. */
+ env->xstate_bv = 0;
env->pat = 0x0007040600070406ULL;
env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
@@ -2692,7 +2728,31 @@ static void x86_cpu_reset(CPUState *s)
cpu_breakpoint_remove_all(s, BP_CPU);
cpu_watchpoint_remove_all(s, BP_CPU);
- env->xcr0 = 1;
+ cr4 = 0;
+ xcr0 = XSTATE_FP_MASK;
+
+#ifdef CONFIG_USER_ONLY
+ /* Enable all the features for user-mode. */
+ if (env->features[FEAT_1_EDX] & CPUID_SSE) {
+ xcr0 |= XSTATE_SSE_MASK;
+ }
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if ((env->features[esa->feature] & esa->bits) == esa->bits) {
+ xcr0 |= 1ull << i;
+ }
+ }
+
+ if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+ cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
+ }
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
+ cr4 |= CR4_FSGSBASE_MASK;
+ }
+#endif
+
+ env->xcr0 = xcr0;
+ cpu_x86_update_cr4(env, cr4);
/*
* SDM 11.11.5 requires:
@@ -2749,21 +2809,16 @@ static void mce_init(X86CPU *cpu)
#ifndef CONFIG_USER_ONLY
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
- DeviceState *dev = DEVICE(cpu);
APICCommonState *apic;
const char *apic_type = "apic";
- if (kvm_irqchip_in_kernel()) {
+ if (kvm_apic_in_kernel()) {
apic_type = "kvm-apic";
} else if (xen_enabled()) {
apic_type = "xen-apic";
}
- cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
- if (cpu->apic_state == NULL) {
- error_setg(errp, "APIC device '%s' could not be created", apic_type);
- return;
- }
+ cpu->apic_state = DEVICE(object_new(apic_type));
object_property_add_child(OBJECT(cpu), "apic",
OBJECT(cpu->apic_state), NULL);
@@ -2771,15 +2826,30 @@ static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
/* TODO: convert to link<> */
apic = APIC_COMMON(cpu->apic_state);
apic->cpu = cpu;
+ apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
+ APICCommonState *apic;
+ static bool apic_mmio_map_once;
+
if (cpu->apic_state == NULL) {
return;
}
object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
errp);
+
+ /* Map APIC MMIO area */
+ apic = APIC_COMMON(cpu->apic_state);
+ if (!apic_mmio_map_once) {
+ memory_region_add_subregion_overlap(get_system_memory(),
+ apic->apicbase &
+ MSR_IA32_APICBASE_BASE,
+ &apic->io_memory,
+ 0x1000);
+ apic_mmio_map_once = true;
+ }
}
static void x86_cpu_machine_done(Notifier *n, void *unused)
@@ -2827,6 +2897,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
env->cpuid_level = 7;
}
+ if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
+ error_setg(&local_err,
+ kvm_enabled() ?
+ "Host doesn't support requested features" :
+ "TCG doesn't support requested features");
+ goto out;
+ }
+
/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
* CPUID[1].EDX.
*/
@@ -2837,14 +2915,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
}
- if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
- error_setg(&local_err,
- kvm_enabled() ?
- "Host doesn't support requested features" :
- "TCG doesn't support requested features");
- goto out;
- }
-
#ifndef CONFIG_USER_ONLY
qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
@@ -2860,9 +2930,10 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
if (tcg_enabled()) {
+ AddressSpace *newas = g_new(AddressSpace, 1);
+
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
- cs->as = g_new(AddressSpace, 1);
/* Outer container... */
memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
@@ -2875,7 +2946,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
- address_space_init(cs->as, cpu->cpu_as_root, "CPU");
+ address_space_init(newas, cpu->cpu_as_root, "CPU");
+ cs->num_ases = 1;
+ cpu_address_space_init(cs, newas, 0);
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = x86_cpu_machine_done;
@@ -2919,22 +2992,16 @@ typedef struct BitProperty {
uint32_t mask;
} BitProperty;
-static void x86_cpu_get_bit_prop(Object *obj,
- struct Visitor *v,
- void *opaque,
- const char *name,
- Error **errp)
+static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
BitProperty *fp = opaque;
bool value = (*fp->ptr & fp->mask) == fp->mask;
- visit_type_bool(v, &value, name, errp);
+ visit_type_bool(v, name, &value, errp);
}
-static void x86_cpu_set_bit_prop(Object *obj,
- struct Visitor *v,
- void *opaque,
- const char *name,
- Error **errp)
+static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
DeviceState *dev = DEVICE(obj);
BitProperty *fp = opaque;
@@ -2946,7 +3013,7 @@ static void x86_cpu_set_bit_prop(Object *obj,
return;
}
- visit_type_bool(v, &value, name, &local_err);
+ visit_type_bool(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -3087,7 +3154,7 @@ static void x86_cpu_initfn(Object *obj)
/* init various static tables used in TCG mode */
if (tcg_enabled() && !inited) {
inited = 1;
- optimize_flags_init();
+ tcg_x86_init();
}
}
@@ -3124,14 +3191,8 @@ static bool x86_cpu_has_work(CPUState *cs)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
-#if !defined(CONFIG_USER_ONLY)
- if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
- apic_poll_irq(cpu->apic_state);
- cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
- }
-#endif
-
- return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
(cs->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
@@ -3147,12 +3208,19 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
- DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
+ DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
+ DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
+ DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
+ DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+ DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
+ DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
+ DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
+ DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
DEFINE_PROP_END_OF_LIST()
};
@@ -3164,7 +3232,6 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
xcc->parent_realize = dc->realize;
dc->realize = x86_cpu_realizefn;
- dc->bus_type = TYPE_ICC_BUS;
dc->props = x86_cpu_properties;
xcc->parent_reset = cc->reset;
@@ -3200,6 +3267,12 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
#endif
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
+
+ /*
+ * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
+ * object in cpus -> dangling pointer after final object_unref().
+ */
+ dc->cannot_destroy_with_object_finalize_yet = true;
}
static const TypeInfo x86_cpu_type_info = {
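Among the cpu.c changes, the CPUID[EAX=0Dh,ECX=0] hunk now derives the XSAVE layout from the exported x86_ext_save_areas[] table: every supported-and-enabled component sets its bit in EAX:EDX, and ECX grows to cover that component's offset + size. A stand-alone sketch of that size computation follows, with the offset/size pairs copied from the table in the diff; this is an illustration only, since the real code also intersects with KVM's supported-feature mask when KVM is enabled:

/* xsave_layout.c -- recompute the CPUID leaf 0xD, sub-leaf 0 answer
 * from the ExtSaveArea offsets/sizes added in cpu.c. */
#include <stdio.h>
#include <stdint.h>

struct esa { uint32_t offset, size; };

static const struct esa areas[] = {
    [2] = { 0x240, 0x100 },   /* YMM       */
    [3] = { 0x3c0, 0x040 },   /* BNDREGS   */
    [4] = { 0x400, 0x040 },   /* BNDCSR    */
    [5] = { 0x440, 0x040 },   /* OPMASK    */
    [6] = { 0x480, 0x200 },   /* ZMM_Hi256 */
    [7] = { 0x680, 0x400 },   /* Hi16_ZMM  */
    [9] = { 0xA80, 0x008 },   /* PKRU      */
};

int main(void)
{
    uint64_t ena_mask = -1;   /* pretend every component is enabled */
    uint32_t eax = 0x3;       /* x87 + SSE, always in the legacy area */
    uint32_t ecx = 0x240;     /* legacy region + XSAVE header */
    for (unsigned i = 2; i < sizeof(areas) / sizeof(areas[0]); i++) {
        if (areas[i].size && ((ena_mask >> i) & 1)) {
            eax |= 1u << i;
            uint32_t end = areas[i].offset + areas[i].size;
            if (end > ecx) {
                ecx = end;
            }
        }
    }
    printf("EAX=%#x ECX=%#x\n", eax, ecx);  /* EAX=0x2ff ECX=0xa88 */
    return 0;
}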
diff --git a/qemu/target-i386/cpu.h b/qemu/target-i386/cpu.h
index ead28325b..732eb6d7e 100644
--- a/qemu/target-i386/cpu.h
+++ b/qemu/target-i386/cpu.h
@@ -19,8 +19,8 @@
#ifndef CPU_I386_H
#define CPU_I386_H
-#include "config.h"
#include "qemu-common.h"
+#include "standard-headers/asm-x86/hyperv.h"
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
@@ -36,10 +36,10 @@
#define TARGET_HAS_PRECISE_SMC
#ifdef TARGET_X86_64
-#define ELF_MACHINE EM_X86_64
+#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
#else
-#define ELF_MACHINE EM_386
+#define I386_ELF_MACHINE EM_386
#define ELF_MACHINE_UNAME "i686"
#endif
@@ -154,6 +154,9 @@
#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
+#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
+#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
+#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
@@ -177,6 +180,9 @@
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
+#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
+#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
+#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
/* hflags2 */
@@ -185,12 +191,14 @@
#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
+#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
+#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1
@@ -224,6 +232,7 @@
#define CR4_OSXSAVE_MASK (1U << 18)
#define CR4_SMEP_MASK (1U << 20)
#define CR4_SMAP_MASK (1U << 21)
+#define CR4_PKE_MASK (1U << 22)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
@@ -234,6 +243,7 @@
#define DR7_TYPE_SHIFT 16
#define DR7_LEN_SHIFT 18
#define DR7_FIXED_1 0x00000400
+#define DR7_GLOBAL_BP_MASK 0xaa
#define DR7_LOCAL_BP_MASK 0x55
#define DR7_MAX_BP 4
#define DR7_TYPE_BP_INST 0x0
@@ -251,6 +261,7 @@
#define PG_PSE_BIT 7
#define PG_GLOBAL_BIT 8
#define PG_PSE_PAT_BIT 12
+#define PG_PKRU_BIT 59
#define PG_NX_BIT 63
#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
@@ -266,7 +277,8 @@
#define PG_ADDRESS_MASK 0x000ffffffffff000LL
#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
#define PG_HI_USER_MASK 0x7ff0000000000000LL
-#define PG_NX_MASK (1LL << PG_NX_BIT)
+#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
+#define PG_NX_MASK (1ULL << PG_NX_BIT)
#define PG_ERROR_W_BIT 1
@@ -275,6 +287,7 @@
#define PG_ERROR_U_MASK 0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK 0x10
+#define PG_ERROR_PK_MASK 0x20
#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
@@ -282,6 +295,8 @@
#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF 10
+#define MCG_CAP_BANKS_MASK 0xff
+
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
@@ -393,21 +408,32 @@
#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS 0x00000da0
-#define XSTATE_FP (1ULL << 0)
-#define XSTATE_SSE (1ULL << 1)
-#define XSTATE_YMM (1ULL << 2)
-#define XSTATE_BNDREGS (1ULL << 3)
-#define XSTATE_BNDCSR (1ULL << 4)
-#define XSTATE_OPMASK (1ULL << 5)
-#define XSTATE_ZMM_Hi256 (1ULL << 6)
-#define XSTATE_Hi16_ZMM (1ULL << 7)
-
+#define XSTATE_FP_BIT 0
+#define XSTATE_SSE_BIT 1
+#define XSTATE_YMM_BIT 2
+#define XSTATE_BNDREGS_BIT 3
+#define XSTATE_BNDCSR_BIT 4
+#define XSTATE_OPMASK_BIT 5
+#define XSTATE_ZMM_Hi256_BIT 6
+#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PKRU_BIT 9
+
+#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
+#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
+#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
+#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
+#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
+#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
+#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
+#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
/* CPUID feature words */
typedef enum FeatureWord {
FEAT_1_EDX, /* CPUID[1].EDX */
FEAT_1_ECX, /* CPUID[1].ECX */
FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
+ FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
@@ -572,10 +598,16 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EBX_RDSEED (1U << 18)
#define CPUID_7_0_EBX_ADX (1U << 19)
#define CPUID_7_0_EBX_SMAP (1U << 20)
+#define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */
+#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
+#define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */
#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
+#define CPUID_7_0_ECX_PKU (1U << 3)
+#define CPUID_7_0_ECX_OSPKE (1U << 4)
+
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
@@ -716,22 +748,18 @@ typedef struct SegmentCache {
uint32_t flags;
} SegmentCache;
-typedef union {
- uint8_t _b[64];
- uint16_t _w[32];
- uint32_t _l[16];
- uint64_t _q[8];
- float32 _s[16];
- float64 _d[8];
-} XMMReg; /* really zmm */
+#define MMREG_UNION(n, bits) \
+ union n { \
+ uint8_t _b_##n[(bits)/8]; \
+ uint16_t _w_##n[(bits)/16]; \
+ uint32_t _l_##n[(bits)/32]; \
+ uint64_t _q_##n[(bits)/64]; \
+ float32 _s_##n[(bits)/32]; \
+ float64 _d_##n[(bits)/64]; \
+ }
-typedef union {
- uint8_t _b[8];
- uint16_t _w[4];
- uint32_t _l[2];
- float32 _s[2];
- uint64_t q;
-} MMXReg;
+typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
+typedef MMREG_UNION(MMXReg, 64) MMXReg;
typedef struct BNDReg {
uint64_t lb;
@@ -743,32 +771,36 @@ typedef struct BNDCSReg {
uint64_t sts;
} BNDCSReg;
+#define BNDCFG_ENABLE 1ULL
+#define BNDCFG_BNDPRESERVE 2ULL
+#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
+
#ifdef HOST_WORDS_BIGENDIAN
-#define XMM_B(n) _b[63 - (n)]
-#define XMM_W(n) _w[31 - (n)]
-#define XMM_L(n) _l[15 - (n)]
-#define XMM_S(n) _s[15 - (n)]
-#define XMM_Q(n) _q[7 - (n)]
-#define XMM_D(n) _d[7 - (n)]
-
-#define MMX_B(n) _b[7 - (n)]
-#define MMX_W(n) _w[3 - (n)]
-#define MMX_L(n) _l[1 - (n)]
-#define MMX_S(n) _s[1 - (n)]
+#define ZMM_B(n) _b_ZMMReg[63 - (n)]
+#define ZMM_W(n) _w_ZMMReg[31 - (n)]
+#define ZMM_L(n) _l_ZMMReg[15 - (n)]
+#define ZMM_S(n) _s_ZMMReg[15 - (n)]
+#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
+#define ZMM_D(n) _d_ZMMReg[7 - (n)]
+
+#define MMX_B(n) _b_MMXReg[7 - (n)]
+#define MMX_W(n) _w_MMXReg[3 - (n)]
+#define MMX_L(n) _l_MMXReg[1 - (n)]
+#define MMX_S(n) _s_MMXReg[1 - (n)]
#else
-#define XMM_B(n) _b[n]
-#define XMM_W(n) _w[n]
-#define XMM_L(n) _l[n]
-#define XMM_S(n) _s[n]
-#define XMM_Q(n) _q[n]
-#define XMM_D(n) _d[n]
-
-#define MMX_B(n) _b[n]
-#define MMX_W(n) _w[n]
-#define MMX_L(n) _l[n]
-#define MMX_S(n) _s[n]
+#define ZMM_B(n) _b_ZMMReg[n]
+#define ZMM_W(n) _w_ZMMReg[n]
+#define ZMM_L(n) _l_ZMMReg[n]
+#define ZMM_S(n) _s_ZMMReg[n]
+#define ZMM_Q(n) _q_ZMMReg[n]
+#define ZMM_D(n) _d_ZMMReg[n]
+
+#define MMX_B(n) _b_MMXReg[n]
+#define MMX_W(n) _w_MMXReg[n]
+#define MMX_L(n) _l_MMXReg[n]
+#define MMX_S(n) _s_MMXReg[n]
#endif
-#define MMX_Q(n) q
+#define MMX_Q(n) _q_MMXReg[n]
typedef union {
floatx80 d __attribute__((aligned(16)));
@@ -793,6 +825,7 @@ typedef struct {
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
#define NB_MMU_MODES 3
+#define TARGET_INSN_START_EXTRA_WORDS 1
#define NB_OPMASK_REGS 8
@@ -832,6 +865,7 @@ typedef struct CPUX86State {
BNDReg bnd_regs[4];
BNDCSReg bndcs_regs;
uint64_t msr_bndcfgs;
+ uint64_t efer;
/* Beginning of state preserved by INIT (dummy marker). */
struct {} start_init_save;
@@ -854,8 +888,8 @@ typedef struct CPUX86State {
float_status mmx_status; /* for 3DNow! float ops */
float_status sse_status;
uint32_t mxcsr;
- XMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
- XMMReg xmm_t0;
+ ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
+ ZMMReg xmm_t0;
MMXReg mmx_t0;
uint64_t opmask_regs[NB_OPMASK_REGS];
@@ -864,7 +898,6 @@ typedef struct CPUX86State {
uint32_t sysenter_cs;
target_ulong sysenter_esp;
target_ulong sysenter_eip;
- uint64_t efer;
uint64_t star;
uint64_t vm_hsave;
@@ -908,12 +941,21 @@ typedef struct CPUX86State {
uint64_t msr_hv_guest_os_id;
uint64_t msr_hv_vapic;
uint64_t msr_hv_tsc;
+ uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
+ uint64_t msr_hv_runtime;
+ uint64_t msr_hv_synic_control;
+ uint64_t msr_hv_synic_version;
+ uint64_t msr_hv_synic_evt_page;
+ uint64_t msr_hv_synic_msg_page;
+ uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
+ uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT];
+ uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT];
/* exception/interrupt handling */
int error_code;
int exception_is_int;
target_ulong exception_next_eip;
- target_ulong dr[8]; /* debug registers */
+ target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
union {
struct CPUBreakpoint *cpu_breakpoint[4];
struct CPUWatchpoint *cpu_watchpoint[4];
@@ -963,6 +1005,7 @@ typedef struct CPUX86State {
uint32_t sipi_vector;
bool tsc_valid;
int64_t tsc_khz;
+ int64_t user_tsc_khz; /* for sanity check only */
void *kvm_xsave_buf;
uint64_t mcg_cap;
@@ -980,6 +1023,8 @@ typedef struct CPUX86State {
uint64_t xcr0;
uint64_t xss;
+ uint32_t pkru;
+
TPRAccess tpr_access_type;
} CPUX86State;
@@ -1098,7 +1143,14 @@ void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
int cpu_x86_signal_handler(int host_signum, void *pinfo,
void *puc);
-/* cpuid.c */
+/* cpu.c */
+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+} ExtSaveArea;
+
+extern const ExtSaveArea x86_ext_save_areas[];
+
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
@@ -1123,42 +1175,13 @@ void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
#endif
-static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
-{
- return (dr7 >> (index * 2)) & 1;
-}
-
-static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
-{
- return (dr7 >> (index * 2)) & 2;
-
-}
-static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
-{
- return hw_global_breakpoint_enabled(dr7, index) ||
- hw_local_breakpoint_enabled(dr7, index);
-}
-
-static inline int hw_breakpoint_type(unsigned long dr7, int index)
-{
- return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
-}
-
-static inline int hw_breakpoint_len(unsigned long dr7, int index)
-{
- int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
- return (len == 2) ? 8 : len + 1;
-}
-
-void hw_breakpoint_insert(CPUX86State *env, int index);
-void hw_breakpoint_remove(CPUX86State *env, int index);
-bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
void breakpoint_handler(CPUState *cs);
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
/* hw/pc.c */
uint64_t cpu_get_tsc(CPUX86State *env);
@@ -1187,7 +1210,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))
#define cpu_exec cpu_x86_exec
-#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup
@@ -1199,7 +1221,7 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define MMU_KSMAP_IDX 0
#define MMU_USER_IDX 1
#define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
{
return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
(!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
@@ -1235,7 +1257,7 @@ static inline target_long lshift(target_long x, int n)
#define ST1 ST(1)
/* translate.c */
-void optimize_flags_init(void);
+void tcg_x86_init(void);
#include "exec/cpu-all.h"
#include "svm.h"
@@ -1267,8 +1289,12 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
/* excp_helper.c */
void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
+void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
+ uintptr_t retaddr);
void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
int error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
+ int error_code, uintptr_t retaddr);
void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
int error_code, int next_eip_addend);
@@ -1318,6 +1344,9 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
void cpu_set_fpuc(CPUX86State *env, uint16_t val);
+/* mem_helper.c */
+void helper_lock_init(void);
+
/* svm_helper.c */
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
uint64_t param);
@@ -1332,12 +1361,18 @@ void cpu_smm_update(X86CPU *cpu);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
-void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
- uint32_t feat_add, uint32_t feat_remove);
-
-void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features);
-void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features);
+/* Change the value of a KVM-specific default
+ *
+ * If value is NULL, no default will be set and the original
+ * value from the CPU model table will be kept.
+ *
+ * It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+void x86_cpu_change_kvm_default(const char *prop, const char *value);
+/* mpx_helper.c */
+void cpu_sync_bndcs_hflags(CPUX86State *env);
/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);
@@ -1347,4 +1382,7 @@ void enable_compat_apic_id_mode(void);
#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE 0x100000
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+
#endif /* CPU_I386_H */
diff --git a/qemu/target-i386/excp_helper.c b/qemu/target-i386/excp_helper.c
index 99fca847d..ef37f4240 100644
--- a/qemu/target-i386/excp_helper.c
+++ b/qemu/target-i386/excp_helper.c
@@ -17,19 +17,12 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/helper-proto.h"
-#if 0
-#define raise_exception_err(env, a, b) \
- do { \
- qemu_log("raise_exception line=%d\n", __LINE__); \
- (raise_exception_err)(env, a, b); \
- } while (0)
-#endif
-
void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
raise_interrupt(env, intno, 1, 0, next_eip_addend);
@@ -92,7 +85,8 @@ static int check_exception(CPUX86State *env, int intno, int *error_code)
*/
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
int is_int, int error_code,
- int next_eip_addend)
+ int next_eip_addend,
+ uintptr_t retaddr)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
@@ -108,7 +102,7 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
env->error_code = error_code;
env->exception_is_int = is_int;
env->exception_next_eip = env->eip + next_eip_addend;
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, retaddr);
}
/* shortcuts to generate exceptions */
@@ -116,16 +110,27 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
int error_code, int next_eip_addend)
{
- raise_interrupt2(env, intno, is_int, error_code, next_eip_addend);
+ raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}
void raise_exception_err(CPUX86State *env, int exception_index,
int error_code)
{
- raise_interrupt2(env, exception_index, 0, error_code, 0);
+ raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
+}
+
+void raise_exception_err_ra(CPUX86State *env, int exception_index,
+ int error_code, uintptr_t retaddr)
+{
+ raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}
void raise_exception(CPUX86State *env, int exception_index)
{
- raise_interrupt2(env, exception_index, 0, 0, 0);
+ raise_interrupt2(env, exception_index, 0, 0, 0, 0);
+}
+
+void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
+{
+ raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
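The *_ra variants added above follow a pattern applied throughout this update: the outermost helper captures its host return address with GETPC() and threads it down, so cpu_loop_exit_restore() can recover the guest state of the exact instruction that faulted instead of exiting with a stale EIP. A self-contained analogue of the threading, with the QEMU primitives mocked (fail_with_context stands in for cpu_loop_exit_restore, and __builtin_return_address(0), a GCC/Clang builtin, for GETPC()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void fail_with_context(uintptr_t retaddr)
    {
        /* in QEMU this would unwind the TB found via retaddr */
        fprintf(stderr, "fault raised for call site %#lx\n",
                (unsigned long)retaddr);
        exit(1);
    }

    static int load_byte(const uint8_t *mem, size_t len, size_t off,
                         uintptr_t retaddr)
    {
        if (off >= len) {
            fail_with_context(retaddr);
        }
        return mem[off];
    }

    static int helper_entry(const uint8_t *mem, size_t len, size_t off)
    {
        /* capture the caller once, at the helper boundary */
        uintptr_t ra = (uintptr_t)__builtin_return_address(0);
        return load_byte(mem, len, off, ra);
    }

    int main(void)
    {
        uint8_t buf[4] = { 1, 2, 3, 4 };
        printf("%d\n", helper_entry(buf, sizeof(buf), 2));
        return 0;
    }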
diff --git a/qemu/target-i386/fpu_helper.c b/qemu/target-i386/fpu_helper.c
index 1f954e0c5..fee5573a1 100644
--- a/qemu/target-i386/fpu_helper.c
+++ b/qemu/target-i386/fpu_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include <math.h>
#include "cpu.h"
#include "exec/helper-proto.h"
@@ -67,22 +68,24 @@ static inline void fpop(CPUX86State *env)
env->fpstt = (env->fpstt + 1) & 7;
}
-static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr)
+static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr,
+ uintptr_t retaddr)
{
CPU_LDoubleU temp;
- temp.l.lower = cpu_ldq_data(env, ptr);
- temp.l.upper = cpu_lduw_data(env, ptr + 8);
+ temp.l.lower = cpu_ldq_data_ra(env, ptr, retaddr);
+ temp.l.upper = cpu_lduw_data_ra(env, ptr + 8, retaddr);
return temp.d;
}
-static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr)
+static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr,
+ uintptr_t retaddr)
{
CPU_LDoubleU temp;
temp.d = f;
- cpu_stq_data(env, ptr, temp.l.lower);
- cpu_stw_data(env, ptr + 8, temp.l.upper);
+ cpu_stq_data_ra(env, ptr, temp.l.lower, retaddr);
+ cpu_stw_data_ra(env, ptr + 8, temp.l.upper, retaddr);
}
/* x87 FPU helpers */
@@ -125,10 +128,10 @@ static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
return floatx80_div(a, b, &env->fp_status);
}
-static void fpu_raise_exception(CPUX86State *env)
+static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
{
if (env->cr[0] & CR0_NE_MASK) {
- raise_exception(env, EXCP10_COPR);
+ raise_exception_ra(env, EXCP10_COPR, retaddr);
}
#if !defined(CONFIG_USER_ONLY)
else {
@@ -313,14 +316,14 @@ void helper_fldt_ST0(CPUX86State *env, target_ulong ptr)
int new_fpstt;
new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt].d = helper_fldt(env, ptr);
+ env->fpregs[new_fpstt].d = helper_fldt(env, ptr, GETPC());
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
}
void helper_fstt_ST0(CPUX86State *env, target_ulong ptr)
{
- helper_fstt(env, ST0, ptr);
+ helper_fstt(env, ST0, ptr, GETPC());
}
void helper_fpush(CPUX86State *env)
@@ -603,7 +606,7 @@ void helper_fclex(CPUX86State *env)
void helper_fwait(CPUX86State *env)
{
if (env->fpus & FPUS_SE) {
- fpu_raise_exception(env);
+ fpu_raise_exception(env, GETPC());
}
}
@@ -633,11 +636,11 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
val = 0;
for (i = 8; i >= 0; i--) {
- v = cpu_ldub_data(env, ptr + i);
+ v = cpu_ldub_data_ra(env, ptr + i, GETPC());
val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
}
tmp = int64_to_floatx80(val, &env->fp_status);
- if (cpu_ldub_data(env, ptr + 9) & 0x80) {
+ if (cpu_ldub_data_ra(env, ptr + 9, GETPC()) & 0x80) {
tmp = floatx80_chs(tmp);
}
fpush(env);
@@ -654,10 +657,10 @@ void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
mem_ref = ptr;
mem_end = mem_ref + 9;
if (val < 0) {
- cpu_stb_data(env, mem_end, 0x80);
+ cpu_stb_data_ra(env, mem_end, 0x80, GETPC());
val = -val;
} else {
- cpu_stb_data(env, mem_end, 0x00);
+ cpu_stb_data_ra(env, mem_end, 0x00, GETPC());
}
while (mem_ref < mem_end) {
if (val == 0) {
@@ -666,10 +669,10 @@ void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
v = val % 100;
val = val / 100;
v = ((v / 10) << 4) | (v % 10);
- cpu_stb_data(env, mem_ref++, v);
+ cpu_stb_data_ra(env, mem_ref++, v, GETPC());
}
while (mem_ref < mem_end) {
- cpu_stb_data(env, mem_ref++, 0);
+ cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
}
}
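For reference, the store loop above emits packed BCD as FBSTP expects: two decimal digits per byte, low digit in the low nibble, least significant byte first. A standalone rendering of the same arithmetic (illustrative only, sign byte omitted):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t val = 123456;
        uint8_t out[9] = { 0 };
        int i = 0, j;

        while (val != 0 && i < 9) {
            unsigned v = val % 100;
            val /= 100;
            out[i++] = (uint8_t)(((v / 10) << 4) | (v % 10));
        }
        for (j = 8; j >= 0; j--) {
            printf("%02x", out[j]);   /* prints 000000000000123456 */
        }
        printf("\n");
        return 0;
    }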
@@ -977,7 +980,8 @@ void helper_fxam_ST0(CPUX86State *env)
}
}
-void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32)
+static void do_fstenv(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
{
int fpus, fptag, exp, i;
uint64_t mant;
@@ -1005,37 +1009,43 @@ void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32)
}
if (data32) {
/* 32 bit */
- cpu_stl_data(env, ptr, env->fpuc);
- cpu_stl_data(env, ptr + 4, fpus);
- cpu_stl_data(env, ptr + 8, fptag);
- cpu_stl_data(env, ptr + 12, 0); /* fpip */
- cpu_stl_data(env, ptr + 16, 0); /* fpcs */
- cpu_stl_data(env, ptr + 20, 0); /* fpoo */
- cpu_stl_data(env, ptr + 24, 0); /* fpos */
+ cpu_stl_data_ra(env, ptr, env->fpuc, retaddr);
+ cpu_stl_data_ra(env, ptr + 4, fpus, retaddr);
+ cpu_stl_data_ra(env, ptr + 8, fptag, retaddr);
+ cpu_stl_data_ra(env, ptr + 12, 0, retaddr); /* fpip */
+ cpu_stl_data_ra(env, ptr + 16, 0, retaddr); /* fpcs */
+ cpu_stl_data_ra(env, ptr + 20, 0, retaddr); /* fpoo */
+ cpu_stl_data_ra(env, ptr + 24, 0, retaddr); /* fpos */
} else {
/* 16 bit */
- cpu_stw_data(env, ptr, env->fpuc);
- cpu_stw_data(env, ptr + 2, fpus);
- cpu_stw_data(env, ptr + 4, fptag);
- cpu_stw_data(env, ptr + 6, 0);
- cpu_stw_data(env, ptr + 8, 0);
- cpu_stw_data(env, ptr + 10, 0);
- cpu_stw_data(env, ptr + 12, 0);
+ cpu_stw_data_ra(env, ptr, env->fpuc, retaddr);
+ cpu_stw_data_ra(env, ptr + 2, fpus, retaddr);
+ cpu_stw_data_ra(env, ptr + 4, fptag, retaddr);
+ cpu_stw_data_ra(env, ptr + 6, 0, retaddr);
+ cpu_stw_data_ra(env, ptr + 8, 0, retaddr);
+ cpu_stw_data_ra(env, ptr + 10, 0, retaddr);
+ cpu_stw_data_ra(env, ptr + 12, 0, retaddr);
}
}
-void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
+void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fstenv(env, ptr, data32, GETPC());
+}
+
+static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
{
int i, fpus, fptag;
if (data32) {
- cpu_set_fpuc(env, cpu_lduw_data(env, ptr));
- fpus = cpu_lduw_data(env, ptr + 4);
- fptag = cpu_lduw_data(env, ptr + 8);
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+ fpus = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+ fptag = cpu_lduw_data_ra(env, ptr + 8, retaddr);
} else {
- cpu_set_fpuc(env, cpu_lduw_data(env, ptr));
- fpus = cpu_lduw_data(env, ptr + 2);
- fptag = cpu_lduw_data(env, ptr + 4);
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+ fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr);
+ fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr);
}
env->fpstt = (fpus >> 11) & 7;
env->fpus = fpus & ~0x3800;
@@ -1045,17 +1055,22 @@ void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
}
}
+void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fldenv(env, ptr, data32, GETPC());
+}
+
void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
{
floatx80 tmp;
int i;
- helper_fstenv(env, ptr, data32);
+ do_fstenv(env, ptr, data32, GETPC());
ptr += (14 << data32);
for (i = 0; i < 8; i++) {
tmp = ST(i);
- helper_fstt(env, tmp, ptr);
+ helper_fstt(env, tmp, ptr, GETPC());
ptr += 10;
}
@@ -1078,11 +1093,11 @@ void helper_frstor(CPUX86State *env, target_ulong ptr, int data32)
floatx80 tmp;
int i;
- helper_fldenv(env, ptr, data32);
+ do_fldenv(env, ptr, data32, GETPC());
ptr += (14 << data32);
for (i = 0; i < 8; i++) {
- tmp = helper_fldt(env, ptr);
+ tmp = helper_fldt(env, ptr, GETPC());
ST(i) = tmp;
ptr += 10;
}
@@ -1100,82 +1115,183 @@ void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
}
#endif
-void helper_fxsave(CPUX86State *env, target_ulong ptr, int data64)
+static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- int fpus, fptag, i, nb_xmm_regs;
- floatx80 tmp;
+ int fpus, fptag, i;
target_ulong addr;
- /* The operand must be 16 byte aligned */
- if (ptr & 0xf) {
- raise_exception(env, EXCP0D_GPF);
- }
-
fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
fptag = 0;
for (i = 0; i < 8; i++) {
fptag |= (env->fptags[i] << i);
}
- cpu_stw_data(env, ptr, env->fpuc);
- cpu_stw_data(env, ptr + 2, fpus);
- cpu_stw_data(env, ptr + 4, fptag ^ 0xff);
-#ifdef TARGET_X86_64
- if (data64) {
- cpu_stq_data(env, ptr + 0x08, 0); /* rip */
- cpu_stq_data(env, ptr + 0x10, 0); /* rdp */
- } else
-#endif
- {
- cpu_stl_data(env, ptr + 0x08, 0); /* eip */
- cpu_stl_data(env, ptr + 0x0c, 0); /* sel */
- cpu_stl_data(env, ptr + 0x10, 0); /* dp */
- cpu_stl_data(env, ptr + 0x14, 0); /* sel */
- }
+ cpu_stw_data_ra(env, ptr, env->fpuc, ra);
+ cpu_stw_data_ra(env, ptr + 2, fpus, ra);
+ cpu_stw_data_ra(env, ptr + 4, fptag ^ 0xff, ra);
+
+ /* In 32-bit mode this is eip, sel, dp, sel.
+ In 64-bit mode this is rip, rdp.
+ But in either case we don't write actual data, just zeros. */
+ cpu_stq_data_ra(env, ptr + 0x08, 0, ra); /* eip+sel; rip */
+ cpu_stq_data_ra(env, ptr + 0x10, 0, ra); /* edp+sel; rdp */
addr = ptr + 0x20;
for (i = 0; i < 8; i++) {
- tmp = ST(i);
- helper_fstt(env, tmp, addr);
+ floatx80 tmp = ST(i);
+ helper_fstt(env, tmp, addr, ra);
addr += 16;
}
+}
+
+static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_stl_data_ra(env, ptr + 0x18, env->mxcsr, ra); /* mxcsr */
+ cpu_stl_data_ra(env, ptr + 0x1c, 0x0000ffff, ra); /* mxcsr_mask */
+}
+
+static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + 0xa0;
+ for (i = 0; i < nb_xmm_regs; i++) {
+ cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra);
+ cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra);
+ addr += 16;
+ }
+}
+
+static void do_xsave_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ cpu_stq_data_ra(env, addr, env->bnd_regs[i].lb, ra);
+ cpu_stq_data_ra(env, addr + 8, env->bnd_regs[i].ub, ra);
+ }
+}
+
+static void do_xsave_bndcsr(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ cpu_stq_data_ra(env, addr, env->bndcs_regs.cfgu, ra);
+ cpu_stq_data_ra(env, addr + 8, env->bndcs_regs.sts, ra);
+}
+
+static void do_xsave_pkru(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ cpu_stq_data_ra(env, addr, env->pkru, ra);
+}
+
+void helper_fxsave(CPUX86State *env, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+
+ /* The operand must be 16-byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xsave_fpu(env, ptr, ra);
if (env->cr[4] & CR4_OSFXSR_MASK) {
- /* XXX: finish it */
- cpu_stl_data(env, ptr + 0x18, env->mxcsr); /* mxcsr */
- cpu_stl_data(env, ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
- if (env->hflags & HF_CS64_MASK) {
- nb_xmm_regs = 16;
- } else {
- nb_xmm_regs = 8;
- }
- addr = ptr + 0xa0;
+ do_xsave_mxcsr(env, ptr, ra);
/* Fast FXSAVE leaves out the XMM registers */
if (!(env->efer & MSR_EFER_FFXSR)
|| (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) {
- for (i = 0; i < nb_xmm_regs; i++) {
- cpu_stq_data(env, addr, env->xmm_regs[i].XMM_Q(0));
- cpu_stq_data(env, addr + 8, env->xmm_regs[i].XMM_Q(1));
- addr += 16;
- }
+ do_xsave_sse(env, ptr, ra);
}
}
}
-void helper_fxrstor(CPUX86State *env, target_ulong ptr, int data64)
+static uint64_t get_xinuse(CPUX86State *env)
{
- int i, fpus, fptag, nb_xmm_regs;
- floatx80 tmp;
- target_ulong addr;
+ uint64_t inuse = -1;
- /* The operand must be 16 byte aligned */
- if (ptr & 0xf) {
- raise_exception(env, EXCP0D_GPF);
+ /* For the most part, we don't track XINUSE. We could calculate it
+ here for all components, but it's probably less work to simply
+ report everything as in use. That said, the state of BNDREGS is important
+ enough to track in HFLAGS, so we might as well use that here. */
+ if ((env->hflags & HF_MPX_IU_MASK) == 0) {
+ inuse &= ~XSTATE_BNDREGS_MASK;
+ }
+ return inuse;
+}
+
+static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm,
+ uint64_t inuse, uint64_t opt, uintptr_t ra)
+{
+ uint64_t old_bv, new_bv;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64-byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Never save anything not enabled by XCR0. */
+ rfbm &= env->xcr0;
+ opt &= rfbm;
+
+ if (opt & XSTATE_FP_MASK) {
+ do_xsave_fpu(env, ptr, ra);
+ }
+ if (rfbm & XSTATE_SSE_MASK) {
+ /* Note that saving MXCSR is not suppressed by XSAVEOPT. */
+ do_xsave_mxcsr(env, ptr, ra);
+ }
+ if (opt & XSTATE_SSE_MASK) {
+ do_xsave_sse(env, ptr, ra);
+ }
+ if (opt & XSTATE_BNDREGS_MASK) {
+ target_ulong off = x86_ext_save_areas[XSTATE_BNDREGS_BIT].offset;
+ do_xsave_bndregs(env, ptr + off, ra);
+ }
+ if (opt & XSTATE_BNDCSR_MASK) {
+ target_ulong off = x86_ext_save_areas[XSTATE_BNDCSR_BIT].offset;
+ do_xsave_bndcsr(env, ptr + off, ra);
}
+ if (opt & XSTATE_PKRU_MASK) {
+ target_ulong off = x86_ext_save_areas[XSTATE_PKRU_BIT].offset;
+ do_xsave_pkru(env, ptr + off, ra);
+ }
+
+ /* Update the XSTATE_BV field. */
+ old_bv = cpu_ldq_data_ra(env, ptr + 512, ra);
+ new_bv = (old_bv & ~rfbm) | (inuse & rfbm);
+ cpu_stq_data_ra(env, ptr + 512, new_bv, ra);
+}
+
+void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ do_xsave(env, ptr, rfbm, get_xinuse(env), -1, GETPC());
+}
+
+void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ uint64_t inuse = get_xinuse(env);
+ do_xsave(env, ptr, rfbm, inuse, inuse, GETPC());
+}
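The mask plumbing in do_xsave() is easiest to follow with concrete values: rfbm is the requested-feature bitmap clipped to XCR0, XSAVEOPT additionally clips the components written to XINUSE (MXCSR being the exception noted in the code: it is written whenever rfbm requests SSE), and XSTATE_BV becomes (old & ~rfbm) | (inuse & rfbm). A sketch with made-up bit assignments (FP = bit 0, SSE = bit 1, AVX = bit 2; not the QEMU constants):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t xcr0   = 0x7;        /* OS enabled FP|SSE|AVX */
        uint64_t rfbm   = 0x3 & xcr0; /* guest requested FP|SSE */
        uint64_t inuse  = 0x1;        /* only FP actually in use */
        uint64_t old_bv = 0x6;        /* previous XSTATE_BV in memory */

        uint64_t xsave_set    = (uint64_t)-1 & rfbm; /* helper_xsave    */
        uint64_t xsaveopt_set = inuse & rfbm;        /* helper_xsaveopt */
        uint64_t new_bv       = (old_bv & ~rfbm) | (inuse & rfbm);

        printf("XSAVE writes %#llx, XSAVEOPT writes %#llx, BV -> %#llx\n",
               (unsigned long long)xsave_set,
               (unsigned long long)xsaveopt_set,
               (unsigned long long)new_bv);
        return 0;
    }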
- cpu_set_fpuc(env, cpu_lduw_data(env, ptr));
- fpus = cpu_lduw_data(env, ptr + 2);
- fptag = cpu_lduw_data(env, ptr + 4);
+static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, fpus, fptag;
+ target_ulong addr;
+
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, ra));
+ fpus = cpu_lduw_data_ra(env, ptr + 2, ra);
+ fptag = cpu_lduw_data_ra(env, ptr + 4, ra);
env->fpstt = (fpus >> 11) & 7;
env->fpus = fpus & ~0x3800;
fptag ^= 0xff;
@@ -1185,34 +1301,227 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr, int data64)
addr = ptr + 0x20;
for (i = 0; i < 8; i++) {
- tmp = helper_fldt(env, addr);
+ floatx80 tmp = helper_fldt(env, addr, ra);
ST(i) = tmp;
addr += 16;
}
+}
+
+static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + 0x18, ra));
+}
+
+static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + 0xa0;
+ for (i = 0; i < nb_xmm_regs; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra);
+ env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra);
+ addr += 16;
+ }
+}
+
+static void do_xrstor_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ env->bnd_regs[i].lb = cpu_ldq_data_ra(env, addr, ra);
+ env->bnd_regs[i].ub = cpu_ldq_data_ra(env, addr + 8, ra);
+ }
+}
+
+static void do_xrstor_bndcsr(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->bndcs_regs.cfgu = cpu_ldq_data_ra(env, addr, ra);
+ env->bndcs_regs.sts = cpu_ldq_data_ra(env, addr + 8, ra);
+}
+
+static void do_xrstor_pkru(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ env->pkru = cpu_ldq_data_ra(env, addr, ra);
+}
+
+void helper_fxrstor(CPUX86State *env, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+
+ /* The operand must be 16-byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xrstor_fpu(env, ptr, ra);
if (env->cr[4] & CR4_OSFXSR_MASK) {
- /* XXX: finish it */
- cpu_set_mxcsr(env, cpu_ldl_data(env, ptr + 0x18));
- /* cpu_ldl_data(env, ptr + 0x1c); */
- if (env->hflags & HF_CS64_MASK) {
- nb_xmm_regs = 16;
- } else {
- nb_xmm_regs = 8;
- }
- addr = ptr + 0xa0;
- /* Fast FXRESTORE leaves out the XMM registers */
+ do_xrstor_mxcsr(env, ptr, ra);
+ /* Fast FXRSTOR leaves out the XMM registers */
if (!(env->efer & MSR_EFER_FFXSR)
|| (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) {
- for (i = 0; i < nb_xmm_regs; i++) {
- env->xmm_regs[i].XMM_Q(0) = cpu_ldq_data(env, addr);
- env->xmm_regs[i].XMM_Q(1) = cpu_ldq_data(env, addr + 8);
- addr += 16;
- }
+ do_xrstor_sse(env, ptr, ra);
+ }
+ }
+}
+
+void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ uintptr_t ra = GETPC();
+ uint64_t xstate_bv, xcomp_bv0, xcomp_bv1;
+
+ rfbm &= env->xcr0;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64-byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ xstate_bv = cpu_ldq_data_ra(env, ptr + 512, ra);
+
+ if ((int64_t)xstate_bv < 0) {
+ /* FIXME: Compact form. */
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Standard form. */
+
+ /* The XSTATE field must not set bits not present in XCR0. */
+ if (xstate_bv & ~env->xcr0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* The XCOMP field must be zero. */
+ xcomp_bv0 = cpu_ldq_data_ra(env, ptr + 520, ra);
+ xcomp_bv1 = cpu_ldq_data_ra(env, ptr + 528, ra);
+ if (xcomp_bv0 || xcomp_bv1) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ if (rfbm & XSTATE_FP_MASK) {
+ if (xstate_bv & XSTATE_FP_MASK) {
+ do_xrstor_fpu(env, ptr, ra);
+ } else {
+ helper_fninit(env);
+ memset(env->fpregs, 0, sizeof(env->fpregs));
+ }
+ }
+ if (rfbm & XSTATE_SSE_MASK) {
+ /* Note that the standard form of XRSTOR loads MXCSR from memory
+ whether or not the XSTATE_BV bit is set. */
+ do_xrstor_mxcsr(env, ptr, ra);
+ if (xstate_bv & XSTATE_SSE_MASK) {
+ do_xrstor_sse(env, ptr, ra);
+ } else {
+ /* ??? When AVX is implemented, we may have to be more
+ selective in the clearing. */
+ memset(env->xmm_regs, 0, sizeof(env->xmm_regs));
+ }
+ }
+ if (rfbm & XSTATE_BNDREGS_MASK) {
+ if (xstate_bv & XSTATE_BNDREGS_MASK) {
+ target_ulong off = x86_ext_save_areas[XSTATE_BNDREGS_BIT].offset;
+ do_xrstor_bndregs(env, ptr + off, ra);
+ env->hflags |= HF_MPX_IU_MASK;
+ } else {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+ }
+ if (rfbm & XSTATE_BNDCSR_MASK) {
+ if (xstate_bv & XSTATE_BNDCSR_MASK) {
+ target_ulong off = x86_ext_save_areas[XSTATE_BNDCSR_BIT].offset;
+ do_xrstor_bndcsr(env, ptr + off, ra);
+ } else {
+ memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs));
+ }
+ cpu_sync_bndcs_hflags(env);
+ }
+ if (rfbm & XSTATE_PKRU_MASK) {
+ uint64_t old_pkru = env->pkru;
+ if (xstate_bv & XSTATE_PKRU_MASK) {
+ target_ulong off = x86_ext_save_areas[XSTATE_PKRU_BIT].offset;
+ do_xrstor_pkru(env, ptr + off, ra);
+ } else {
+ env->pkru = 0;
+ }
+ if (env->pkru != old_pkru) {
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ tlb_flush(cs, 1);
}
}
}
+uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx)
+{
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ switch (ecx) {
+ case 0:
+ return env->xcr0;
+ case 1:
+ if (env->features[FEAT_XSAVE] & CPUID_XSAVE_XGETBV1) {
+ return env->xcr0 & get_xinuse(env);
+ }
+ break;
+ }
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask)
+{
+ uint32_t dummy, ena_lo, ena_hi;
+ uint64_t ena;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ /* Only XCR0 is defined at present; the FPU may not be disabled. */
+ if (ecx != 0 || (mask & XSTATE_FP_MASK) == 0) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling unimplemented features. */
+ cpu_x86_cpuid(env, 0x0d, 0, &ena_lo, &dummy, &dummy, &ena_hi);
+ ena = ((uint64_t)ena_hi << 32) | ena_lo;
+ if (mask & ~ena) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling only half of MPX. */
+ if ((mask ^ (mask * (XSTATE_BNDCSR_MASK / XSTATE_BNDREGS_MASK)))
+ & XSTATE_BNDCSR_MASK) {
+ goto do_gpf;
+ }
+
+ env->xcr0 = mask;
+ cpu_sync_bndcs_hflags(env);
+ return;
+
+ do_gpf:
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
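The "only half of MPX" test in helper_xsetbv() packs a neat trick: with BNDREGS at bit 3 and BNDCSR at bit 4 (the actual XSTATE bit positions), multiplying mask by BNDCSR/BNDREGS == 2 shifts the BNDREGS bit onto the BNDCSR position, so the XOR has the BNDCSR bit set exactly when the two bits differ. A worked check (mpx_half_enabled is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    #define BNDREGS_MASK (1ULL << 3)
    #define BNDCSR_MASK  (1ULL << 4)

    static int mpx_half_enabled(uint64_t mask)
    {
        return ((mask ^ (mask * (BNDCSR_MASK / BNDREGS_MASK)))
                & BNDCSR_MASK) != 0;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               mpx_half_enabled(0),                           /* 0 */
               mpx_half_enabled(BNDREGS_MASK),                /* 1 */
               mpx_half_enabled(BNDCSR_MASK),                 /* 1 */
               mpx_half_enabled(BNDREGS_MASK | BNDCSR_MASK)); /* 0 */
        return 0;
    }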
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
CPU_LDoubleU temp;
diff --git a/qemu/target-i386/gdbstub.c b/qemu/target-i386/gdbstub.c
index ff99cfb00..4b5071398 100644
--- a/qemu/target-i386/gdbstub.c
+++ b/qemu/target-i386/gdbstub.c
@@ -17,7 +17,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "config.h"
+#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/gdbstub.h"
@@ -61,8 +61,8 @@ int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
n -= IDX_XMM_REGS;
if (n < CPU_NB_REGS32 ||
(TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
- stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
- stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
+ stq_p(mem_buf, env->xmm_regs[n].ZMM_Q(0));
+ stq_p(mem_buf + 8, env->xmm_regs[n].ZMM_Q(1));
return 16;
}
} else {
@@ -170,8 +170,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
n -= IDX_XMM_REGS;
if (n < CPU_NB_REGS32 ||
(TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
- env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
- env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
+ env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
+ env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
return 16;
}
} else {
diff --git a/qemu/target-i386/helper.c b/qemu/target-i386/helper.c
index 5480a96a0..bf3e76207 100644
--- a/qemu/target-i386/helper.c
+++ b/qemu/target-i386/helper.c
@@ -17,12 +17,14 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
+#include "hw/i386/apic_internal.h"
#endif
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
@@ -177,6 +179,196 @@ done:
cpu_fprintf(f, "\n");
}
+#ifndef CONFIG_USER_ONLY
+
+/* ARRAY_SIZE check is not required because
+ * DeliveryMode(dm) has a size of 3 bits.
+ */
+static inline const char *dm2str(uint32_t dm)
+{
+ static const char *str[] = {
+ "Fixed",
+ "...",
+ "SMI",
+ "...",
+ "NMI",
+ "INIT",
+ "...",
+ "ExtINT"
+ };
+ return str[dm];
+}
+
+static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
+ const char *name, uint32_t lvt, bool is_timer)
+{
+ uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
+ cpu_fprintf(f,
+ "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
+ name, lvt,
+ lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
+ lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
+ lvt & APIC_LVT_MASKED ? "masked" : "",
+ lvt & APIC_LVT_DELIV_STS ? "pending" : "",
+ !is_timer ?
+ "" : lvt & APIC_LVT_TIMER_PERIODIC ?
+ "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
+ "tsc-deadline" : "one-shot",
+ dm2str(dm));
+ if (dm != APIC_DM_NMI) {
+ cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
+ } else {
+ cpu_fprintf(f, "\n");
+ }
+}
+
+/* ARRAY_SIZE check is not required because
+ * destination shorthand has a size of 2 bits.
+ */
+static inline const char *shorthand2str(uint32_t shorthand)
+{
+ const char *str[] = {
+ "no-shorthand", "self", "all-self", "all"
+ };
+ return str[shorthand];
+}
+
+static inline uint8_t divider_conf(uint32_t divide_conf)
+{
+ uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);
+
+ return divide_val == 7 ? 1 : 2 << divide_val;
+}
+
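divider_conf() above decodes the APIC divide configuration register: bits 0-1 and bit 3 form a 3-bit value v (bit 2 is reserved), and the timer divider is 2 << v, i.e. 2^(v+1), except that v == 7 means divide-by-1. A quick table of the decode (standalone sketch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        static const uint32_t dcr[] = { 0x0, 0x1, 0x2, 0x3,
                                        0x8, 0x9, 0xa, 0xb };
        unsigned i;

        for (i = 0; i < 8; i++) {
            uint8_t v = ((dcr[i] & 0x8) >> 1) | (dcr[i] & 0x3);
            printf("DCR %#3x -> divide by %u\n", (unsigned)dcr[i],
                   v == 7 ? 1 : 2u << v);   /* 2, 4, ..., 128, 1 */
        }
        return 0;
    }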
+static inline void mask2str(char *str, uint32_t val, uint8_t size)
+{
+ while (size--) {
+ *str++ = (val >> size) & 1 ? '1' : '0';
+ }
+ *str = 0;
+}
+
+#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
+
+static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
+ APICCommonState *s, CPUX86State *env)
+{
+ uint32_t icr = s->icr[0], icr2 = s->icr[1];
+ uint8_t dest_shorthand = \
+ (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
+ bool logical_mod = icr & APIC_ICR_DEST_MOD;
+ char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
+ uint32_t dest_field;
+ bool x2apic;
+
+ cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
+ icr,
+ logical_mod ? "logical" : "physical",
+ icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
+ icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
+ shorthand2str(dest_shorthand));
+
+ cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
+ if (dest_shorthand != 0) {
+ cpu_fprintf(f, "\n");
+ return;
+ }
+ x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
+ dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;
+
+ if (!logical_mod) {
+ if (x2apic) {
+ cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
+ } else {
+ cpu_fprintf(f, " cpu %u (APIC ID)\n",
+ dest_field & APIC_LOGDEST_XAPIC_ID);
+ }
+ return;
+ }
+
+ if (s->dest_mode == 0xf) { /* flat mode */
+ mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
+ cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
+ } else if (s->dest_mode == 0) { /* cluster mode */
+ if (x2apic) {
+ mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
+ cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
+ dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
+ } else {
+ mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
+ cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
+ dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
+ }
+ }
+}
+
+static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
+ const char *name, uint32_t *ireg_tab,
+ uint32_t *tmr_tab)
+{
+ int i, empty = true;
+
+ cpu_fprintf(f, "%s\t ", name);
+ for (i = 0; i < 256; i++) {
+ if (apic_get_bit(ireg_tab, i)) {
+ cpu_fprintf(f, "%u%s ", i,
+ apic_get_bit(tmr_tab, i) ? "(level)" : "");
+ empty = false;
+ }
+ }
+ cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
+}
+
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ APICCommonState *s = APIC_COMMON(cpu->apic_state);
+ uint32_t *lvt = s->lvt;
+
+ cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
+ CPU(cpu)->cpu_index);
+ dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);
+
+ cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
+ s->divide_conf & APIC_DCR_MASK,
+ divider_conf(s->divide_conf),
+ s->initial_count);
+
+ cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
+ s->spurious_vec,
+ s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
+ s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
+ s->spurious_vec & APIC_VECTOR_MASK);
+
+ dump_apic_icr(f, cpu_fprintf, s, &cpu->env);
+
+ cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);
+
+ dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
+ dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);
+
+ cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
+ s->arb_id, s->tpr, s->dest_mode, s->log_dest);
+ if (s->dest_mode == 0) {
+ cpu_fprintf(f, "(cluster %u: id %u)",
+ s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
+ s->log_dest & APIC_LOGDEST_XAPIC_ID);
+ }
+ cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
+}
+#else
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+}
+#endif /* !CONFIG_USER_ONLY */
+
#define DUMP_CODE_BYTES_TOTAL 50
#define DUMP_CODE_BYTES_BACKWARD 20
@@ -344,10 +536,10 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
for(i=0;i<nb;i++) {
cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
i,
- env->xmm_regs[i].XMM_L(3),
- env->xmm_regs[i].XMM_L(2),
- env->xmm_regs[i].XMM_L(1),
- env->xmm_regs[i].XMM_L(0));
+ env->xmm_regs[i].ZMM_L(3),
+ env->xmm_regs[i].ZMM_L(2),
+ env->xmm_regs[i].ZMM_L(1),
+ env->xmm_regs[i].ZMM_L(0));
if ((i & 1) == 1)
cpu_fprintf(f, "\n");
else
@@ -455,6 +647,7 @@ void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
X86CPU *cpu = x86_env_get_cpu(env);
+ uint32_t hflags;
#if defined(DEBUG_MMU)
printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
@@ -464,24 +657,33 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
CR4_SMEP_MASK | CR4_SMAP_MASK)) {
tlb_flush(CPU(cpu), 1);
}
+
+ /* Clear bits we're going to recompute. */
+ hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);
+
/* SSE handling */
if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
new_cr4 &= ~CR4_OSFXSR_MASK;
}
- env->hflags &= ~HF_OSFXSR_MASK;
if (new_cr4 & CR4_OSFXSR_MASK) {
- env->hflags |= HF_OSFXSR_MASK;
+ hflags |= HF_OSFXSR_MASK;
}
if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
new_cr4 &= ~CR4_SMAP_MASK;
}
- env->hflags &= ~HF_SMAP_MASK;
if (new_cr4 & CR4_SMAP_MASK) {
- env->hflags |= HF_SMAP_MASK;
+ hflags |= HF_SMAP_MASK;
+ }
+
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
+ new_cr4 &= ~CR4_PKE_MASK;
}
env->cr[4] = new_cr4;
+ env->hflags = hflags;
+
+ cpu_sync_bndcs_hflags(env);
}
#if defined(CONFIG_USER_ONLY)
@@ -669,7 +871,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
* Leave bits 20-13 in place for setting accessed/dirty bits below.
*/
- pte = pde | ((pde & 0x1fe000) << (32 - 13));
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
rsvd_mask = 0x200000;
goto do_check_protect_pse36;
}
@@ -699,38 +901,50 @@ do_check_protect_pse36:
goto do_fault_rsvd;
}
ptep ^= PG_NX_MASK;
- if ((ptep & PG_NX_MASK) && is_write1 == 2) {
+
+ /* can the page be put in the TLB? prot will tell us */
+ if (is_user && !(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
- switch (mmu_idx) {
- case MMU_USER_IDX:
- if (!(ptep & PG_USER_MASK)) {
- goto do_fault_protect;
- }
- if (is_write && !(ptep & PG_RW_MASK)) {
- goto do_fault_protect;
- }
- break;
- case MMU_KSMAP_IDX:
- if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
- goto do_fault_protect;
+ prot = 0;
+ if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
+ prot |= PAGE_READ;
+ if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
+ prot |= PAGE_WRITE;
}
- /* fall through */
- case MMU_KNOSMAP_IDX:
- if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
- (ptep & PG_USER_MASK)) {
- goto do_fault_protect;
+ }
+ if (!(ptep & PG_NX_MASK) &&
+ (mmu_idx == MMU_USER_IDX ||
+ !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
+ prot |= PAGE_EXEC;
+ }
+ if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
+ (ptep & PG_USER_MASK) && env->pkru) {
+ uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
+ uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
+ uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
+ uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ if (pkru_ad) {
+ pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
+ } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
+ pkru_prot &= ~PAGE_WRITE;
}
- if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(ptep & PG_RW_MASK)) {
+
+ prot &= pkru_prot;
+ if ((pkru_prot & (1 << is_write1)) == 0) {
+ assert(is_write1 != 2);
+ error_code |= PG_ERROR_PK_MASK;
goto do_fault_protect;
}
- break;
+ }
- default: /* cannot happen */
- break;
+ if ((prot & (1 << is_write1)) == 0) {
+ goto do_fault_protect;
}
+
+ /* yes, it can! */
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
@@ -740,25 +954,13 @@ do_check_protect_pse36:
x86_stl_phys_notdirty(cs, pte_addr, pte);
}
- /* the page can be put in the TLB */
- prot = PAGE_READ;
- if (!(ptep & PG_NX_MASK) &&
- (mmu_idx == MMU_USER_IDX ||
- !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
- prot |= PAGE_EXEC;
- }
- if (pte & PG_DIRTY_MASK) {
+ if (!(pte & PG_DIRTY_MASK)) {
/* only set write access if already dirty... otherwise wait
for dirty access */
- if (is_user) {
- if (ptep & PG_RW_MASK)
- prot |= PAGE_WRITE;
- } else {
- if (!(env->cr[0] & CR0_WP_MASK) ||
- (ptep & PG_RW_MASK))
- prot |= PAGE_WRITE;
- }
+ assert(!is_write);
+ prot &= ~PAGE_WRITE;
}
+
do_mapping:
pte = pte & env->a20_mask;
@@ -771,6 +973,7 @@ do_check_protect_pse36:
page_offset = vaddr & (page_size - 1);
paddr = pte + page_offset;
+ assert(prot & (1 << is_write1));
tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
prot, mmu_idx, page_size);
return 0;
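The protection-key block in the rewritten fault path deserves a note: PKRU holds two bits per key, access-disable at bit 2*pk and write-disable at bit 2*pk + 1, and it only constrains user pages in long mode; write-disable also binds supervisor accesses when CR0.WP is set. A reduced model of just that computation (PAGE_* values and the function name are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_READ  1
    #define PAGE_WRITE 2
    #define PAGE_EXEC  4

    static int pkru_prot(uint32_t pkru, unsigned pk, int is_user, int cr0_wp)
    {
        int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        uint32_t ad = (pkru >> (pk * 2)) & 1;   /* access disable */
        uint32_t wd = (pkru >> (pk * 2)) & 2;   /* write disable  */

        if (ad) {
            prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (wd && (is_user || cr0_wp)) {
            prot &= ~PAGE_WRITE;
        }
        return prot;       /* note PAGE_EXEC is never removed by PKRU */
    }

    int main(void)
    {
        /* key 1 write-disabled: user write refused, read/exec allowed */
        printf("prot = %d\n", pkru_prot(1u << 3, 1, 1, 0));   /* 5 */
        return 0;
    }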
@@ -883,7 +1086,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (!(pde & PG_PRESENT_MASK))
return -1;
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
- pte = pde | ((pde & 0x1fe000) << (32 - 13));
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
page_size = 4096 * 1024;
} else {
/* page directory entry */
@@ -905,134 +1108,6 @@ out:
return pte | page_offset;
}
-void hw_breakpoint_insert(CPUX86State *env, int index)
-{
- CPUState *cs = CPU(x86_env_get_cpu(env));
- int type = 0, err = 0;
-
- switch (hw_breakpoint_type(env->dr[7], index)) {
- case DR7_TYPE_BP_INST:
- if (hw_breakpoint_enabled(env->dr[7], index)) {
- err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
- &env->cpu_breakpoint[index]);
- }
- break;
- case DR7_TYPE_DATA_WR:
- type = BP_CPU | BP_MEM_WRITE;
- break;
- case DR7_TYPE_IO_RW:
- /* No support for I/O watchpoints yet */
- break;
- case DR7_TYPE_DATA_RW:
- type = BP_CPU | BP_MEM_ACCESS;
- break;
- }
-
- if (type != 0) {
- err = cpu_watchpoint_insert(cs, env->dr[index],
- hw_breakpoint_len(env->dr[7], index),
- type, &env->cpu_watchpoint[index]);
- }
-
- if (err) {
- env->cpu_breakpoint[index] = NULL;
- }
-}
-
-void hw_breakpoint_remove(CPUX86State *env, int index)
-{
- CPUState *cs;
-
- if (!env->cpu_breakpoint[index]) {
- return;
- }
- cs = CPU(x86_env_get_cpu(env));
- switch (hw_breakpoint_type(env->dr[7], index)) {
- case DR7_TYPE_BP_INST:
- if (hw_breakpoint_enabled(env->dr[7], index)) {
- cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
- }
- break;
- case DR7_TYPE_DATA_WR:
- case DR7_TYPE_DATA_RW:
- cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
- break;
- case DR7_TYPE_IO_RW:
- /* No support for I/O watchpoints yet */
- break;
- }
-}
-
-bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
-{
- target_ulong dr6;
- int reg;
- bool hit_enabled = false;
-
- dr6 = env->dr[6] & ~0xf;
- for (reg = 0; reg < DR7_MAX_BP; reg++) {
- bool bp_match = false;
- bool wp_match = false;
-
- switch (hw_breakpoint_type(env->dr[7], reg)) {
- case DR7_TYPE_BP_INST:
- if (env->dr[reg] == env->eip) {
- bp_match = true;
- }
- break;
- case DR7_TYPE_DATA_WR:
- case DR7_TYPE_DATA_RW:
- if (env->cpu_watchpoint[reg] &&
- env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
- wp_match = true;
- }
- break;
- case DR7_TYPE_IO_RW:
- break;
- }
- if (bp_match || wp_match) {
- dr6 |= 1 << reg;
- if (hw_breakpoint_enabled(env->dr[7], reg)) {
- hit_enabled = true;
- }
- }
- }
-
- if (hit_enabled || force_dr6_update) {
- env->dr[6] = dr6;
- }
-
- return hit_enabled;
-}
-
-void breakpoint_handler(CPUState *cs)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
- CPUBreakpoint *bp;
-
- if (cs->watchpoint_hit) {
- if (cs->watchpoint_hit->flags & BP_CPU) {
- cs->watchpoint_hit = NULL;
- if (check_hw_breakpoints(env, false)) {
- raise_exception(env, EXCP01_DB);
- } else {
- cpu_resume_from_signal(cs, NULL);
- }
- }
- } else {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == env->eip) {
- if (bp->flags & BP_CPU) {
- check_hw_breakpoints(env, true);
- raise_exception(env, EXCP01_DB);
- }
- break;
- }
- }
- }
-}
-
typedef struct MCEInjectionParams {
Monitor *mon;
X86CPU *cpu;
diff --git a/qemu/target-i386/helper.h b/qemu/target-i386/helper.h
index 74308f442..1320edc01 100644
--- a/qemu/target-i386/helper.h
+++ b/qemu/target-i386/helper.h
@@ -15,6 +15,14 @@ DEF_HELPER_2(idivl_EAX, void, env, tl)
DEF_HELPER_2(divq_EAX, void, env, tl)
DEF_HELPER_2(idivq_EAX, void, env, tl)
#endif
+DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_5(bndstx32, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_1(bnd_jmp, void, env)
DEF_HELPER_2(aam, void, env, int)
DEF_HELPER_2(aad, void, env, int)
@@ -30,9 +38,9 @@ DEF_HELPER_2(verw, void, env, tl)
DEF_HELPER_2(lldt, void, env, int)
DEF_HELPER_2(ltr, void, env, int)
DEF_HELPER_3(load_seg, void, env, int, int)
-DEF_HELPER_4(ljmp_protected, void, env, int, tl, int)
+DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl)
DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
-DEF_HELPER_5(lcall_protected, void, env, int, tl, int, int)
+DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
DEF_HELPER_2(iret_real, void, env, int)
DEF_HELPER_3(iret_protected, void, env, int, int)
DEF_HELPER_3(lret_protected, void, env, int, int)
@@ -40,13 +48,10 @@ DEF_HELPER_2(read_crN, tl, env, int)
DEF_HELPER_3(write_crN, void, env, int, tl)
DEF_HELPER_2(lmsw, void, env, tl)
DEF_HELPER_1(clts, void, env)
-DEF_HELPER_3(movl_drN_T0, void, env, int, tl)
+DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
+DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
DEF_HELPER_2(invlpg, void, env, tl)
-DEF_HELPER_4(enter_level, void, env, int, int, tl)
-#ifdef TARGET_X86_64
-DEF_HELPER_4(enter64_level, void, env, int, int, tl)
-#endif
DEF_HELPER_1(sysenter, void, env)
DEF_HELPER_2(sysexit, void, env, int)
#ifdef TARGET_X86_64
@@ -65,8 +70,6 @@ DEF_HELPER_1(cli, void, env)
DEF_HELPER_1(sti, void, env)
DEF_HELPER_1(clac, void, env)
DEF_HELPER_1(stac, void, env)
-DEF_HELPER_1(set_inhibit_irq, void, env)
-DEF_HELPER_1(reset_inhibit_irq, void, env)
DEF_HELPER_3(boundw, void, env, tl, int)
DEF_HELPER_3(boundl, void, env, tl, int)
DEF_HELPER_1(rsm, void, env)
@@ -92,6 +95,7 @@ DEF_HELPER_3(outw, void, env, i32, i32)
DEF_HELPER_2(inw, tl, env, i32)
DEF_HELPER_3(outl, void, env, i32, i32)
DEF_HELPER_2(inl, tl, env, i32)
+DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl)
DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
DEF_HELPER_3(vmexit, void, env, i32, i64)
@@ -187,8 +191,15 @@ DEF_HELPER_3(fstenv, void, env, tl, int)
DEF_HELPER_3(fldenv, void, env, tl, int)
DEF_HELPER_3(fsave, void, env, tl, int)
DEF_HELPER_3(frstor, void, env, tl, int)
-DEF_HELPER_3(fxsave, void, env, tl, int)
-DEF_HELPER_3(fxrstor, void, env, tl, int)
+DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64)
DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl)
diff --git a/qemu/target-i386/hyperv.c b/qemu/target-i386/hyperv.c
new file mode 100644
index 000000000..39a230f11
--- /dev/null
+++ b/qemu/target-i386/hyperv.c
@@ -0,0 +1,140 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hyperv.h"
+#include "standard-headers/asm-x86/hyperv.h"
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
+{
+ CPUX86State *env = &cpu->env;
+
+ switch (exit->type) {
+ case KVM_EXIT_HYPERV_SYNIC:
+ if (!cpu->hyperv_synic) {
+ return -1;
+ }
+
+ /*
+ * For now, just track changes to the SynIC control and msg/evt page MSRs.
+ * When SynIC message/event processing is added later, this is where
+ * message queues will be flushed and pages remapped.
+ */
+ switch (exit->u.synic.msr) {
+ case HV_X64_MSR_SCONTROL:
+ env->msr_hv_synic_control = exit->u.synic.control;
+ break;
+ case HV_X64_MSR_SIMP:
+ env->msr_hv_synic_msg_page = exit->u.synic.msg_page;
+ break;
+ case HV_X64_MSR_SIEFP:
+ env->msr_hv_synic_evt_page = exit->u.synic.evt_page;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+ case KVM_EXIT_HYPERV_HCALL: {
+ uint16_t code;
+
+ code = exit->u.hcall.input & 0xffff;
+ switch (code) {
+ case HVCALL_POST_MESSAGE:
+ case HVCALL_SIGNAL_EVENT:
+ default:
+ exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
+ return 0;
+ }
+ }
+ default:
+ return -1;
+ }
+}
+
+static void kvm_hv_sint_ack_handler(EventNotifier *notifier)
+{
+ HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
+ sint_ack_notifier);
+ event_notifier_test_and_clear(notifier);
+ if (sint_route->sint_ack_clb) {
+ sint_route->sint_ack_clb(sint_route);
+ }
+}
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+ HvSintAckClb sint_ack_clb)
+{
+ HvSintRoute *sint_route;
+ int r, gsi;
+
+ sint_route = g_malloc0(sizeof(*sint_route));
+ r = event_notifier_init(&sint_route->sint_set_notifier, false);
+ if (r) {
+ goto err;
+ }
+
+ r = event_notifier_init(&sint_route->sint_ack_notifier, false);
+ if (r) {
+ goto err_sint_set_notifier;
+ }
+
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, false,
+ kvm_hv_sint_ack_handler);
+
+ gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vcpu_id, sint);
+ if (gsi < 0) {
+ goto err_gsi;
+ }
+
+ r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
+ &sint_route->sint_set_notifier,
+ &sint_route->sint_ack_notifier, gsi);
+ if (r) {
+ goto err_irqfd;
+ }
+ sint_route->gsi = gsi;
+ sint_route->sint_ack_clb = sint_ack_clb;
+ sint_route->vcpu_id = vcpu_id;
+ sint_route->sint = sint;
+
+ return sint_route;
+
+err_irqfd:
+ kvm_irqchip_release_virq(kvm_state, gsi);
+err_gsi:
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+ event_notifier_cleanup(&sint_route->sint_ack_notifier);
+err_sint_set_notifier:
+ event_notifier_cleanup(&sint_route->sint_set_notifier);
+err:
+ g_free(sint_route);
+
+ return NULL;
+}
+
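kvm_hv_sint_route_create() above uses the kernel-style staged-unwind pattern: every acquired resource gets its own label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A stripped-down sketch of the shape (plain allocations instead of notifiers and GSI routes):

    #include <stdio.h>
    #include <stdlib.h>

    struct route { char *a, *b; };

    static struct route *route_create(void)
    {
        struct route *r = calloc(1, sizeof(*r));
        if (!r) {
            goto err;
        }
        r->a = malloc(16);
        if (!r->a) {
            goto err_free_route;
        }
        r->b = malloc(16);
        if (!r->b) {
            goto err_free_a;
        }
        return r;

    err_free_a:
        free(r->a);
    err_free_route:
        free(r);
    err:
        return NULL;
    }

    int main(void)
    {
        struct route *r = route_create();
        printf("%s\n", r ? "created" : "failed");
        return 0;
    }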
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route)
+{
+ kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
+ &sint_route->sint_set_notifier,
+ sint_route->gsi);
+ kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+ event_notifier_cleanup(&sint_route->sint_ack_notifier);
+ event_notifier_cleanup(&sint_route->sint_set_notifier);
+ g_free(sint_route);
+}
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route)
+{
+ return event_notifier_set(&sint_route->sint_set_notifier);
+}
diff --git a/qemu/target-i386/hyperv.h b/qemu/target-i386/hyperv.h
new file mode 100644
index 000000000..b26201f8b
--- /dev/null
+++ b/qemu/target-i386/hyperv.h
@@ -0,0 +1,42 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HYPERV_I386_H
+#define HYPERV_I386_H
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "qemu/event_notifier.h"
+
+typedef struct HvSintRoute HvSintRoute;
+typedef void (*HvSintAckClb)(HvSintRoute *sint_route);
+
+struct HvSintRoute {
+ uint32_t sint;
+ uint32_t vcpu_id;
+ int gsi;
+ EventNotifier sint_set_notifier;
+ EventNotifier sint_ack_notifier;
+ HvSintAckClb sint_ack_clb;
+};
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit);
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+ HvSintAckClb sint_ack_clb);
+
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route);
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route);
+
+#endif
diff --git a/qemu/target-i386/int_helper.c b/qemu/target-i386/int_helper.c
index b0d78e6ee..cf5bbb048 100644
--- a/qemu/target-i386/int_helper.c
+++ b/qemu/target-i386/int_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
@@ -48,11 +49,11 @@ void helper_divb_AL(CPUX86State *env, target_ulong t0)
num = (env->regs[R_EAX] & 0xffff);
den = (t0 & 0xff);
if (den == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q = (num / den);
if (q > 0xff) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q &= 0xff;
r = (num % den) & 0xff;
@@ -66,11 +67,11 @@ void helper_idivb_AL(CPUX86State *env, target_ulong t0)
num = (int16_t)env->regs[R_EAX];
den = (int8_t)t0;
if (den == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q = (num / den);
if (q != (int8_t)q) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q &= 0xff;
r = (num % den) & 0xff;
@@ -84,11 +85,11 @@ void helper_divw_AX(CPUX86State *env, target_ulong t0)
num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
den = (t0 & 0xffff);
if (den == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q = (num / den);
if (q > 0xffff) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q &= 0xffff;
r = (num % den) & 0xffff;
@@ -103,11 +104,11 @@ void helper_idivw_AX(CPUX86State *env, target_ulong t0)
num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
den = (int16_t)t0;
if (den == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q = (num / den);
if (q != (int16_t)q) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q &= 0xffff;
r = (num % den) & 0xffff;
@@ -123,12 +124,12 @@ void helper_divl_EAX(CPUX86State *env, target_ulong t0)
num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
den = t0;
if (den == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q = (num / den);
r = (num % den);
if (q > 0xffffffff) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
env->regs[R_EAX] = (uint32_t)q;
env->regs[R_EDX] = (uint32_t)r;
@@ -142,12 +143,12 @@ void helper_idivl_EAX(CPUX86State *env, target_ulong t0)
num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
den = t0;
if (den == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
q = (num / den);
r = (num % den);
if (q != (int32_t)q) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
env->regs[R_EAX] = (uint32_t)q;
env->regs[R_EDX] = (uint32_t)r;
@@ -379,12 +380,12 @@ void helper_divq_EAX(CPUX86State *env, target_ulong t0)
uint64_t r0, r1;
if (t0 == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
r0 = env->regs[R_EAX];
r1 = env->regs[R_EDX];
if (div64(&r0, &r1, t0)) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
env->regs[R_EAX] = r0;
env->regs[R_EDX] = r1;
@@ -395,12 +396,12 @@ void helper_idivq_EAX(CPUX86State *env, target_ulong t0)
uint64_t r0, r1;
if (t0 == 0) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
r0 = env->regs[R_EAX];
r1 = env->regs[R_EDX];
if (idiv64(&r0, &r1, t0)) {
- raise_exception(env, EXCP00_DIVZ);
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
}
env->regs[R_EAX] = r0;
env->regs[R_EDX] = r1;
@@ -469,3 +470,13 @@ target_ulong helper_pext(target_ulong src, target_ulong mask)
#include "shift_helper_template.h"
#undef SHIFT
#endif
+
+/* Test that BIT is enabled in CR4. If not, raise an illegal opcode
+ exception. This reduces the requirements for rare CR4 bits being
+ mapped into HFLAGS. */
+void helper_cr4_testbit(CPUX86State *env, uint32_t bit)
+{
+ if (unlikely((env->cr[4] & bit) == 0)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+}
diff --git a/qemu/target-i386/kvm-stub.c b/qemu/target-i386/kvm-stub.c
index 6fefd65c2..8df9c5953 100644
--- a/qemu/target-i386/kvm-stub.c
+++ b/qemu/target-i386/kvm-stub.c
@@ -9,6 +9,7 @@
* See the COPYING file in the top-level directory.
*
*/
+#include "qemu/osdep.h"
#include "qemu-common.h"
#include "kvm_i386.h"
diff --git a/qemu/target-i386/kvm.c b/qemu/target-i386/kvm.c
index 721c580ed..799fdfa68 100644
--- a/qemu/target-i386/kvm.c
+++ b/qemu/target-i386/kvm.c
@@ -12,7 +12,8 @@
*
*/
-#include <sys/types.h>
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>
@@ -25,16 +26,21 @@
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "cpu.h"
+#include "hyperv.h"
+
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
+#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
+
#include "exec/ioport.h"
-#include <asm/hyperv.h>
+#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
#include "migration/migration.h"
#include "exec/memattrs.h"
@@ -67,6 +73,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
static bool has_msr_star;
static bool has_msr_hsave_pa;
+static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
@@ -80,12 +87,27 @@ static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
+static bool has_msr_hv_crash;
+static bool has_msr_hv_reset;
+static bool has_msr_hv_vpindex;
+static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
+static bool has_msr_hv_stimer;
static bool has_msr_mtrr;
static bool has_msr_xss;
static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;
+static int has_xsave;
+static int has_xcrs;
+static int has_pit_state2;
+
+int kvm_has_pit_state2(void)
+{
+ return has_pit_state2;
+}
+
bool kvm_has_smm(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
@@ -119,6 +141,7 @@ static int kvm_get_tsc(CPUState *cs)
return ret;
}
+ assert(ret == 1);
env->tsc = msr_data.entries[0].data;
return 0;
}
@@ -502,7 +525,43 @@ static bool hyperv_enabled(X86CPU *cpu)
return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
(hyperv_hypercall_available(cpu) ||
cpu->hyperv_time ||
- cpu->hyperv_relaxed_timing);
+ cpu->hyperv_relaxed_timing ||
+ cpu->hyperv_crash ||
+ cpu->hyperv_reset ||
+ cpu->hyperv_vpindex ||
+ cpu->hyperv_runtime ||
+ cpu->hyperv_synic ||
+ cpu->hyperv_stimer);
+}
+
+static int kvm_arch_set_tsc_khz(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int r;
+
+ if (!env->tsc_khz) {
+ return 0;
+ }
+
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
+ kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
+ -ENOTSUP;
+ if (r < 0) {
+ /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
+ * TSC frequency doesn't match the one we want.
+ */
+ int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
+ error_report("warning: TSC frequency mismatch between "
+ "VM and host, and TSC scaling unavailable");
+ return r;
+ }
+ }
+
+ return 0;
}
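kvm_arch_set_tsc_khz() centralizes the "set, and if that fails check whether the host already runs at the requested rate" logic that was previously open-coded in kvm_arch_init_vcpu(). The extension-guarded ioctl pattern it relies on can be isolated as below (a sketch, not part of the patch; 'probe_tsc_khz' is a hypothetical name, and KVM_GET_TSC_KHZ returns the frequency in kHz or a negative errno):

    /* Sketch: probe the vCPU's TSC frequency the same way the
     * function above does. */
    static int probe_tsc_khz(CPUState *cs)
    {
        if (!kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ)) {
            return -ENOTSUP;
        }
        return kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ);
    }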
static Error *invtsc_mig_blocker;
@@ -532,7 +591,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (hyperv_enabled(cpu)) {
c = &cpuid_data.entries[cpuid_i++];
c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
- memcpy(signature, "Microsoft Hv", 12);
+ if (!cpu->hyperv_vendor_id) {
+ memcpy(signature, "Microsoft Hv", 12);
+ } else {
+ size_t len = strlen(cpu->hyperv_vendor_id);
+
+ if (len > 12) {
+ error_report("hv-vendor-id truncated to 12 characters");
+ len = 12;
+ }
+ memset(signature, 0, 12);
+ memcpy(signature, cpu->hyperv_vendor_id, len);
+ }
c->eax = HYPERV_CPUID_MIN;
c->ebx = signature[0];
c->ecx = signature[1];
@@ -568,6 +638,41 @@ int kvm_arch_init_vcpu(CPUState *cs)
c->eax |= 0x200;
has_msr_hv_tsc = true;
}
+ if (cpu->hyperv_crash && has_msr_hv_crash) {
+ c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
+ }
+ c->edx |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+ if (cpu->hyperv_reset && has_msr_hv_reset) {
+ c->eax |= HV_X64_MSR_RESET_AVAILABLE;
+ }
+ if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
+ c->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
+ }
+ if (cpu->hyperv_runtime && has_msr_hv_runtime) {
+ c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+ }
+ if (cpu->hyperv_synic) {
+ int sint;
+
+ if (!has_msr_hv_synic ||
+ kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+ fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+ return -ENOSYS;
+ }
+
+ c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+ env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+ for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
+ env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
+ }
+ }
+ if (cpu->hyperv_stimer) {
+ if (!has_msr_hv_stimer) {
+ fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
+ return -ENOSYS;
+ }
+ c->eax |= HV_X64_MSR_SYNTIMER_AVAILABLE;
+ }
c = &cpuid_data.entries[cpuid_i++];
c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
if (cpu->hyperv_relaxed_timing) {
@@ -732,7 +837,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
&& (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
(CPUID_MCE | CPUID_MCA)
&& kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
- uint64_t mcg_cap;
+ uint64_t mcg_cap, unsupported_caps;
int banks;
int ret;
@@ -742,18 +847,24 @@ int kvm_arch_init_vcpu(CPUState *cs)
return ret;
}
- if (banks > MCE_BANKS_DEF) {
- banks = MCE_BANKS_DEF;
+ if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
+ error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
+ (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
+ return -ENOTSUP;
+ }
+
+ unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
+ if (unsupported_caps) {
+ error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
+ unsupported_caps);
}
- mcg_cap &= MCE_CAP_DEF;
- mcg_cap |= banks;
- ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &mcg_cap);
+
+ env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
+ ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
if (ret < 0) {
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
return ret;
}
-
- env->mcg_cap = mcg_cap;
}
qemu_add_vm_change_state_handler(cpu_update_state, env);
@@ -781,22 +892,35 @@ int kvm_arch_init_vcpu(CPUState *cs)
return r;
}
- r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
- if (r && env->tsc_khz) {
- r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
- if (r < 0) {
- fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
- return r;
+ r = kvm_arch_set_tsc_khz(cs);
+ if (r < 0) {
+ return r;
+ }
+
+    /* The vCPU's TSC frequency is either specified by the user, or
+     * follows the value used by KVM if the former is not present. In
+     * the latter case, we query it from KVM and record it in
+     * env->tsc_khz, so that the vCPU's TSC frequency can be migrated
+     * later via this field.
+     */
+ if (!env->tsc_khz) {
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (r > 0) {
+ env->tsc_khz = r;
}
}
- if (kvm_has_xsave()) {
+ if (has_xsave) {
env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
}
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
has_msr_mtrr = true;
}
+ if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
+ has_msr_tsc_aux = false;
+ }
return 0;
}
@@ -864,6 +988,10 @@ static int kvm_get_supported_msrs(KVMState *s)
has_msr_hsave_pa = true;
continue;
}
+ if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
+ has_msr_tsc_aux = true;
+ continue;
+ }
if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
has_msr_tsc_adjust = true;
continue;
@@ -888,6 +1016,30 @@ static int kvm_get_supported_msrs(KVMState *s)
has_msr_xss = true;
continue;
}
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
+ has_msr_hv_crash = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
+ has_msr_hv_reset = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
+ has_msr_hv_vpindex = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
+ has_msr_hv_runtime = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+ has_msr_hv_synic = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
+ has_msr_hv_stimer = true;
+ continue;
+ }
}
}
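Each new has_msr_hv_* flag is discovered by scanning the kernel's supported-MSR list for one sentinel MSR per feature: HV_X64_MSR_CRASH_CTL stands in for all the crash MSRs, HV_X64_MSR_SCONTROL for the whole SynIC page set, HV_X64_MSR_STIMER0_CONFIG for all four synthetic timers. The growing if/continue chain could equally be table-driven; a sketch under that assumption ('hv_msr_probes' is hypothetical, the MSR and flag names are from the hunk):

    static const struct {
        uint32_t index;   /* sentinel MSR in the kernel's list */
        bool *flag;       /* feature flag it enables           */
    } hv_msr_probes[] = {
        { HV_X64_MSR_CRASH_CTL,      &has_msr_hv_crash   },
        { HV_X64_MSR_RESET,          &has_msr_hv_reset   },
        { HV_X64_MSR_VP_INDEX,       &has_msr_hv_vpindex },
        { HV_X64_MSR_VP_RUNTIME,     &has_msr_hv_runtime },
        { HV_X64_MSR_SCONTROL,       &has_msr_hv_synic   },
        { HV_X64_MSR_STIMER0_CONFIG, &has_msr_hv_stimer  },
    };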
@@ -938,6 +1090,18 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
int ret;
struct utsname utsname;
+#ifdef KVM_CAP_XSAVE
+ has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
+#endif
+
+#ifdef KVM_CAP_XCRS
+ has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
+#endif
+
+#ifdef KVM_CAP_PIT_STATE2
+ has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
+#endif
+
ret = kvm_get_supported_msrs(s);
if (ret < 0) {
return ret;
@@ -1027,7 +1191,7 @@ static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
lhs->l = (flags >> DESC_L_SHIFT) & 1;
lhs->g = (flags & DESC_G_MASK) != 0;
lhs->avl = (flags & DESC_AVL_MASK) != 0;
- lhs->unusable = 0;
+ lhs->unusable = !lhs->present;
lhs->padding = 0;
}
@@ -1036,14 +1200,18 @@ static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
lhs->selector = rhs->selector;
lhs->base = rhs->base;
lhs->limit = rhs->limit;
- lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
- (rhs->present * DESC_P_MASK) |
- (rhs->dpl << DESC_DPL_SHIFT) |
- (rhs->db << DESC_B_SHIFT) |
- (rhs->s * DESC_S_MASK) |
- (rhs->l << DESC_L_SHIFT) |
- (rhs->g * DESC_G_MASK) |
- (rhs->avl * DESC_AVL_MASK);
+ if (rhs->unusable) {
+ lhs->flags = 0;
+ } else {
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+ (rhs->present * DESC_P_MASK) |
+ (rhs->dpl << DESC_DPL_SHIFT) |
+ (rhs->db << DESC_B_SHIFT) |
+ (rhs->s * DESC_S_MASK) |
+ (rhs->l << DESC_L_SHIFT) |
+ (rhs->g * DESC_G_MASK) |
+ (rhs->avl * DESC_AVL_MASK);
+ }
}
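set_seg() and get_seg() now agree on KVM's convention that "unusable" is the inverse of present: a non-present segment goes out with unusable = 1 and comes back with all descriptor flags cleared, rather than having stale type/DPL bits reassembled. For reference, the DESC_* packing used above produces the usual high-dword descriptor layout; e.g. a flat 32-bit ring-0 code segment (illustrative values only):

    /* type 0xb (code, read/execute, accessed), present, S=1
     * (non-system), DPL 0, D/B=1 (32-bit), G=1 (4K granularity),
     * i.e. the classic 0x00c09b00: */
    uint32_t flags = (0xb << DESC_TYPE_SHIFT) | DESC_P_MASK | DESC_S_MASK
                   | (1 << DESC_B_SHIFT) | DESC_G_MASK;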
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
@@ -1115,8 +1283,8 @@ static int kvm_put_fpu(X86CPU *cpu)
}
memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
for (i = 0; i < CPU_NB_REGS; i++) {
- stq_p(&fpu.xmm[i][0], env->xmm_regs[i].XMM_Q(0));
- stq_p(&fpu.xmm[i][8], env->xmm_regs[i].XMM_Q(1));
+ stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
+ stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
}
fpu.mxcsr = env->mxcsr;
@@ -1137,6 +1305,7 @@ static int kvm_put_fpu(X86CPU *cpu)
#define XSAVE_OPMASK 272
#define XSAVE_ZMM_Hi256 288
#define XSAVE_Hi16_ZMM 416
+#define XSAVE_PKRU 672
static int kvm_put_xsave(X86CPU *cpu)
{
@@ -1146,7 +1315,7 @@ static int kvm_put_xsave(X86CPU *cpu)
uint8_t *xmm, *ymmh, *zmmh;
int i, r;
- if (!kvm_has_xsave()) {
+ if (!has_xsave) {
return kvm_put_fpu(cpu);
}
@@ -1177,19 +1346,20 @@ static int kvm_put_xsave(X86CPU *cpu)
ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
- stq_p(xmm, env->xmm_regs[i].XMM_Q(0));
- stq_p(xmm+8, env->xmm_regs[i].XMM_Q(1));
- stq_p(ymmh, env->xmm_regs[i].XMM_Q(2));
- stq_p(ymmh+8, env->xmm_regs[i].XMM_Q(3));
- stq_p(zmmh, env->xmm_regs[i].XMM_Q(4));
- stq_p(zmmh+8, env->xmm_regs[i].XMM_Q(5));
- stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6));
- stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7));
+ stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
+ stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
+ stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+ stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
+ stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+ stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
+ stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
+ stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
}
#ifdef TARGET_X86_64
memcpy(&xsave->region[XSAVE_Hi16_ZMM], &env->xmm_regs[16],
16 * sizeof env->xmm_regs[16]);
+ memcpy(&xsave->region[XSAVE_PKRU], &env->pkru, sizeof env->pkru);
#endif
r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
return r;
@@ -1200,7 +1370,7 @@ static int kvm_put_xcrs(X86CPU *cpu)
CPUX86State *env = &cpu->env;
struct kvm_xcrs xcrs = {};
- if (!kvm_has_xcrs()) {
+ if (!has_xcrs) {
return 0;
}
@@ -1277,6 +1447,7 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
struct kvm_msr_entry entries[1];
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
+ int ret;
if (!has_msr_tsc_deadline) {
return 0;
@@ -1288,7 +1459,13 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
.nmsrs = 1,
};
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
}
/*
@@ -1303,6 +1480,11 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
struct kvm_msrs info;
struct kvm_msr_entry entry;
} msr_data;
+ int ret;
+
+ if (!has_msr_feature_control) {
+ return 0;
+ }
kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
cpu->env.msr_ia32_feature_control);
@@ -1311,7 +1493,13 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
.nmsrs = 1,
};
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
}
static int kvm_put_msrs(X86CPU *cpu, int level)
@@ -1323,6 +1511,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int n = 0, i;
+ int ret;
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
@@ -1334,6 +1523,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (has_msr_hsave_pa) {
kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
}
+ if (has_msr_tsc_aux) {
+ kvm_msr_entry_set(&msrs[n++], MSR_TSC_AUX, env->tsc_aux);
+ }
if (has_msr_tsc_adjust) {
kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
}
@@ -1420,6 +1612,50 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
env->msr_hv_tsc);
}
+ if (has_msr_hv_crash) {
+ int j;
+
+ for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
+ env->msr_hv_crash_params[j]);
+
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
+ HV_X64_MSR_CRASH_CTL_NOTIFY);
+ }
+ if (has_msr_hv_runtime) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
+ env->msr_hv_runtime);
+ }
+ if (cpu->hyperv_synic) {
+ int j;
+
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
+ env->msr_hv_synic_control);
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
+ env->msr_hv_synic_version);
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
+ env->msr_hv_synic_evt_page);
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
+ env->msr_hv_synic_msg_page);
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
+ env->msr_hv_synic_sint[j]);
+ }
+ }
+ if (has_msr_hv_stimer) {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_CONFIG + j*2,
+ env->msr_hv_stimer_config[j]);
+ }
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_COUNT + j*2,
+ env->msr_hv_stimer_count[j]);
+ }
+ }
if (has_msr_mtrr) {
kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
kvm_msr_entry_set(&msrs[n++],
@@ -1469,8 +1705,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
.nmsrs = n,
};
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+ assert(ret == n);
+ return 0;
}
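KVM_SET_MSRS returns the number of MSRs actually processed, not just success or failure, so the converted tail above distinguishes two failure modes: a negative value is an errno from the ioctl, while a short count would mean some MSRs were silently dropped. The recurring pattern, with n the number of entries packed into msr_data:

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;       /* ioctl-level failure */
    }
    assert(ret == n);     /* a partial write indicates a QEMU/KVM bug */
    return 0;

Note also the j*2 stride in the stimer loops: the synthetic-timer MSRs interleave CONFIG and COUNT registers, so timer j's pair lives at HV_X64_MSR_STIMER0_CONFIG + 2*j and HV_X64_MSR_STIMER0_COUNT + 2*j.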
@@ -1496,8 +1737,8 @@ static int kvm_get_fpu(X86CPU *cpu)
}
memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
for (i = 0; i < CPU_NB_REGS; i++) {
- env->xmm_regs[i].XMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
- env->xmm_regs[i].XMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
}
env->mxcsr = fpu.mxcsr;
@@ -1512,7 +1753,7 @@ static int kvm_get_xsave(X86CPU *cpu)
const uint8_t *xmm, *ymmh, *zmmh;
uint16_t cwd, swd, twd;
- if (!kvm_has_xsave()) {
+ if (!has_xsave) {
return kvm_get_fpu(cpu);
}
@@ -1548,19 +1789,20 @@ static int kvm_get_xsave(X86CPU *cpu)
ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
- env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm);
- env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8);
- env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh);
- env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8);
- env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh);
- env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8);
- env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16);
- env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24);
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
+ env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
+ env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
+ env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+ env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
+ env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
+ env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
}
#ifdef TARGET_X86_64
memcpy(&env->xmm_regs[16], &xsave->region[XSAVE_Hi16_ZMM],
16 * sizeof env->xmm_regs[16]);
+ memcpy(&env->pkru, &xsave->region[XSAVE_PKRU], sizeof env->pkru);
#endif
return 0;
}
@@ -1571,7 +1813,7 @@ static int kvm_get_xcrs(X86CPU *cpu)
int i, ret;
struct kvm_xcrs xcrs;
- if (!kvm_has_xcrs()) {
+ if (!has_xcrs) {
return 0;
}
@@ -1643,13 +1885,16 @@ static int kvm_get_sregs(X86CPU *cpu)
HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
- hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags = env->hflags & HFLAG_COPY_MASK;
+ hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
- hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
- (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ hflags |= HF_OSFXSR_MASK;
+ }
if (env->efer & MSR_EFER_LMA) {
hflags |= HF_LMA_MASK;
@@ -1670,7 +1915,7 @@ static int kvm_get_sregs(X86CPU *cpu)
env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
}
}
- env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
+ env->hflags = hflags;
return 0;
}
@@ -1696,6 +1941,9 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_hsave_pa) {
msrs[n++].index = MSR_VM_HSAVE_PA;
}
+ if (has_msr_tsc_aux) {
+ msrs[n++].index = MSR_TSC_AUX;
+ }
if (has_msr_tsc_adjust) {
msrs[n++].index = MSR_TSC_ADJUST;
}
@@ -1775,6 +2023,35 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_hv_tsc) {
msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
}
+ if (has_msr_hv_crash) {
+ int j;
+
+ for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+ msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
+ }
+ }
+ if (has_msr_hv_runtime) {
+ msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
+ }
+ if (cpu->hyperv_synic) {
+ uint32_t msr;
+
+ msrs[n++].index = HV_X64_MSR_SCONTROL;
+ msrs[n++].index = HV_X64_MSR_SVERSION;
+ msrs[n++].index = HV_X64_MSR_SIEFP;
+ msrs[n++].index = HV_X64_MSR_SIMP;
+ for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+ msrs[n++].index = msr;
+ }
+ }
+ if (has_msr_hv_stimer) {
+ uint32_t msr;
+
+ for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
+ msr++) {
+ msrs[n++].index = msr;
+ }
+ }
if (has_msr_mtrr) {
msrs[n++].index = MSR_MTRRdefType;
msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -1803,6 +2080,7 @@ static int kvm_get_msrs(X86CPU *cpu)
return ret;
}
+ assert(ret == n);
for (i = 0; i < ret; i++) {
uint32_t index = msrs[i].index;
switch (index) {
@@ -1838,6 +2116,9 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_IA32_TSC:
env->tsc = msrs[i].data;
break;
+ case MSR_TSC_AUX:
+ env->tsc_aux = msrs[i].data;
+ break;
case MSR_TSC_ADJUST:
env->tsc_adjust = msrs[i].data;
break;
@@ -1922,6 +2203,41 @@ static int kvm_get_msrs(X86CPU *cpu)
case HV_X64_MSR_REFERENCE_TSC:
env->msr_hv_tsc = msrs[i].data;
break;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
+ break;
+ case HV_X64_MSR_VP_RUNTIME:
+ env->msr_hv_runtime = msrs[i].data;
+ break;
+ case HV_X64_MSR_SCONTROL:
+ env->msr_hv_synic_control = msrs[i].data;
+ break;
+ case HV_X64_MSR_SVERSION:
+ env->msr_hv_synic_version = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIEFP:
+ env->msr_hv_synic_evt_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIMP:
+ env->msr_hv_synic_msg_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+ break;
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
+ msrs[i].data;
+ break;
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
+ msrs[i].data;
+ break;
case MSR_MTRRdefType:
env->mtrr_deftype = msrs[i].data;
break;
@@ -2221,13 +2537,22 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
- if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
+ if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_msr_feature_control(x86_cpu);
if (ret < 0) {
return ret;
}
}
+ if (level == KVM_PUT_FULL_STATE) {
+        /* We don't check for kvm_arch_set_tsc_khz() errors here,
+         * because a TSC frequency mismatch shouldn't abort migration
+         * unless the user explicitly asked for a stricter TSC setting
+         * (e.g. via an explicit "tsc-freq" option).
+         */
+ kvm_arch_set_tsc_khz(cpu);
+ }
+
ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
return ret;
@@ -2294,41 +2619,44 @@ int kvm_arch_get_registers(CPUState *cs)
ret = kvm_getput_regs(cpu, 0);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_xsave(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_xcrs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_sregs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_msrs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_mp_state(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_apic(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_vcpu_events(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_debugregs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
- return 0;
+ ret = 0;
+ out:
+ cpu_sync_bndcs_hflags(&cpu->env);
+ return ret;
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
@@ -2363,7 +2691,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
- if (!kvm_irqchip_in_kernel()) {
+ if (!kvm_pic_in_kernel()) {
qemu_mutex_lock_iothread();
}
@@ -2381,7 +2709,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
- if (!kvm_irqchip_in_kernel()) {
+ if (!kvm_pic_in_kernel()) {
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -2780,6 +3108,13 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
ret = kvm_handle_debug(cpu, &run->debug.arch);
qemu_mutex_unlock_iothread();
break;
+ case KVM_EXIT_HYPERV:
+ ret = kvm_hv_handle_exit(cpu, &run->hyperv);
+ break;
+ case KVM_EXIT_IOAPIC_EOI:
+ ioapic_eoi_broadcast(run->eoi.vector);
+ ret = 0;
+ break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
@@ -2814,6 +3149,39 @@ void kvm_arch_init_irq_routing(KVMState *s)
*/
kvm_msi_via_irqfd_allowed = true;
kvm_gsi_routing_allowed = true;
+
+ if (kvm_irqchip_is_split()) {
+ int i;
+
+ /* If the ioapic is in QEMU and the lapics are in KVM, reserve
+ MSI routes for signaling interrupts to the local apics. */
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ struct MSIMessage msg = { 0x0, 0x0 };
+ if (kvm_irqchip_add_msi_route(s, msg, NULL) < 0) {
+ error_report("Could not enable split IRQ mode.");
+ exit(1);
+ }
+ }
+ }
+}
+
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+ int ret;
+ if (machine_kernel_irqchip_split(ms)) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
+ if (ret) {
+ error_report("Could not enable split irqchip mode: %s\n",
+ strerror(-ret));
+ exit(1);
+ } else {
+ DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
+ kvm_split_irqchip = true;
+ return 1;
+ }
+ } else {
+ return 0;
+ }
}
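With kernel-irqchip=split the local APICs stay in KVM while the IOAPIC and PIC move back to QEMU, which is why kvm_arch_init_irq_routing() pre-reserves one MSI route per IOAPIC pin and kvm_arch_handle_exit() now handles KVM_EXIT_IOAPIC_EOI. The literal 24 passed to kvm_vm_enable_cap() presumably matches IOAPIC_NUM_PINS (an assumption based on the reservation loop above); a compile-time guard would make that explicit:

    /* Assumption: the cap argument is the number of GSIs reserved for
     * the userspace IOAPIC and must stay in sync with the pin count. */
    QEMU_BUILD_BUG_ON(IOAPIC_NUM_PINS != 24);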
/* Classic KVM device assignment interface. Will remain x86 only. */
@@ -2957,7 +3325,7 @@ int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
- uint64_t address, uint32_t data)
+ uint64_t address, uint32_t data, PCIDevice *dev)
{
return 0;
}
diff --git a/qemu/target-i386/kvm_i386.h b/qemu/target-i386/kvm_i386.h
index c1b312ba2..42b00af1b 100644
--- a/qemu/target-i386/kvm_i386.h
+++ b/qemu/target-i386/kvm_i386.h
@@ -13,6 +13,8 @@
#include "sysemu/kvm.h"
+#define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())
+
bool kvm_allows_irq0_override(void);
bool kvm_has_smm(void);
void kvm_synchronize_all_tsc(void);
diff --git a/qemu/target-i386/machine.c b/qemu/target-i386/machine.c
index a0df64b57..ee5b94922 100644
--- a/qemu/target-i386/machine.c
+++ b/qemu/target-i386/machine.c
@@ -1,3 +1,4 @@
+#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
@@ -6,6 +7,8 @@
#include "cpu.h"
#include "sysemu/kvm.h"
+#include "qemu/error-report.h"
+
static const VMStateDescription vmstate_segment = {
.name = "segment",
.version_id = 1,
@@ -36,15 +39,15 @@ static const VMStateDescription vmstate_xmm_reg = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_UINT64(XMM_Q(0), XMMReg),
- VMSTATE_UINT64(XMM_Q(1), XMMReg),
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
VMSTATE_END_OF_LIST()
}
};
#define VMSTATE_XMM_REGS(_field, _state, _start) \
VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
- vmstate_xmm_reg, XMMReg)
+ vmstate_xmm_reg, ZMMReg)
/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
@@ -52,32 +55,32 @@ static const VMStateDescription vmstate_ymmh_reg = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_UINT64(XMM_Q(2), XMMReg),
- VMSTATE_UINT64(XMM_Q(3), XMMReg),
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
VMSTATE_END_OF_LIST()
}
};
#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v) \
VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v, \
- vmstate_ymmh_reg, XMMReg)
+ vmstate_ymmh_reg, ZMMReg)
static const VMStateDescription vmstate_zmmh_reg = {
.name = "zmmh_reg",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_UINT64(XMM_Q(4), XMMReg),
- VMSTATE_UINT64(XMM_Q(5), XMMReg),
- VMSTATE_UINT64(XMM_Q(6), XMMReg),
- VMSTATE_UINT64(XMM_Q(7), XMMReg),
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
VMSTATE_END_OF_LIST()
}
};
#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start) \
VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
- vmstate_zmmh_reg, XMMReg)
+ vmstate_zmmh_reg, ZMMReg)
#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
@@ -85,21 +88,21 @@ static const VMStateDescription vmstate_hi16_zmm_reg = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_UINT64(XMM_Q(0), XMMReg),
- VMSTATE_UINT64(XMM_Q(1), XMMReg),
- VMSTATE_UINT64(XMM_Q(2), XMMReg),
- VMSTATE_UINT64(XMM_Q(3), XMMReg),
- VMSTATE_UINT64(XMM_Q(4), XMMReg),
- VMSTATE_UINT64(XMM_Q(5), XMMReg),
- VMSTATE_UINT64(XMM_Q(6), XMMReg),
- VMSTATE_UINT64(XMM_Q(7), XMMReg),
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
VMSTATE_END_OF_LIST()
}
};
#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start) \
VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
- vmstate_hi16_zmm_reg, XMMReg)
+ vmstate_hi16_zmm_reg, ZMMReg)
#endif
static const VMStateDescription vmstate_bnd_regs = {
@@ -331,6 +334,13 @@ static int cpu_post_load(void *opaque, int version_id)
CPUX86State *env = &cpu->env;
int i;
+ if (env->tsc_khz && env->user_tsc_khz &&
+ env->tsc_khz != env->user_tsc_khz) {
+ error_report("Mismatch between user-specified TSC frequency and "
+ "migrated TSC frequency");
+ return -EINVAL;
+ }
+
/*
* Real mode guest segments register DPL should be zero.
* Older KVM version were setting it wrongly.
@@ -367,8 +377,12 @@ static int cpu_post_load(void *opaque, int version_id)
cpu_breakpoint_remove_all(cs, BP_CPU);
cpu_watchpoint_remove_all(cs, BP_CPU);
- for (i = 0; i < DR7_MAX_BP; i++) {
- hw_breakpoint_insert(env, i);
+ {
+ /* Indicate all breakpoints disabled, as they are, then
+ let the helper re-enable them. */
+ target_ulong dr7 = env->dr[7];
+ env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
+ cpu_x86_update_dr7(env, dr7);
}
tlb_flush(cs, 1);
@@ -661,6 +675,115 @@ static const VMStateDescription vmstate_msr_hyperv_time = {
}
};
+static bool hyperv_crash_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
+ if (env->msr_hv_crash_params[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_crash = {
+ .name = "cpu/msr_hyperv_crash",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_crash_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
+ X86CPU, HV_X64_MSR_CRASH_PARAMS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_runtime_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_runtime != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_runtime = {
+ .name = "cpu/msr_hyperv_runtime",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_runtime_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_synic_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->msr_hv_synic_control != 0 ||
+ env->msr_hv_synic_evt_page != 0 ||
+ env->msr_hv_synic_msg_page != 0) {
+ return true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+ if (env->msr_hv_synic_sint[i] != 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+ .name = "cpu/msr_hyperv_synic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_synic_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+ HV_SYNIC_SINT_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_stimer_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
+ if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_stimer = {
+ .name = "cpu/msr_hyperv_stimer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_stimer_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
+ X86CPU, HV_SYNIC_STIMER_COUNT),
+ VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
+ X86CPU, HV_SYNIC_STIMER_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool avx512_needed(void *opaque)
{
X86CPU *cpu = opaque;
@@ -674,7 +797,7 @@ static bool avx512_needed(void *opaque)
}
for (i = 0; i < CPU_NB_REGS; i++) {
-#define ENV_XMM(reg, field) (env->xmm_regs[reg].XMM_Q(field))
+#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
return true;
@@ -726,6 +849,47 @@ static const VMStateDescription vmstate_xss = {
}
};
+#ifdef TARGET_X86_64
+static bool pkru_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->pkru != 0;
+}
+
+static const VMStateDescription vmstate_pkru = {
+ .name = "cpu/pkru",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pkru_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT32(env.pkru, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif
+
+static bool tsc_khz_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
+ return env->tsc_khz && pcmc->save_tsc_khz;
+}
+
+static const VMStateDescription vmstate_tsc_khz = {
+ .name = "cpu/tsc_khz",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tsc_khz_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(env.tsc_khz, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
@@ -842,8 +1006,16 @@ VMStateDescription vmstate_x86_cpu = {
&vmstate_msr_hypercall_hypercall,
&vmstate_msr_hyperv_vapic,
&vmstate_msr_hyperv_time,
+ &vmstate_msr_hyperv_crash,
+ &vmstate_msr_hyperv_runtime,
+ &vmstate_msr_hyperv_synic,
+ &vmstate_msr_hyperv_stimer,
&vmstate_avx512,
&vmstate_xss,
+ &vmstate_tsc_khz,
+#ifdef TARGET_X86_64
+ &vmstate_pkru,
+#endif
NULL
}
};
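All of the new state in machine.c (Hyper-V MSRs, PKRU, tsc_khz) is carried in optional vmstate subsections gated by a .needed callback, so a stream produced with the state at its reset value remains loadable by older QEMU versions. The pattern each addition follows, sketched with a hypothetical field:

    /* 'example_msr' is illustrative, not a real CPUX86State field. */
    static bool example_needed(void *opaque)
    {
        X86CPU *cpu = opaque;
        return cpu->env.example_msr != 0;  /* omit subsection when zero */
    }

    static const VMStateDescription vmstate_example = {
        .name = "cpu/example",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = example_needed,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(env.example_msr, X86CPU),
            VMSTATE_END_OF_LIST()
        }
    };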
diff --git a/qemu/target-i386/mem_helper.c b/qemu/target-i386/mem_helper.c
index 1aec8a5f1..85e75161b 100644
--- a/qemu/target-i386/mem_helper.c
+++ b/qemu/target-i386/mem_helper.c
@@ -17,37 +17,58 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
/* broken thread support */
-static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+#if defined(CONFIG_USER_ONLY)
+QemuMutex global_cpu_lock;
void helper_lock(void)
{
- spin_lock(&global_cpu_lock);
+ qemu_mutex_lock(&global_cpu_lock);
}
void helper_unlock(void)
{
- spin_unlock(&global_cpu_lock);
+ qemu_mutex_unlock(&global_cpu_lock);
}
+void helper_lock_init(void)
+{
+ qemu_mutex_init(&global_cpu_lock);
+}
+#else
+void helper_lock(void)
+{
+}
+
+void helper_unlock(void)
+{
+}
+
+void helper_lock_init(void)
+{
+}
+#endif
+
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
uint64_t d;
int eflags;
eflags = cpu_cc_compute_all(env, CC_OP);
- d = cpu_ldq_data(env, a0);
+ d = cpu_ldq_data_ra(env, a0, GETPC());
if (d == (((uint64_t)env->regs[R_EDX] << 32) | (uint32_t)env->regs[R_EAX])) {
- cpu_stq_data(env, a0, ((uint64_t)env->regs[R_ECX] << 32) | (uint32_t)env->regs[R_EBX]);
+ cpu_stq_data_ra(env, a0, ((uint64_t)env->regs[R_ECX] << 32)
+ | (uint32_t)env->regs[R_EBX], GETPC());
eflags |= CC_Z;
} else {
/* always do the store */
- cpu_stq_data(env, a0, d);
+ cpu_stq_data_ra(env, a0, d, GETPC());
env->regs[R_EDX] = (uint32_t)(d >> 32);
env->regs[R_EAX] = (uint32_t)d;
eflags &= ~CC_Z;
@@ -62,19 +83,19 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
int eflags;
if ((a0 & 0xf) != 0) {
- raise_exception(env, EXCP0D_GPF);
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
eflags = cpu_cc_compute_all(env, CC_OP);
- d0 = cpu_ldq_data(env, a0);
- d1 = cpu_ldq_data(env, a0 + 8);
+ d0 = cpu_ldq_data_ra(env, a0, GETPC());
+ d1 = cpu_ldq_data_ra(env, a0 + 8, GETPC());
if (d0 == env->regs[R_EAX] && d1 == env->regs[R_EDX]) {
- cpu_stq_data(env, a0, env->regs[R_EBX]);
- cpu_stq_data(env, a0 + 8, env->regs[R_ECX]);
+ cpu_stq_data_ra(env, a0, env->regs[R_EBX], GETPC());
+ cpu_stq_data_ra(env, a0 + 8, env->regs[R_ECX], GETPC());
eflags |= CC_Z;
} else {
/* always do the store */
- cpu_stq_data(env, a0, d0);
- cpu_stq_data(env, a0 + 8, d1);
+ cpu_stq_data_ra(env, a0, d0, GETPC());
+ cpu_stq_data_ra(env, a0 + 8, d1, GETPC());
env->regs[R_EDX] = d1;
env->regs[R_EAX] = d0;
eflags &= ~CC_Z;
@@ -87,11 +108,14 @@ void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
int low, high;
- low = cpu_ldsw_data(env, a0);
- high = cpu_ldsw_data(env, a0 + 2);
+ low = cpu_ldsw_data_ra(env, a0, GETPC());
+ high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
v = (int16_t)v;
if (v < low || v > high) {
- raise_exception(env, EXCP05_BOUND);
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
}
@@ -99,10 +123,13 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
int low, high;
- low = cpu_ldl_data(env, a0);
- high = cpu_ldl_data(env, a0 + 4);
+ low = cpu_ldl_data_ra(env, a0, GETPC());
+ high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
if (v < low || v > high) {
- raise_exception(env, EXCP05_BOUND);
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
}
@@ -122,11 +149,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- raise_exception_err(env, cs->exception_index, env->error_code);
+ raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
}
}
#endif
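The cmpxchg helpers keep their slightly surprising "always do the store" behaviour through the _ra conversion: hardware performs the locked write-back even when the comparison fails, writing the old value back. In plain C (a non-atomic, illustrative model; the helper operates on guest memory and eflags instead of 'mem' and 'zf'):

    static uint64_t cmpxchg8b_model(uint64_t *mem, uint64_t expected,
                                    uint64_t desired, bool *zf)
    {
        uint64_t old = *mem;
        *zf = (old == expected);
        *mem = *zf ? desired : old;   /* store happens in both cases */
        return old;                   /* lands in EDX:EAX on failure */
    }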
diff --git a/qemu/target-i386/misc_helper.c b/qemu/target-i386/misc_helper.c
index 52c5d65e9..e31ec976a 100644
--- a/qemu/target-i386/misc_helper.c
+++ b/qemu/target-i386/misc_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
@@ -95,15 +96,6 @@ void helper_into(CPUX86State *env, int next_eip_addend)
}
}
-void helper_single_step(CPUX86State *env)
-{
-#ifndef CONFIG_USER_ONLY
- check_hw_breakpoints(env, true);
- env->dr[6] |= DR6_BS;
-#endif
- raise_exception(env, EXCP01_DB);
-}
-
void helper_cpuid(CPUX86State *env)
{
uint32_t eax, ebx, ecx, edx;
@@ -127,10 +119,6 @@ target_ulong helper_read_crN(CPUX86State *env, int reg)
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
}
-
-void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
-{
-}
#else
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
@@ -176,27 +164,6 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
break;
}
}
-
-void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
-{
- int i;
-
- if (reg < 4) {
- hw_breakpoint_remove(env, reg);
- env->dr[reg] = t0;
- hw_breakpoint_insert(env, reg);
- } else if (reg == 7) {
- for (i = 0; i < DR7_MAX_BP; i++) {
- hw_breakpoint_remove(env, i);
- }
- env->dr[7] = t0;
- for (i = 0; i < DR7_MAX_BP; i++) {
- hw_breakpoint_insert(env, i);
- }
- } else {
- env->dr[reg] = t0;
- }
-}
#endif
void helper_lmsw(CPUX86State *env, target_ulong t0)
@@ -220,7 +187,7 @@ void helper_rdtsc(CPUX86State *env)
uint64_t val;
if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
- raise_exception(env, EXCP0D_GPF);
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);
@@ -238,7 +205,7 @@ void helper_rdtscp(CPUX86State *env)
void helper_rdpmc(CPUX86State *env)
{
if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
- raise_exception(env, EXCP0D_GPF);
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);
@@ -394,6 +361,12 @@ void helper_wrmsr(CPUX86State *env)
case MSR_IA32_MISC_ENABLE:
env->msr_ia32_misc_enable = val;
break;
+ case MSR_IA32_BNDCFGS:
+ /* FIXME: #GP if reserved bits are set. */
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->msr_bndcfgs = val;
+ cpu_sync_bndcs_hflags(env);
+ break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
@@ -539,6 +512,9 @@ void helper_rdmsr(CPUX86State *env)
case MSR_IA32_MISC_ENABLE:
val = env->msr_ia32_misc_enable;
break;
+ case MSR_IA32_BNDCFGS:
+ val = env->msr_bndcfgs;
+ break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
@@ -589,7 +565,7 @@ void helper_hlt(CPUX86State *env, int next_eip_addend)
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
if ((uint32_t)env->regs[R_ECX] != 0) {
- raise_exception(env, EXCP0D_GPF);
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
/* XXX: store address? */
cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
@@ -601,7 +577,7 @@ void helper_mwait(CPUX86State *env, int next_eip_addend)
X86CPU *cpu;
if ((uint32_t)env->regs[R_ECX] != 0) {
- raise_exception(env, EXCP0D_GPF);
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
env->eip += next_eip_addend;
@@ -633,3 +609,30 @@ void helper_debug(CPUX86State *env)
cs->exception_index = EXCP_DEBUG;
cpu_loop_exit(cs);
}
+
+uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
+{
+ if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ if (ecx != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ return env->pkru;
+}
+
+void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ if (ecx != 0 || (val & 0xFFFFFFFF00000000ull)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ env->pkru = val;
+ tlb_flush(cs, 1);
+}
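helper_wrpkru() ends with a full tlb_flush() because protection-key permissions are folded into the softmmu TLB entries; changing PKRU invalidates every cached translation's effective rights. The PKRU layout the helpers rely on gives each of the 16 keys an access-disable and a write-disable bit (per the Intel SDM; the accessors below are illustrative, not part of the patch):

    static inline bool pkru_ad(uint32_t pkru, int key)
    {
        return (pkru >> (2 * key)) & 1;       /* access disable */
    }

    static inline bool pkru_wd(uint32_t pkru, int key)
    {
        return (pkru >> (2 * key + 1)) & 1;   /* write disable */
    }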
diff --git a/qemu/target-i386/monitor.c b/qemu/target-i386/monitor.c
new file mode 100644
index 000000000..fccfe40ab
--- /dev/null
+++ b/qemu/target-i386/monitor.c
@@ -0,0 +1,512 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hw/i386/pc.h"
+#include "sysemu/kvm.h"
+#include "hmp.h"
+
+
+static void print_pte(Monitor *mon, hwaddr addr,
+ hwaddr pte,
+ hwaddr mask)
+{
+#ifdef TARGET_X86_64
+ if (addr & (1ULL << 47)) {
+ addr |= -1LL << 48;
+ }
+#endif
+ monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
+ " %c%c%c%c%c%c%c%c%c\n",
+ addr,
+ pte & mask,
+ pte & PG_NX_MASK ? 'X' : '-',
+ pte & PG_GLOBAL_MASK ? 'G' : '-',
+ pte & PG_PSE_MASK ? 'P' : '-',
+ pte & PG_DIRTY_MASK ? 'D' : '-',
+ pte & PG_ACCESSED_MASK ? 'A' : '-',
+ pte & PG_PCD_MASK ? 'C' : '-',
+ pte & PG_PWT_MASK ? 'T' : '-',
+ pte & PG_USER_MASK ? 'U' : '-',
+ pte & PG_RW_MASK ? 'W' : '-');
+}
+
+static void tlb_info_32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2;
+ uint32_t pgd, pde, pte;
+
+ pgd = env->cr[3] & ~0xfff;
+    for (l1 = 0; l1 < 1024; l1++) {
+ cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+ pde = le32_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ /* 4M pages */
+ print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
+ } else {
+                for (l2 = 0; l2 < 1024; l2++) {
+ cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+ pte = le32_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, (l1 << 22) + (l2 << 12),
+ pte & ~PG_PSE_MASK,
+ ~0xfff);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2, l3;
+ uint64_t pdpe, pde, pte;
+ uint64_t pdp_addr, pd_addr, pt_addr;
+
+ pdp_addr = env->cr[3] & ~0x1f;
+ for (l1 = 0; l1 < 4; l1++) {
+ cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ if (pdpe & PG_PRESENT_MASK) {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ /* 2M pages with PAE, CR4.PSE is ignored */
+ print_pte(mon, (l1 << 30 ) + (l2 << 21), pde,
+ ~((hwaddr)(1 << 20) - 1));
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+ pte = le64_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, (l1 << 30 ) + (l2 << 21)
+ + (l3 << 12),
+ pte & ~PG_PSE_MASK,
+ ~(hwaddr)0xfff);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#ifdef TARGET_X86_64
+static void tlb_info_64(Monitor *mon, CPUArchState *env)
+{
+ uint64_t l1, l2, l3, l4;
+ uint64_t pml4e, pdpe, pde, pte;
+ uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;
+
+ pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+ for (l1 = 0; l1 < 512; l1++) {
+ cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+ pml4e = le64_to_cpu(pml4e);
+ if (pml4e & PG_PRESENT_MASK) {
+ pdp_addr = pml4e & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ if (pdpe & PG_PRESENT_MASK) {
+ if (pdpe & PG_PSE_MASK) {
+ /* 1G pages, CR4.PSE is ignored */
+ print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
+ 0x3ffffc0000000ULL);
+ } else {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ /* 2M pages, CR4.PSE is ignored */
+ print_pte(mon, (l1 << 39) + (l2 << 30) +
+ (l3 << 21), pde,
+ 0x3ffffffe00000ULL);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l4 = 0; l4 < 512; l4++) {
+ cpu_physical_memory_read(pt_addr
+ + l4 * 8,
+ &pte, 8);
+ pte = le64_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, (l1 << 39) +
+ (l2 << 30) +
+ (l3 << 21) + (l4 << 12),
+ pte & ~PG_PSE_MASK,
+ 0x3fffffffff000ULL);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env;
+
+ env = mon_get_cpu_env();
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ monitor_printf(mon, "PG disabled\n");
+ return;
+ }
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ tlb_info_64(mon, env);
+ } else
+#endif
+ {
+ tlb_info_pae32(mon, env);
+ }
+ } else {
+ tlb_info_32(mon, env);
+ }
+}
+
+static void mem_print(Monitor *mon, hwaddr *pstart,
+ int *plast_prot,
+ hwaddr end, int prot)
+{
+ int prot1;
+ prot1 = *plast_prot;
+ if (prot != prot1) {
+ if (*pstart != -1) {
+ monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
+ TARGET_FMT_plx " %c%c%c\n",
+ *pstart, end, end - *pstart,
+ prot1 & PG_USER_MASK ? 'u' : '-',
+ 'r',
+ prot1 & PG_RW_MASK ? 'w' : '-');
+ }
+ if (prot != 0)
+ *pstart = end;
+ else
+ *pstart = -1;
+ *plast_prot = prot;
+ }
+}
+
+static void mem_info_32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2;
+ int prot, last_prot;
+ uint32_t pgd, pde, pte;
+ hwaddr start, end;
+
+ pgd = env->cr[3] & ~0xfff;
+ last_prot = 0;
+ start = -1;
+    for (l1 = 0; l1 < 1024; l1++) {
+ cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+ pde = le32_to_cpu(pde);
+ end = l1 << 22;
+ if (pde & PG_PRESENT_MASK) {
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+                for (l2 = 0; l2 < 1024; l2++) {
+ cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+ pte = le32_to_cpu(pte);
+ end = (l1 << 22) + (l2 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & pde &
+ (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+static void mem_info_pae32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2, l3;
+ int prot, last_prot;
+ uint64_t pdpe, pde, pte;
+ uint64_t pdp_addr, pd_addr, pt_addr;
+ hwaddr start, end;
+
+ pdp_addr = env->cr[3] & ~0x1f;
+ last_prot = 0;
+ start = -1;
+ for (l1 = 0; l1 < 4; l1++) {
+ cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ end = l1 << 30;
+ if (pdpe & PG_PRESENT_MASK) {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ end = (l1 << 30) + (l2 << 21);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+ pte = le64_to_cpu(pte);
+ end = (l1 << 30) + (l2 << 21) + (l3 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+
+#ifdef TARGET_X86_64
+static void mem_info_64(Monitor *mon, CPUArchState *env)
+{
+ int prot, last_prot;
+ uint64_t l1, l2, l3, l4;
+ uint64_t pml4e, pdpe, pde, pte;
+ uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+
+ pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+ last_prot = 0;
+ start = -1;
+ for (l1 = 0; l1 < 512; l1++) {
+ cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+ pml4e = le64_to_cpu(pml4e);
+ end = l1 << 39;
+ if (pml4e & PG_PRESENT_MASK) {
+ pdp_addr = pml4e & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ end = (l1 << 39) + (l2 << 30);
+ if (pdpe & PG_PRESENT_MASK) {
+ if (pdpe & PG_PSE_MASK) {
+ prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e;
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ end = (l1 << 39) + (l2 << 30) + (l3 << 21);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e & pdpe;
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l4 = 0; l4 < 512; l4++) {
+ cpu_physical_memory_read(pt_addr
+ + l4 * 8,
+ &pte, 8);
+ pte = le64_to_cpu(pte);
+ end = (l1 << 39) + (l2 << 30) +
+ (l3 << 21) + (l4 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e & pdpe & pde;
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_mem(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env;
+
+ env = mon_get_cpu_env();
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ monitor_printf(mon, "PG disabled\n");
+ return;
+ }
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ mem_info_64(mon, env);
+ } else
+#endif
+ {
+ mem_info_pae32(mon, env);
+ }
+ } else {
+ mem_info_32(mon, env);
+ }
+}
+
+void hmp_mce(Monitor *mon, const QDict *qdict)
+{
+ X86CPU *cpu;
+ CPUState *cs;
+ int cpu_index = qdict_get_int(qdict, "cpu_index");
+ int bank = qdict_get_int(qdict, "bank");
+ uint64_t status = qdict_get_int(qdict, "status");
+ uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
+ uint64_t addr = qdict_get_int(qdict, "addr");
+ uint64_t misc = qdict_get_int(qdict, "misc");
+ int flags = MCE_INJECT_UNCOND_AO;
+
+ if (qdict_get_try_bool(qdict, "broadcast", false)) {
+ flags |= MCE_INJECT_BROADCAST;
+ }
+ cs = qemu_get_cpu(cpu_index);
+ if (cs != NULL) {
+ cpu = X86_CPU(cs);
+ cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
+ flags);
+ }
+}
+
+static target_long monitor_get_pc(const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return env->eip + env->segs[R_CS].base;
+}
+
+const MonitorDef monitor_defs[] = {
+#define SEG(name, seg) \
+ { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
+ { name ".base", offsetof(CPUX86State, segs[seg].base) },\
+ { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
+
+ { "eax", offsetof(CPUX86State, regs[0]) },
+ { "ecx", offsetof(CPUX86State, regs[1]) },
+ { "edx", offsetof(CPUX86State, regs[2]) },
+ { "ebx", offsetof(CPUX86State, regs[3]) },
+ { "esp|sp", offsetof(CPUX86State, regs[4]) },
+ { "ebp|fp", offsetof(CPUX86State, regs[5]) },
+ { "esi", offsetof(CPUX86State, regs[6]) },
+ { "edi", offsetof(CPUX86State, regs[7]) },
+#ifdef TARGET_X86_64
+ { "r8", offsetof(CPUX86State, regs[8]) },
+ { "r9", offsetof(CPUX86State, regs[9]) },
+ { "r10", offsetof(CPUX86State, regs[10]) },
+ { "r11", offsetof(CPUX86State, regs[11]) },
+ { "r12", offsetof(CPUX86State, regs[12]) },
+ { "r13", offsetof(CPUX86State, regs[13]) },
+ { "r14", offsetof(CPUX86State, regs[14]) },
+ { "r15", offsetof(CPUX86State, regs[15]) },
+#endif
+ { "eflags", offsetof(CPUX86State, eflags) },
+ { "eip", offsetof(CPUX86State, eip) },
+ SEG("cs", R_CS)
+ SEG("ds", R_DS)
+ SEG("es", R_ES)
+ SEG("ss", R_SS)
+ SEG("fs", R_FS)
+ SEG("gs", R_GS)
+ { "pc", 0, monitor_get_pc, },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
+
+void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
+{
+ x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
+ CPU_DUMP_FPU);
+}
+
+void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
+{
+ if (kvm_irqchip_in_kernel()) {
+ kvm_ioapic_dump_state(mon, qdict);
+ } else {
+ ioapic_dump_state(mon, qdict);
+ }
+}
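print_pte() sign-extends any address with bit 47 set so that 64-bit mappings are displayed in canonical form. Standalone, the transformation is (illustrative helper, not part of the file):

    static uint64_t canonicalize48(uint64_t addr)
    {
        if (addr & (1ULL << 47)) {
            addr |= ~0ULL << 48;  /* 0x0000800000000000 -> 0xffff8000... */
        }
        return addr;
    }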
diff --git a/qemu/target-i386/mpx_helper.c b/qemu/target-i386/mpx_helper.c
new file mode 100644
index 000000000..4d1785ece
--- /dev/null
+++ b/qemu/target-i386/mpx_helper.c
@@ -0,0 +1,167 @@
+/*
+ * x86 MPX helpers
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+
+
+void cpu_sync_bndcs_hflags(CPUX86State *env)
+{
+ uint32_t hflags = env->hflags;
+ uint32_t hflags2 = env->hflags2;
+ uint32_t bndcsr;
+
+ if ((hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ if ((env->cr[4] & CR4_OSXSAVE_MASK)
+ && (env->xcr0 & XSTATE_BNDCSR_MASK)
+ && (bndcsr & BNDCFG_ENABLE)) {
+ hflags |= HF_MPX_EN_MASK;
+ } else {
+ hflags &= ~HF_MPX_EN_MASK;
+ }
+
+ if (bndcsr & BNDCFG_BNDPRESERVE) {
+ hflags2 |= HF2_MPX_PR_MASK;
+ } else {
+ hflags2 &= ~HF2_MPX_PR_MASK;
+ }
+
+ env->hflags = hflags;
+ env->hflags2 = hflags2;
+}
+
+void helper_bndck(CPUX86State *env, uint32_t fail)
+{
+ if (unlikely(fail)) {
+ env->bndcs_regs.sts = 1;
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
+
+static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra)
+{
+ uint64_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12);
+ bt = cpu_ldq_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract64(base, 3, 17) << 5) + (bt & ~7);
+}
+
+static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra)
+{
+ uint32_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK);
+ bt = cpu_ldl_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract32(base, 2, 10) << 4) + (bt & ~3);
+}
+
+uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte, lb, ub, pt;
+
+ bte = lookup_bte64(env, base, ra);
+ lb = cpu_ldq_data_ra(env, bte, ra);
+ ub = cpu_ldq_data_ra(env, bte + 8, ra);
+ pt = cpu_ldq_data_ra(env, bte + 16, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ env->mmx_t0.MMX_Q(0) = ub;
+ return lb;
+}
+
+uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte, lb, ub, pt;
+
+ bte = lookup_bte32(env, base, ra);
+ lb = cpu_ldl_data_ra(env, bte, ra);
+ ub = cpu_ldl_data_ra(env, bte + 4, ra);
+ pt = cpu_ldl_data_ra(env, bte + 8, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ return ((uint64_t)ub << 32) | lb;
+}
+
+void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte;
+
+ bte = lookup_bte64(env, base, ra);
+ cpu_stq_data_ra(env, bte, lb, ra);
+ cpu_stq_data_ra(env, bte + 8, ub, ra);
+ cpu_stq_data_ra(env, bte + 16, ptr, ra);
+}
+
+void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte;
+
+ bte = lookup_bte32(env, base, ra);
+ cpu_stl_data_ra(env, bte, lb, ra);
+ cpu_stl_data_ra(env, bte + 4, ub, ra);
+ cpu_stl_data_ra(env, bte + 8, ptr, ra);
+}
+
+void helper_bnd_jmp(CPUX86State *env)
+{
+ if (!(env->hflags2 & HF2_MPX_PR_MASK)) {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+}
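To make the two-level table walk in lookup_bte64 easier to follow, here is a standalone restatement of the same address arithmetic (a sketch only: fault handling, the valid-bit check, and the 32-bit variant are omitted):

    #include <stdint.h>

    /* Bits [pos, pos+len) of x, as in QEMU's extract64() (len < 64). */
    static uint64_t bits64(uint64_t x, int pos, int len)
    {
        return (x >> pos) & (~0ULL >> (64 - len));
    }

    /* Bound-directory entry address: directory base from BNDCFGx bits
       63:20 (page aligned), indexed by pointer bits 47:20, 8-byte entries. */
    static uint64_t bde_addr(uint64_t bndcfg, uint64_t ptr)
    {
        return (bits64(bndcfg, 20, 44) << 12) + (bits64(ptr, 20, 28) << 3);
    }

    /* Bound-table entry address: table base from the directory entry
       (low flag bits masked off), indexed by pointer bits 19:3,
       32-byte entries holding {lb, ub, pointer, reserved} as read
       back by helper_bndldx64 above. */
    static uint64_t bte_addr(uint64_t bde_value, uint64_t ptr)
    {
        return (bde_value & ~7ULL) + (bits64(ptr, 3, 17) << 5);
    }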
diff --git a/qemu/target-i386/ops_sse.h b/qemu/target-i386/ops_sse.h
index bee134bae..7a98f5386 100644
--- a/qemu/target-i386/ops_sse.h
+++ b/qemu/target-i386/ops_sse.h
@@ -26,15 +26,15 @@
#define B(n) MMX_B(n)
#define W(n) MMX_W(n)
#define L(n) MMX_L(n)
-#define Q(n) q
+#define Q(n) MMX_Q(n)
#define SUFFIX _mmx
#else
-#define Reg XMMReg
+#define Reg ZMMReg
#define XMM_ONLY(...) __VA_ARGS__
-#define B(n) XMM_B(n)
-#define W(n) XMM_W(n)
-#define L(n) XMM_L(n)
-#define Q(n) XMM_Q(n)
+#define B(n) ZMM_B(n)
+#define W(n) ZMM_W(n)
+#define L(n) ZMM_L(n)
+#define Q(n) ZMM_Q(n)
#define SUFFIX _xmm
#endif
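The XMMReg -> ZMMReg rename running through this file tracks the widening of the vector register file in cpu.h for AVX-512 state; the per-lane accessors keep the same element layout and only change prefix. A rough little-endian sketch of what the new names resolve to (the real definitions live in cpu.h and also cope with big-endian hosts, so treat this as illustrative only):

    #include <stdint.h>

    typedef uint32_t float32;   /* softfloat values are raw bit patterns */
    typedef uint64_t float64;

    typedef union ZMMReg {
        uint8_t  _b[64];        /* ZMM_B(n): byte lanes   */
        uint16_t _w[32];        /* ZMM_W(n): word lanes   */
        uint32_t _l[16];        /* ZMM_L(n): dword lanes  */
        uint64_t _q[8];         /* ZMM_Q(n): qword lanes  */
        float32  _s[16];        /* ZMM_S(n): float lanes  */
        float64  _d[8];         /* ZMM_D(n): double lanes */
    } ZMMReg;

    #define ZMM_B(n) _b[n]
    #define ZMM_L(n) _l[n]
    #define ZMM_S(n) _s[n]
    /* ...and so on; the SSE helpers still touch only lanes 0-3 / 0-1. */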
@@ -483,7 +483,7 @@ void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
for (i = 0; i < (8 << SHIFT); i++) {
if (s->B(i) & 0x80) {
- cpu_stb_data(env, a0 + i, d->B(i));
+ cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC());
}
}
}
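The cpu_stb_data -> cpu_stb_data_ra change above is one instance of a pattern applied throughout this update: helpers that touch guest memory now thread a host return address through the access, so a TLB fault raised mid-instruction can unwind the correct translation block and restore precise guest state. The shape of the pattern, sketched with a hypothetical helper (the cpu_*_ra and GETPC APIs are the real ones; helper_store_example is made up):

    void helper_store_example(CPUX86State *env, target_ulong a0, uint32_t v)
    {
        /* GETPC() must run in the outermost helper: it captures the host
           return address pointing back into the generated code. */
        uintptr_t ra = GETPC();

        /* Every guest access made on behalf of this instruction forwards
           it, so a fault is attributed to the right TB and restarted. */
        cpu_stb_data_ra(env, a0, v, ra);
    }

A retaddr of 0 (as in the load_segment() wrapper added to seg_helper.c later in this patch) means "not called from translated code", in which case no unwinding is attempted.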
@@ -582,26 +582,26 @@ void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order)
#define SSE_HELPER_S(name, F) \
void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
- d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \
- d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \
- d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \
+ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ d->ZMM_S(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \
+ d->ZMM_S(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \
+ d->ZMM_S(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \
} \
\
void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
+ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
} \
\
void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
- d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \
+ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ d->ZMM_D(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \
} \
\
void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
+ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
}
#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
@@ -633,216 +633,216 @@ void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s)
{
float32 s0, s1;
- s0 = s->XMM_S(0);
- s1 = s->XMM_S(1);
- d->XMM_D(0) = float32_to_float64(s0, &env->sse_status);
- d->XMM_D(1) = float32_to_float64(s1, &env->sse_status);
+ s0 = s->ZMM_S(0);
+ s1 = s->ZMM_S(1);
+ d->ZMM_D(0) = float32_to_float64(s0, &env->sse_status);
+ d->ZMM_D(1) = float32_to_float64(s1, &env->sse_status);
}
void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s)
{
- d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status);
- d->XMM_S(1) = float64_to_float32(s->XMM_D(1), &env->sse_status);
+ d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_S(1) = float64_to_float32(s->ZMM_D(1), &env->sse_status);
d->Q(1) = 0;
}
void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s)
{
- d->XMM_D(0) = float32_to_float64(s->XMM_S(0), &env->sse_status);
+ d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status);
}
void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s)
{
- d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status);
+ d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
}
/* integer to float */
void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s)
{
- d->XMM_S(0) = int32_to_float32(s->XMM_L(0), &env->sse_status);
- d->XMM_S(1) = int32_to_float32(s->XMM_L(1), &env->sse_status);
- d->XMM_S(2) = int32_to_float32(s->XMM_L(2), &env->sse_status);
- d->XMM_S(3) = int32_to_float32(s->XMM_L(3), &env->sse_status);
+ d->ZMM_S(0) = int32_to_float32(s->ZMM_L(0), &env->sse_status);
+ d->ZMM_S(1) = int32_to_float32(s->ZMM_L(1), &env->sse_status);
+ d->ZMM_S(2) = int32_to_float32(s->ZMM_L(2), &env->sse_status);
+ d->ZMM_S(3) = int32_to_float32(s->ZMM_L(3), &env->sse_status);
}
void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s)
{
int32_t l0, l1;
- l0 = (int32_t)s->XMM_L(0);
- l1 = (int32_t)s->XMM_L(1);
- d->XMM_D(0) = int32_to_float64(l0, &env->sse_status);
- d->XMM_D(1) = int32_to_float64(l1, &env->sse_status);
+ l0 = (int32_t)s->ZMM_L(0);
+ l1 = (int32_t)s->ZMM_L(1);
+ d->ZMM_D(0) = int32_to_float64(l0, &env->sse_status);
+ d->ZMM_D(1) = int32_to_float64(l1, &env->sse_status);
}
-void helper_cvtpi2ps(CPUX86State *env, XMMReg *d, MMXReg *s)
+void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s)
{
- d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
- d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
+ d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
+ d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
}
-void helper_cvtpi2pd(CPUX86State *env, XMMReg *d, MMXReg *s)
+void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s)
{
- d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
- d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
+ d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
+ d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
}
-void helper_cvtsi2ss(CPUX86State *env, XMMReg *d, uint32_t val)
+void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val)
{
- d->XMM_S(0) = int32_to_float32(val, &env->sse_status);
+ d->ZMM_S(0) = int32_to_float32(val, &env->sse_status);
}
-void helper_cvtsi2sd(CPUX86State *env, XMMReg *d, uint32_t val)
+void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val)
{
- d->XMM_D(0) = int32_to_float64(val, &env->sse_status);
+ d->ZMM_D(0) = int32_to_float64(val, &env->sse_status);
}
#ifdef TARGET_X86_64
-void helper_cvtsq2ss(CPUX86State *env, XMMReg *d, uint64_t val)
+void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val)
{
- d->XMM_S(0) = int64_to_float32(val, &env->sse_status);
+ d->ZMM_S(0) = int64_to_float32(val, &env->sse_status);
}
-void helper_cvtsq2sd(CPUX86State *env, XMMReg *d, uint64_t val)
+void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val)
{
- d->XMM_D(0) = int64_to_float64(val, &env->sse_status);
+ d->ZMM_D(0) = int64_to_float64(val, &env->sse_status);
}
#endif
/* float to integer */
-void helper_cvtps2dq(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
- d->XMM_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);
- d->XMM_L(2) = float32_to_int32(s->XMM_S(2), &env->sse_status);
- d->XMM_L(3) = float32_to_int32(s->XMM_S(3), &env->sse_status);
+ d->ZMM_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_L(2) = float32_to_int32(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_L(3) = float32_to_int32(s->ZMM_S(3), &env->sse_status);
}
-void helper_cvtpd2dq(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
- d->XMM_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);
- d->XMM_Q(1) = 0;
+ d->ZMM_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
+ d->ZMM_Q(1) = 0;
}
-void helper_cvtps2pi(CPUX86State *env, MMXReg *d, XMMReg *s)
+void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
- d->MMX_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
- d->MMX_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);
+ d->MMX_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
+ d->MMX_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
}
-void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, XMMReg *s)
+void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
- d->MMX_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
- d->MMX_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);
+ d->MMX_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
+ d->MMX_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
}
-int32_t helper_cvtss2si(CPUX86State *env, XMMReg *s)
+int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s)
{
- return float32_to_int32(s->XMM_S(0), &env->sse_status);
+ return float32_to_int32(s->ZMM_S(0), &env->sse_status);
}
-int32_t helper_cvtsd2si(CPUX86State *env, XMMReg *s)
+int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s)
{
- return float64_to_int32(s->XMM_D(0), &env->sse_status);
+ return float64_to_int32(s->ZMM_D(0), &env->sse_status);
}
#ifdef TARGET_X86_64
-int64_t helper_cvtss2sq(CPUX86State *env, XMMReg *s)
+int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s)
{
- return float32_to_int64(s->XMM_S(0), &env->sse_status);
+ return float32_to_int64(s->ZMM_S(0), &env->sse_status);
}
-int64_t helper_cvtsd2sq(CPUX86State *env, XMMReg *s)
+int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s)
{
- return float64_to_int64(s->XMM_D(0), &env->sse_status);
+ return float64_to_int64(s->ZMM_D(0), &env->sse_status);
}
#endif
/* float to integer truncated */
-void helper_cvttps2dq(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
- d->XMM_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);
- d->XMM_L(2) = float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status);
- d->XMM_L(3) = float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status);
+ d->ZMM_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_L(2) = float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_L(3) = float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status);
}
-void helper_cvttpd2dq(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
- d->XMM_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);
- d->XMM_Q(1) = 0;
+ d->ZMM_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
+ d->ZMM_Q(1) = 0;
}
-void helper_cvttps2pi(CPUX86State *env, MMXReg *d, XMMReg *s)
+void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
- d->MMX_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
- d->MMX_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);
+ d->MMX_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+ d->MMX_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
}
-void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, XMMReg *s)
+void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
- d->MMX_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
- d->MMX_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);
+ d->MMX_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+ d->MMX_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
}
-int32_t helper_cvttss2si(CPUX86State *env, XMMReg *s)
+int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s)
{
- return float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
+ return float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
}
-int32_t helper_cvttsd2si(CPUX86State *env, XMMReg *s)
+int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s)
{
- return float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
+ return float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
}
#ifdef TARGET_X86_64
-int64_t helper_cvttss2sq(CPUX86State *env, XMMReg *s)
+int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s)
{
- return float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status);
+ return float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status);
}
-int64_t helper_cvttsd2sq(CPUX86State *env, XMMReg *s)
+int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
{
- return float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status);
+ return float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status);
}
#endif
-void helper_rsqrtps(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_S(0) = float32_div(float32_one,
- float32_sqrt(s->XMM_S(0), &env->sse_status),
+ d->ZMM_S(0) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(0), &env->sse_status),
&env->sse_status);
- d->XMM_S(1) = float32_div(float32_one,
- float32_sqrt(s->XMM_S(1), &env->sse_status),
+ d->ZMM_S(1) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(1), &env->sse_status),
&env->sse_status);
- d->XMM_S(2) = float32_div(float32_one,
- float32_sqrt(s->XMM_S(2), &env->sse_status),
+ d->ZMM_S(2) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(2), &env->sse_status),
&env->sse_status);
- d->XMM_S(3) = float32_div(float32_one,
- float32_sqrt(s->XMM_S(3), &env->sse_status),
+ d->ZMM_S(3) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(3), &env->sse_status),
&env->sse_status);
}
-void helper_rsqrtss(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_S(0) = float32_div(float32_one,
- float32_sqrt(s->XMM_S(0), &env->sse_status),
+ d->ZMM_S(0) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(0), &env->sse_status),
&env->sse_status);
}
-void helper_rcpps(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status);
- d->XMM_S(1) = float32_div(float32_one, s->XMM_S(1), &env->sse_status);
- d->XMM_S(2) = float32_div(float32_one, s->XMM_S(2), &env->sse_status);
- d->XMM_S(3) = float32_div(float32_one, s->XMM_S(3), &env->sse_status);
+ d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status);
}
-void helper_rcpss(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status);
+ d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
}
static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
@@ -857,14 +857,14 @@ static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
return (src >> shift) & mask;
}
-void helper_extrq_r(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), s->XMM_B(1), s->XMM_B(0));
+ d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0));
}
-void helper_extrq_i(CPUX86State *env, XMMReg *d, int index, int length)
+void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length)
{
- d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), index, length);
+ d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length);
}
static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
@@ -879,94 +879,94 @@ static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
return (src & ~(mask << shift)) | ((src & mask) << shift);
}
-void helper_insertq_r(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_Q(0) = helper_insertq(s->XMM_Q(0), s->XMM_B(9), s->XMM_B(8));
+ d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8));
}
-void helper_insertq_i(CPUX86State *env, XMMReg *d, int index, int length)
+void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length)
{
- d->XMM_Q(0) = helper_insertq(d->XMM_Q(0), index, length);
+ d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length);
}
-void helper_haddps(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- XMMReg r;
+ ZMMReg r;
- r.XMM_S(0) = float32_add(d->XMM_S(0), d->XMM_S(1), &env->sse_status);
- r.XMM_S(1) = float32_add(d->XMM_S(2), d->XMM_S(3), &env->sse_status);
- r.XMM_S(2) = float32_add(s->XMM_S(0), s->XMM_S(1), &env->sse_status);
- r.XMM_S(3) = float32_add(s->XMM_S(2), s->XMM_S(3), &env->sse_status);
+ r.ZMM_S(0) = float32_add(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(1) = float32_add(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+ r.ZMM_S(2) = float32_add(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(3) = float32_add(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
*d = r;
}
-void helper_haddpd(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_haddpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- XMMReg r;
+ ZMMReg r;
- r.XMM_D(0) = float64_add(d->XMM_D(0), d->XMM_D(1), &env->sse_status);
- r.XMM_D(1) = float64_add(s->XMM_D(0), s->XMM_D(1), &env->sse_status);
+ r.ZMM_D(0) = float64_add(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+ r.ZMM_D(1) = float64_add(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
*d = r;
}
-void helper_hsubps(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_hsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- XMMReg r;
+ ZMMReg r;
- r.XMM_S(0) = float32_sub(d->XMM_S(0), d->XMM_S(1), &env->sse_status);
- r.XMM_S(1) = float32_sub(d->XMM_S(2), d->XMM_S(3), &env->sse_status);
- r.XMM_S(2) = float32_sub(s->XMM_S(0), s->XMM_S(1), &env->sse_status);
- r.XMM_S(3) = float32_sub(s->XMM_S(2), s->XMM_S(3), &env->sse_status);
+ r.ZMM_S(0) = float32_sub(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(1) = float32_sub(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+ r.ZMM_S(2) = float32_sub(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(3) = float32_sub(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
*d = r;
}
-void helper_hsubpd(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_hsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- XMMReg r;
+ ZMMReg r;
- r.XMM_D(0) = float64_sub(d->XMM_D(0), d->XMM_D(1), &env->sse_status);
- r.XMM_D(1) = float64_sub(s->XMM_D(0), s->XMM_D(1), &env->sse_status);
+ r.ZMM_D(0) = float64_sub(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+ r.ZMM_D(1) = float64_sub(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
*d = r;
}
-void helper_addsubps(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_addsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_S(0) = float32_sub(d->XMM_S(0), s->XMM_S(0), &env->sse_status);
- d->XMM_S(1) = float32_add(d->XMM_S(1), s->XMM_S(1), &env->sse_status);
- d->XMM_S(2) = float32_sub(d->XMM_S(2), s->XMM_S(2), &env->sse_status);
- d->XMM_S(3) = float32_add(d->XMM_S(3), s->XMM_S(3), &env->sse_status);
+ d->ZMM_S(0) = float32_sub(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_add(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_sub(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_add(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
}
-void helper_addsubpd(CPUX86State *env, XMMReg *d, XMMReg *s)
+void helper_addsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->XMM_D(0) = float64_sub(d->XMM_D(0), s->XMM_D(0), &env->sse_status);
- d->XMM_D(1) = float64_add(d->XMM_D(1), s->XMM_D(1), &env->sse_status);
+ d->ZMM_D(0) = float64_sub(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+ d->ZMM_D(1) = float64_add(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
}
/* XXX: unordered */
#define SSE_HELPER_CMP(name, F) \
void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
- d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \
- d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \
- d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \
+ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ d->ZMM_L(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \
+ d->ZMM_L(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \
+ d->ZMM_L(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \
} \
\
void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
+ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
} \
\
void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
- d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \
+ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ d->ZMM_Q(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \
} \
\
void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \
{ \
- d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
+ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
}
#define FPU_CMPEQ(size, a, b) \
@@ -1002,8 +1002,8 @@ void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s)
int ret;
float32 s0, s1;
- s0 = d->XMM_S(0);
- s1 = s->XMM_S(0);
+ s0 = d->ZMM_S(0);
+ s1 = s->ZMM_S(0);
ret = float32_compare_quiet(s0, s1, &env->sse_status);
CC_SRC = comis_eflags[ret + 1];
}
@@ -1013,8 +1013,8 @@ void helper_comiss(CPUX86State *env, Reg *d, Reg *s)
int ret;
float32 s0, s1;
- s0 = d->XMM_S(0);
- s1 = s->XMM_S(0);
+ s0 = d->ZMM_S(0);
+ s1 = s->ZMM_S(0);
ret = float32_compare(s0, s1, &env->sse_status);
CC_SRC = comis_eflags[ret + 1];
}
@@ -1024,8 +1024,8 @@ void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s)
int ret;
float64 d0, d1;
- d0 = d->XMM_D(0);
- d1 = s->XMM_D(0);
+ d0 = d->ZMM_D(0);
+ d1 = s->ZMM_D(0);
ret = float64_compare_quiet(d0, d1, &env->sse_status);
CC_SRC = comis_eflags[ret + 1];
}
@@ -1035,8 +1035,8 @@ void helper_comisd(CPUX86State *env, Reg *d, Reg *s)
int ret;
float64 d0, d1;
- d0 = d->XMM_D(0);
- d1 = s->XMM_D(0);
+ d0 = d->ZMM_D(0);
+ d1 = s->ZMM_D(0);
ret = float64_compare(d0, d1, &env->sse_status);
CC_SRC = comis_eflags[ret + 1];
}
@@ -1045,10 +1045,10 @@ uint32_t helper_movmskps(CPUX86State *env, Reg *s)
{
int b0, b1, b2, b3;
- b0 = s->XMM_L(0) >> 31;
- b1 = s->XMM_L(1) >> 31;
- b2 = s->XMM_L(2) >> 31;
- b3 = s->XMM_L(3) >> 31;
+ b0 = s->ZMM_L(0) >> 31;
+ b1 = s->ZMM_L(1) >> 31;
+ b2 = s->ZMM_L(2) >> 31;
+ b3 = s->ZMM_L(3) >> 31;
return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3);
}
@@ -1056,8 +1056,8 @@ uint32_t helper_movmskpd(CPUX86State *env, Reg *s)
{
int b0, b1;
- b0 = s->XMM_L(1) >> 31;
- b1 = s->XMM_L(3) >> 31;
+ b0 = s->ZMM_L(1) >> 31;
+ b1 = s->ZMM_L(3) >> 31;
return b0 | (b1 << 1);
}
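helper_movmskps/movmskpd pack the sign bit of each lane into the low bits of the result; movmskpd reads dword lanes 1 and 3 because the sign of a float64 sits in the top bit of its high dword. Two worked examples:

    /* s = {-1.0f, 2.0f, -3.0f, 4.0f}: sign bits 1,0,1,0
         => movmskps returns 0b0101 = 5.
       s = {-1.0, 2.0} (two doubles): ZMM_L(1) >> 31 = 1, ZMM_L(3) >> 31 = 0
         => movmskpd returns 1. */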
@@ -1736,10 +1736,10 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
}
}
- d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);
- d->XMM_S(1) = float32_round_to_int(s->XMM_S(1), &env->sse_status);
- d->XMM_S(2) = float32_round_to_int(s->XMM_S(2), &env->sse_status);
- d->XMM_S(3) = float32_round_to_int(s->XMM_S(3), &env->sse_status);
+ d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_round_to_int(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status);
#if 0 /* TODO */
if (mode & (1 << 3)) {
@@ -1774,8 +1774,8 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
}
}
- d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);
- d->XMM_D(1) = float64_round_to_int(s->XMM_D(1), &env->sse_status);
+ d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status);
#if 0 /* TODO */
if (mode & (1 << 3)) {
@@ -1810,7 +1810,7 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
}
}
- d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);
+ d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
#if 0 /* TODO */
if (mode & (1 << 3)) {
@@ -1845,7 +1845,7 @@ void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
}
}
- d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);
+ d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
#if 0 /* TODO */
if (mode & (1 << 3)) {
@@ -1868,32 +1868,32 @@ void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
if (mask & (1 << 4)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(0), s->XMM_S(0),
+ float32_mul(d->ZMM_S(0), s->ZMM_S(0),
&env->sse_status),
&env->sse_status);
}
if (mask & (1 << 5)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(1), s->XMM_S(1),
+ float32_mul(d->ZMM_S(1), s->ZMM_S(1),
&env->sse_status),
&env->sse_status);
}
if (mask & (1 << 6)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(2), s->XMM_S(2),
+ float32_mul(d->ZMM_S(2), s->ZMM_S(2),
&env->sse_status),
&env->sse_status);
}
if (mask & (1 << 7)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(3), s->XMM_S(3),
+ float32_mul(d->ZMM_S(3), s->ZMM_S(3),
&env->sse_status),
&env->sse_status);
}
- d->XMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero;
- d->XMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero;
- d->XMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero;
- d->XMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero;
+ d->ZMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero;
+ d->ZMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero;
+ d->ZMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero;
+ d->ZMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero;
}
void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
@@ -1902,18 +1902,18 @@ void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
if (mask & (1 << 4)) {
iresult = float64_add(iresult,
- float64_mul(d->XMM_D(0), s->XMM_D(0),
+ float64_mul(d->ZMM_D(0), s->ZMM_D(0),
&env->sse_status),
&env->sse_status);
}
if (mask & (1 << 5)) {
iresult = float64_add(iresult,
- float64_mul(d->XMM_D(1), s->XMM_D(1),
+ float64_mul(d->ZMM_D(1), s->ZMM_D(1),
&env->sse_status),
&env->sse_status);
}
- d->XMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero;
- d->XMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero;
+ d->ZMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero;
+ d->ZMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero;
}
void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
@@ -2037,10 +2037,10 @@ static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s,
}
break;
case 3:
- for (j = valids - validd; j >= 0; j--) {
+ for (j = valids; j >= 0; j--) {
res <<= 1;
v = 1;
- for (i = MIN(upper - j, validd); i >= 0; i--) {
+ for (i = MIN(valids - j, validd); i >= 0; i--) {
v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
}
res |= v;
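The loop-bound change just above is a genuine bug fix for aggregation mode 3 of pcmpxstrx ("equal ordered", the substring-search mode of PCMPESTRI/PCMPISTRM and friends): the outer loop must consider every alignment j of the needle (first operand, d) against the haystack (second operand, s) up to valids, and the inner comparison must stop at whichever runs out first, the remaining haystack (valids - j) or the needle (validd). A standalone sketch of the corrected aggregation, with the pcmp_val() element extraction simplified to a byte array (indices count valid elements inclusively, as in the original):

    #include <stdint.h>
    #define MIN(a, b) ((a) < (b) ? (a) : (b))  /* QEMU gets this from osdep.h */

    /* Bit j of the result is set when the needle matches the haystack at
       offset j; a needle truncated by the end of the haystack still counts,
       matching the architected equal-ordered behaviour. */
    static unsigned equal_ordered(const uint8_t *hay, int valids,
                                  const uint8_t *needle, int validd)
    {
        unsigned res = 0;
        for (int j = valids; j >= 0; j--) {
            int v = 1;
            res <<= 1;
            for (int i = MIN(valids - j, validd); i >= 0; i--) {
                v &= (hay[i + j] == needle[i]);
            }
            res |= v;
        }
        return res;
    }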
diff --git a/qemu/target-i386/ops_sse_header.h b/qemu/target-i386/ops_sse_header.h
index a68c7cc0c..64c5857cf 100644
--- a/qemu/target-i386/ops_sse_header.h
+++ b/qemu/target-i386/ops_sse_header.h
@@ -20,18 +20,18 @@
#define Reg MMXReg
#define SUFFIX _mmx
#else
-#define Reg XMMReg
+#define Reg ZMMReg
#define SUFFIX _xmm
#endif
#define dh_alias_Reg ptr
-#define dh_alias_XMMReg ptr
+#define dh_alias_ZMMReg ptr
#define dh_alias_MMXReg ptr
#define dh_ctype_Reg Reg *
-#define dh_ctype_XMMReg XMMReg *
+#define dh_ctype_ZMMReg ZMMReg *
#define dh_ctype_MMXReg MMXReg *
#define dh_is_signed_Reg dh_is_signed_ptr
-#define dh_is_signed_XMMReg dh_is_signed_ptr
+#define dh_is_signed_ZMMReg dh_is_signed_ptr
#define dh_is_signed_MMXReg dh_is_signed_ptr
DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg)
@@ -154,52 +154,52 @@ DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg)
DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg)
DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg)
DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg)
-DEF_HELPER_3(cvtpi2ps, void, env, XMMReg, MMXReg)
-DEF_HELPER_3(cvtpi2pd, void, env, XMMReg, MMXReg)
-DEF_HELPER_3(cvtsi2ss, void, env, XMMReg, i32)
-DEF_HELPER_3(cvtsi2sd, void, env, XMMReg, i32)
+DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32)
+DEF_HELPER_3(cvtsi2sd, void, env, ZMMReg, i32)
#ifdef TARGET_X86_64
-DEF_HELPER_3(cvtsq2ss, void, env, XMMReg, i64)
-DEF_HELPER_3(cvtsq2sd, void, env, XMMReg, i64)
+DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64)
+DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64)
#endif
-DEF_HELPER_3(cvtps2dq, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(cvtpd2dq, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(cvtps2pi, void, env, MMXReg, XMMReg)
-DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, XMMReg)
-DEF_HELPER_2(cvtss2si, s32, env, XMMReg)
-DEF_HELPER_2(cvtsd2si, s32, env, XMMReg)
+DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvtss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvtsd2si, s32, env, ZMMReg)
#ifdef TARGET_X86_64
-DEF_HELPER_2(cvtss2sq, s64, env, XMMReg)
-DEF_HELPER_2(cvtsd2sq, s64, env, XMMReg)
+DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg)
#endif
-DEF_HELPER_3(cvttps2dq, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(cvttpd2dq, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(cvttps2pi, void, env, MMXReg, XMMReg)
-DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, XMMReg)
-DEF_HELPER_2(cvttss2si, s32, env, XMMReg)
-DEF_HELPER_2(cvttsd2si, s32, env, XMMReg)
+DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvttss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg)
#ifdef TARGET_X86_64
-DEF_HELPER_2(cvttss2sq, s64, env, XMMReg)
-DEF_HELPER_2(cvttsd2sq, s64, env, XMMReg)
+DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg)
#endif
-DEF_HELPER_3(rsqrtps, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(rsqrtss, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(rcpps, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(rcpss, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(extrq_r, void, env, XMMReg, XMMReg)
-DEF_HELPER_4(extrq_i, void, env, XMMReg, int, int)
-DEF_HELPER_3(insertq_r, void, env, XMMReg, XMMReg)
-DEF_HELPER_4(insertq_i, void, env, XMMReg, int, int)
-DEF_HELPER_3(haddps, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(haddpd, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(hsubps, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(hsubpd, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(addsubps, void, env, XMMReg, XMMReg)
-DEF_HELPER_3(addsubpd, void, env, XMMReg, XMMReg)
+DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg)
#define SSE_HELPER_CMP(name, F) \
DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
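The dh_alias_*/dh_ctype_* defines above are the glue that lets the DEF_HELPER_* macros map the Reg/ZMMReg/MMXReg argument tags onto TCG calling conventions: dh_alias_* says how the value travels at the TCG level (all three are host pointers into CPUX86State) and dh_ctype_* supplies the C prototype type. Roughly (a simplification of include/exec/helper-proto.h; the real expansion is more indirect):

    /* Given the declaration above: */
    DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg)

    /* the derived C prototype is approximately: */
    void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s);

    /* dh_alias_ZMMReg == ptr is what allows translate.c to pass a
       TCGv_ptr computed from offsetof(CPUX86State, xmm_regs[n]). */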
diff --git a/qemu/target-i386/seg_helper.c b/qemu/target-i386/seg_helper.c
index 8a4271ebe..b5f3d72fe 100644
--- a/qemu/target-i386/seg_helper.c
+++ b/qemu/target-i386/seg_helper.c
@@ -18,10 +18,12 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
+#include "exec/log.h"
//#define DEBUG_PCALL
@@ -67,8 +69,9 @@
#endif
/* return non-zero on error */
-static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
- uint32_t *e2_ptr, int selector)
+static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector,
+ uintptr_t retaddr)
{
SegmentCache *dt;
int index;
@@ -84,11 +87,17 @@ static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
return -1;
}
ptr = dt->base + index;
- *e1_ptr = cpu_ldl_kernel(env, ptr);
- *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
+ *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
+ *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
return 0;
}
+static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector)
+{
+ return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
+}
+
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
unsigned int limit;
@@ -124,7 +133,8 @@ static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
- uint32_t *esp_ptr, int dpl)
+ uint32_t *esp_ptr, int dpl,
+ uintptr_t retaddr)
{
X86CPU *cpu = x86_env_get_cpu(env);
int type, index, shift;
@@ -153,60 +163,61 @@ static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
shift = type >> 3;
index = (dpl * 4 + 2) << shift;
if (index + (4 << shift) - 1 > env->tr.limit) {
- raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
}
if (shift == 0) {
- *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
- *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
+ *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
+ *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
} else {
- *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
- *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
+ *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
+ *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
}
}
-static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
+static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
+ uintptr_t retaddr)
{
uint32_t e1, e2;
int rpl, dpl;
if ((selector & 0xfffc) != 0) {
- if (load_segment(env, &e1, &e2, selector) != 0) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
if (!(e2 & DESC_S_MASK)) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
rpl = selector & 3;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (seg_reg == R_CS) {
if (!(e2 & DESC_CS_MASK)) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
if (dpl != rpl) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
} else if (seg_reg == R_SS) {
/* SS must be writable data */
if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
if (dpl != cpl || dpl != rpl) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
} else {
/* not readable code */
if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
/* if data or non-conforming code, check the rights */
if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
if (dpl < cpl || dpl < rpl) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
}
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
}
cpu_x86_load_seg_cache(env, seg_reg, selector,
get_seg_base(e1, e2),
@@ -214,7 +225,7 @@ static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
e2);
} else {
if (seg_reg == R_SS || seg_reg == R_CS) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
}
}
}
@@ -224,9 +235,9 @@ static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
-static void switch_tss(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip)
+static void switch_tss_ra(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, uintptr_t retaddr)
{
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
target_ulong tss_base;
@@ -244,26 +255,26 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* if task gate, we read the TSS segment and we load it */
if (type == 5) {
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
}
tss_selector = e1 >> 16;
if (tss_selector & 4) {
- raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
}
- if (load_segment(env, &e1, &e2, tss_selector) != 0) {
- raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
}
if (e2 & DESC_S_MASK) {
- raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
}
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
if ((type & 7) != 1) {
- raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
}
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
}
if (type & 8) {
@@ -275,7 +286,7 @@ static void switch_tss(CPUX86State *env, int tss_selector,
tss_base = get_seg_base(e1, e2);
if ((tss_selector & 4) != 0 ||
tss_limit < tss_limit_max) {
- raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
}
old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
if (old_type & 8) {
@@ -287,30 +298,33 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* read all the registers from the new TSS */
if (type & 8) {
/* 32 bit */
- new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
- new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
- new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
+ new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
+ new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
+ new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
for (i = 0; i < 8; i++) {
- new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
+ new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
+ retaddr);
}
for (i = 0; i < 6; i++) {
- new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
+ new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
+ retaddr);
}
- new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
- new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
+ new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
+ new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
} else {
/* 16 bit */
new_cr3 = 0;
- new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
- new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
+ new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
+ new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
for (i = 0; i < 8; i++) {
- new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
- 0xffff0000;
+ new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
+ retaddr) | 0xffff0000;
}
for (i = 0; i < 4; i++) {
- new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
+ new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
+ retaddr);
}
- new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
+ new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
new_segs[R_FS] = 0;
new_segs[R_GS] = 0;
new_trap = 0;
@@ -325,10 +339,10 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* XXX: it can still fail in some cases, so a bigger hack is
necessary to validate the TLB after having done the accesses */
- v1 = cpu_ldub_kernel(env, env->tr.base);
- v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
- cpu_stb_kernel(env, env->tr.base, v1);
- cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);
+ v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
+ v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
+ cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
+ cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
@@ -336,9 +350,9 @@ static void switch_tss(CPUX86State *env, int tss_selector,
uint32_t e2;
ptr = env->gdt.base + (env->tr.selector & ~7);
- e2 = cpu_ldl_kernel(env, ptr + 4);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
e2 &= ~DESC_TSS_BUSY_MASK;
- cpu_stl_kernel(env, ptr + 4, e2);
+ cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}
old_eflags = cpu_compute_eflags(env);
if (source == SWITCH_TSS_IRET) {
@@ -348,35 +362,35 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* save the current state in the old TSS */
if (type & 8) {
/* 32 bit */
- cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
- cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
+ cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
for (i = 0; i < 6; i++) {
- cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
- env->segs[i].selector);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
+ env->segs[i].selector, retaddr);
}
} else {
/* 16 bit */
- cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
- cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
+ cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
for (i = 0; i < 4; i++) {
- cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
- env->segs[i].selector);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
+ env->segs[i].selector, retaddr);
}
}
@@ -384,7 +398,7 @@ static void switch_tss(CPUX86State *env, int tss_selector,
context */
if (source == SWITCH_TSS_CALL) {
- cpu_stw_kernel(env, tss_base, env->tr.selector);
+ cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
new_eflags |= NT_MASK;
}
@@ -394,9 +408,9 @@ static void switch_tss(CPUX86State *env, int tss_selector,
uint32_t e2;
ptr = env->gdt.base + (tss_selector & ~7);
- e2 = cpu_ldl_kernel(env, ptr + 4);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
e2 |= DESC_TSS_BUSY_MASK;
- cpu_stl_kernel(env, ptr + 4, e2);
+ cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}
/* set the new CPU state */
@@ -448,23 +462,23 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* load the LDT */
if (new_ldt & 4) {
- raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
}
if ((new_ldt & 0xfffc) != 0) {
dt = &env->gdt;
index = new_ldt & ~7;
if ((index + 7) > dt->limit) {
- raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
}
ptr = dt->base + index;
- e1 = cpu_ldl_kernel(env, ptr);
- e2 = cpu_ldl_kernel(env, ptr + 4);
+ e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
- raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
}
load_seg_cache_raw_dt(&env->ldt, e1, e2);
}
@@ -472,34 +486,35 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* load the segments */
if (!(new_eflags & VM_MASK)) {
int cpl = new_segs[R_CS] & 3;
- tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
- tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
- tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
- tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
- tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
- tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
+ tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
+ tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
+ tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
+ tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
+ tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
+ tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
}
/* check that env->eip is in the CS segment limits */
if (new_eip > env->segs[R_CS].limit) {
/* XXX: different exception if CALL? */
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
}
#ifndef CONFIG_USER_ONLY
/* reset local breakpoints */
if (env->dr[7] & DR7_LOCAL_BP_MASK) {
- for (i = 0; i < DR7_MAX_BP; i++) {
- if (hw_local_breakpoint_enabled(env->dr[7], i) &&
- !hw_global_breakpoint_enabled(env->dr[7], i)) {
- hw_breakpoint_remove(env, i);
- }
- }
- env->dr[7] &= ~DR7_LOCAL_BP_MASK;
+ cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
}
#endif
}
+static void switch_tss(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip)
+{
+ switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+}
+
static inline unsigned int get_sp_mask(unsigned int e2)
{
if (e2 & DESC_B_MASK) {
@@ -549,30 +564,35 @@ static int exception_has_error_code(int intno)
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add an is_user flag to have proper security support */
-#define PUSHW(ssp, sp, sp_mask, val) \
+#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
{ \
sp -= 2; \
- cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
+ cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
}
-#define PUSHL(ssp, sp, sp_mask, val) \
+#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
{ \
sp -= 4; \
- cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
+ cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
}
-#define POPW(ssp, sp, sp_mask, val) \
+#define POPW_RA(ssp, sp, sp_mask, val, ra) \
{ \
- val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
+ val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
sp += 2; \
}
-#define POPL(ssp, sp, sp_mask, val) \
+#define POPL_RA(ssp, sp, sp_mask, val, ra) \
{ \
- val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
+ val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
sp += 4; \
}
+#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
+#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
+#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
+#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
+
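The sp_mask threading in these macros is what keeps 16-bit stacks wrapping inside their segment: sp is adjusted at full width, but only its masked low bits form the effective address. A worked example with a 16-bit SS (sp_mask = 0xffff) and a 32-bit local sp:

    /* PUSHL with sp = 0x0002:
         sp -= 4           ->  sp = 0xfffffffe   (full-width decrement)
         sp & sp_mask      ->  0xfffe            (wraps within the segment)
         store 4 bytes at ssp + 0xfffe
       A 32-bit stack (DESC_B set) gets sp_mask = 0xffffffff from
       get_sp_mask() earlier in this file, so no wrapping occurs. */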
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
int error_code, unsigned int next_eip,
@@ -673,7 +693,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
}
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
- get_ss_esp_from_tss(env, &ss, &esp, dpl);
+ get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
if ((ss & 0xfffc) == 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
@@ -791,18 +811,21 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
#ifdef TARGET_X86_64
-#define PUSHQ(sp, val) \
+#define PUSHQ_RA(sp, val, ra) \
{ \
sp -= 8; \
- cpu_stq_kernel(env, sp, (val)); \
+ cpu_stq_kernel_ra(env, sp, (val), ra); \
}
-#define POPQ(sp, val) \
+#define POPQ_RA(sp, val, ra) \
{ \
- val = cpu_ldq_kernel(env, sp); \
+ val = cpu_ldq_kernel_ra(env, sp, ra); \
sp += 8; \
}
+#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
+#define POPQ(sp, val) POPQ_RA(sp, val, 0)
+
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
X86CPU *cpu = x86_env_get_cpu(env);
@@ -961,7 +984,7 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
int selector;
if (!(env->efer & MSR_EFER_SCE)) {
- raise_exception_err(env, EXCP06_ILLOP, 0);
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
}
selector = (env->star >> 32) & 0xffff;
if (env->hflags & HF_LMA_MASK) {
@@ -1016,11 +1039,11 @@ void helper_sysret(CPUX86State *env, int dflag)
int cpl, selector;
if (!(env->efer & MSR_EFER_SCE)) {
- raise_exception_err(env, EXCP06_ILLOP, 0);
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
}
cpl = env->hflags & HF_CPL_MASK;
if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
selector = (env->star >> 48) & 0xffff;
if (env->hflags & HF_LMA_MASK) {
@@ -1294,6 +1317,9 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (interrupt_request & CPU_INTERRUPT_POLL) {
cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(cpu->apic_state);
+ /* Don't process multiple interrupt requests in a single call.
+ This is required to make icount-driven execution deterministic. */
+ return true;
}
#endif
if (interrupt_request & CPU_INTERRUPT_SIPI) {
@@ -1353,74 +1379,6 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return ret;
}
-void helper_enter_level(CPUX86State *env, int level, int data32,
- target_ulong t1)
-{
- target_ulong ssp;
- uint32_t esp_mask, esp, ebp;
-
- esp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- ebp = env->regs[R_EBP];
- esp = env->regs[R_ESP];
- if (data32) {
- /* 32 bit */
- esp -= 4;
- while (--level) {
- esp -= 4;
- ebp -= 4;
- cpu_stl_data(env, ssp + (esp & esp_mask),
- cpu_ldl_data(env, ssp + (ebp & esp_mask)));
- }
- esp -= 4;
- cpu_stl_data(env, ssp + (esp & esp_mask), t1);
- } else {
- /* 16 bit */
- esp -= 2;
- while (--level) {
- esp -= 2;
- ebp -= 2;
- cpu_stw_data(env, ssp + (esp & esp_mask),
- cpu_lduw_data(env, ssp + (ebp & esp_mask)));
- }
- esp -= 2;
- cpu_stw_data(env, ssp + (esp & esp_mask), t1);
- }
-}
-
-#ifdef TARGET_X86_64
-void helper_enter64_level(CPUX86State *env, int level, int data64,
- target_ulong t1)
-{
- target_ulong esp, ebp;
-
- ebp = env->regs[R_EBP];
- esp = env->regs[R_ESP];
-
- if (data64) {
- /* 64 bit */
- esp -= 8;
- while (--level) {
- esp -= 8;
- ebp -= 8;
- cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
- }
- esp -= 8;
- cpu_stq_data(env, esp, t1);
- } else {
- /* 16 bit */
- esp -= 2;
- while (--level) {
- esp -= 2;
- ebp -= 2;
- cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
- }
- esp -= 2;
- cpu_stw_data(env, esp, t1);
- }
-}
-#endif
-
void helper_lldt(CPUX86State *env, int selector)
{
SegmentCache *dt;
@@ -1435,7 +1393,7 @@ void helper_lldt(CPUX86State *env, int selector)
env->ldt.limit = 0;
} else {
if (selector & 0x4) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
dt = &env->gdt;
index = selector & ~7;
@@ -1448,22 +1406,22 @@ void helper_lldt(CPUX86State *env, int selector)
entry_limit = 7;
}
if ((index + entry_limit) > dt->limit) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
ptr = dt->base + index;
- e1 = cpu_ldl_kernel(env, ptr);
- e2 = cpu_ldl_kernel(env, ptr + 4);
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
}
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
uint32_t e3;
- e3 = cpu_ldl_kernel(env, ptr + 8);
+ e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
load_seg_cache_raw_dt(&env->ldt, e1, e2);
env->ldt.base |= (target_ulong)e3 << 32;
} else
@@ -1490,7 +1448,7 @@ void helper_ltr(CPUX86State *env, int selector)
env->tr.flags = 0;
} else {
if (selector & 0x4) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
dt = &env->gdt;
index = selector & ~7;
@@ -1503,27 +1461,27 @@ void helper_ltr(CPUX86State *env, int selector)
entry_limit = 7;
}
if ((index + entry_limit) > dt->limit) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
ptr = dt->base + index;
- e1 = cpu_ldl_kernel(env, ptr);
- e2 = cpu_ldl_kernel(env, ptr + 4);
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
if ((e2 & DESC_S_MASK) ||
(type != 1 && type != 9)) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
}
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
uint32_t e3, e4;
- e3 = cpu_ldl_kernel(env, ptr + 8);
- e4 = cpu_ldl_kernel(env, ptr + 12);
+ e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
+ e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
load_seg_cache_raw_dt(&env->tr, e1, e2);
env->tr.base |= (target_ulong)e3 << 32;
@@ -1533,7 +1491,7 @@ void helper_ltr(CPUX86State *env, int selector)
load_seg_cache_raw_dt(&env->tr, e1, e2);
}
e2 |= DESC_TSS_BUSY_MASK;
- cpu_stl_kernel(env, ptr + 4, e2);
+ cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
}
env->tr.selector = selector;
}
@@ -1556,7 +1514,7 @@ void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
&& (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
} else {
@@ -1568,51 +1526,51 @@ void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
}
index = selector & ~7;
if ((index + 7) > dt->limit) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
ptr = dt->base + index;
- e1 = cpu_ldl_kernel(env, ptr);
- e2 = cpu_ldl_kernel(env, ptr + 4);
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
if (!(e2 & DESC_S_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
rpl = selector & 3;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (seg_reg == R_SS) {
/* must be writable segment */
if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
if (rpl != cpl || dpl != cpl) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
} else {
/* must be readable segment */
if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
/* if not conforming code, test rights */
if (dpl < cpl || dpl < rpl) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
}
}
if (!(e2 & DESC_P_MASK)) {
if (seg_reg == R_SS) {
- raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
} else {
- raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
}
}
/* set the access bit if not already set */
if (!(e2 & DESC_A_MASK)) {
e2 |= DESC_A_MASK;
- cpu_stl_kernel(env, ptr + 4, e2);
+ cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
}
cpu_x86_load_seg_cache(env, seg_reg, selector,
@@ -1628,46 +1586,45 @@ void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
- int next_eip_addend)
+ target_ulong next_eip)
{
int gate_cs, type;
uint32_t e1, e2, cpl, dpl, rpl, limit;
- target_ulong next_eip;
if ((new_cs & 0xfffc) == 0) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
- if (load_segment(env, &e1, &e2, new_cs) != 0) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (e2 & DESC_C_MASK) {
/* conforming code segment */
if (dpl > cpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
} else {
/* non conforming code segment */
rpl = new_cs & 3;
if (rpl > cpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
if (dpl != cpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
}
limit = get_seg_limit(e1, e2);
if (new_eip > limit &&
!(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
@@ -1683,50 +1640,49 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
case 9: /* 386 TSS */
case 5: /* task gate */
if (dpl < cpl || dpl < rpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- next_eip = env->eip + next_eip_addend;
- switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
if ((dpl < cpl) || (dpl < rpl)) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
}
gate_cs = e1 >> 16;
new_eip = (e1 & 0xffff);
if (type == 12) {
new_eip |= (e2 & 0xffff0000);
}
- if (load_segment(env, &e1, &e2, gate_cs) != 0) {
- raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
/* must be code segment */
if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
(DESC_S_MASK | DESC_CS_MASK))) {
- raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
}
if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
(!(e2 & DESC_C_MASK) && (dpl != cpl))) {
- raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
}
limit = get_seg_limit(e1, e2);
if (new_eip > limit) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
env->eip = new_eip;
break;
default:
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
break;
}
}
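Note the signature change: the helper now receives the precomputed next_eip instead of an addend, so the translator passes the fall-through EIP directly. A hedged sketch of the matching call site (the exact temps used in translate.c may differ):

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);      /* new CS selector */
    gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                              tcg_const_tl(s->pc - s->cs_base));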
@@ -1745,11 +1701,11 @@ void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
esp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
if (shift) {
- PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, esp, esp_mask, next_eip);
+ PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
} else {
- PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, esp, esp_mask, next_eip);
+ PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
}
SET_ESP(esp, esp_mask);
@@ -1760,47 +1716,46 @@ void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
- int shift, int next_eip_addend)
+ int shift, target_ulong next_eip)
{
int new_stack, i;
uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
uint32_t val, limit, old_sp_mask;
- target_ulong ssp, old_ssp, next_eip;
+ target_ulong ssp, old_ssp;
- next_eip = env->eip + next_eip_addend;
LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
if ((new_cs & 0xfffc) == 0) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
- if (load_segment(env, &e1, &e2, new_cs) != 0) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
cpl = env->hflags & HF_CPL_MASK;
LOG_PCALL("desc=%08x:%08x\n", e1, e2);
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (e2 & DESC_C_MASK) {
/* conforming code segment */
if (dpl > cpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
} else {
/* non conforming code segment */
rpl = new_cs & 3;
if (rpl > cpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
if (dpl != cpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
}
#ifdef TARGET_X86_64
@@ -1810,8 +1765,8 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
/* 64 bit case */
rsp = env->regs[R_ESP];
- PUSHQ(rsp, env->segs[R_CS].selector);
- PUSHQ(rsp, next_eip);
+ PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
+ PUSHQ_RA(rsp, next_eip, GETPC());
/* from this point, not restartable */
env->regs[R_ESP] = rsp;
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
@@ -1825,16 +1780,16 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
sp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
if (shift) {
- PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, sp, sp_mask, next_eip);
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
} else {
- PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, sp, sp_mask, next_eip);
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
}
limit = get_seg_limit(e1, e2);
if (new_eip > limit) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
/* from this point, not restartable */
SET_ESP(sp, sp_mask);
@@ -1852,73 +1807,73 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
case 9: /* available 386 TSS */
case 5: /* task gate */
if (dpl < cpl || dpl < rpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
break;
default:
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
break;
}
shift = type >> 3;
if (dpl < cpl || dpl < rpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
/* check valid bit */
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
}
selector = e1 >> 16;
offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
param_count = e2 & 0x1f;
if ((selector & 0xfffc) == 0) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
- if (load_segment(env, &e1, &e2, selector) != 0) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (dpl > cpl) {
- raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
}
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
- get_ss_esp_from_tss(env, &ss, &sp, dpl);
+ get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
TARGET_FMT_lx "\n", ss, sp, param_count,
env->regs[R_ESP]);
if ((ss & 0xfffc) == 0) {
- raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
if ((ss & 3) != dpl) {
- raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
- if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
- raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
if (ss_dpl != dpl) {
- raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK)) {
- raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
if (!(ss_e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
/* push_size = ((param_count * 2) + 8) << shift; */
@@ -1929,22 +1884,22 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
sp_mask = get_sp_mask(ss_e2);
ssp = get_seg_base(ss_e1, ss_e2);
if (shift) {
- PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
- PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
for (i = param_count - 1; i >= 0; i--) {
- val = cpu_ldl_kernel(env, old_ssp +
- ((env->regs[R_ESP] + i * 4) &
- old_sp_mask));
- PUSHL(ssp, sp, sp_mask, val);
+ val = cpu_ldl_kernel_ra(env, old_ssp +
+ ((env->regs[R_ESP] + i * 4) &
+ old_sp_mask), GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
}
} else {
- PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
- PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
for (i = param_count - 1; i >= 0; i--) {
- val = cpu_lduw_kernel(env, old_ssp +
- ((env->regs[R_ESP] + i * 2) &
- old_sp_mask));
- PUSHW(ssp, sp, sp_mask, val);
+ val = cpu_lduw_kernel_ra(env, old_ssp +
+ ((env->regs[R_ESP] + i * 2) &
+ old_sp_mask), GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
}
}
new_stack = 1;
@@ -1958,11 +1913,11 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
}
if (shift) {
- PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, sp, sp_mask, next_eip);
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
} else {
- PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, sp, sp_mask, next_eip);
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
}
/* from this point, not restartable */
@@ -1997,15 +1952,15 @@ void helper_iret_real(CPUX86State *env, int shift)
ssp = env->segs[R_SS].base;
if (shift == 1) {
/* 32 bits */
- POPL(ssp, sp, sp_mask, new_eip);
- POPL(ssp, sp, sp_mask, new_cs);
+ POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
+ POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
new_cs &= 0xffff;
- POPL(ssp, sp, sp_mask, new_eflags);
+ POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
} else {
/* 16 bits */
- POPW(ssp, sp, sp_mask, new_eip);
- POPW(ssp, sp, sp_mask, new_cs);
- POPW(ssp, sp, sp_mask, new_eflags);
+ POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
+ POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
+ POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
}
env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
env->segs[R_CS].selector = new_cs;
@@ -2050,7 +2005,8 @@ static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
- int is_iret, int addend)
+ int is_iret, int addend,
+ uintptr_t retaddr)
{
uint32_t new_cs, new_eflags, new_ss;
uint32_t new_es, new_ds, new_fs, new_gs;
@@ -2071,32 +2027,32 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
if (shift == 2) {
- POPQ(sp, new_eip);
- POPQ(sp, new_cs);
+ POPQ_RA(sp, new_eip, retaddr);
+ POPQ_RA(sp, new_cs, retaddr);
new_cs &= 0xffff;
if (is_iret) {
- POPQ(sp, new_eflags);
+ POPQ_RA(sp, new_eflags, retaddr);
}
} else
#endif
{
if (shift == 1) {
/* 32 bits */
- POPL(ssp, sp, sp_mask, new_eip);
- POPL(ssp, sp, sp_mask, new_cs);
+ POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
new_cs &= 0xffff;
if (is_iret) {
- POPL(ssp, sp, sp_mask, new_eflags);
+ POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
if (new_eflags & VM_MASK) {
goto return_to_vm86;
}
}
} else {
/* 16 bits */
- POPW(ssp, sp, sp_mask, new_eip);
- POPW(ssp, sp, sp_mask, new_cs);
+ POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
+ POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
if (is_iret) {
- POPW(ssp, sp, sp_mask, new_eflags);
+ POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
}
}
}
@@ -2104,32 +2060,32 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
new_cs, new_eip, shift, addend);
LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
if ((new_cs & 0xfffc) == 0) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
}
- if (load_segment(env, &e1, &e2, new_cs) != 0) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
}
if (!(e2 & DESC_S_MASK) ||
!(e2 & DESC_CS_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
}
cpl = env->hflags & HF_CPL_MASK;
rpl = new_cs & 3;
if (rpl < cpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (e2 & DESC_C_MASK) {
if (dpl > rpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
}
} else {
if (dpl != rpl) {
- raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
}
}
if (!(e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
}
sp += addend;
@@ -2144,21 +2100,21 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
/* return to different privilege level */
#ifdef TARGET_X86_64
if (shift == 2) {
- POPQ(sp, new_esp);
- POPQ(sp, new_ss);
+ POPQ_RA(sp, new_esp, retaddr);
+ POPQ_RA(sp, new_ss, retaddr);
new_ss &= 0xffff;
} else
#endif
{
if (shift == 1) {
/* 32 bits */
- POPL(ssp, sp, sp_mask, new_esp);
- POPL(ssp, sp, sp_mask, new_ss);
+ POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
new_ss &= 0xffff;
} else {
/* 16 bits */
- POPW(ssp, sp, sp_mask, new_esp);
- POPW(ssp, sp, sp_mask, new_ss);
+ POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
}
}
LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
@@ -2177,26 +2133,26 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
} else
#endif
{
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
}
} else {
if ((new_ss & 3) != rpl) {
- raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
}
- if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
- raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
}
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK)) {
- raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
}
dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
if (dpl != rpl) {
- raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
}
if (!(ss_e2 & DESC_P_MASK)) {
- raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
}
cpu_x86_load_seg_cache(env, R_SS, new_ss,
get_seg_base(ss_e1, ss_e2),
@@ -2246,12 +2202,12 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
return;
return_to_vm86:
- POPL(ssp, sp, sp_mask, new_esp);
- POPL(ssp, sp, sp_mask, new_ss);
- POPL(ssp, sp, sp_mask, new_es);
- POPL(ssp, sp, sp_mask, new_ds);
- POPL(ssp, sp, sp_mask, new_fs);
- POPL(ssp, sp, sp_mask, new_gs);
+ POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
/* modify processor state */
cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
@@ -2277,37 +2233,37 @@ void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
#endif
- tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
+ tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
if (tss_selector & 4) {
- raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
}
- if (load_segment(env, &e1, &e2, tss_selector) != 0) {
- raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
}
type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
/* NOTE: we check both segment and busy TSS */
if (type != 3) {
- raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
}
- switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
+ switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
} else {
- helper_ret_protected(env, shift, 1, 0);
+ helper_ret_protected(env, shift, 1, 0, GETPC());
}
env->hflags2 &= ~HF2_NMI_MASK;
}
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
- helper_ret_protected(env, shift, 0, addend);
+ helper_ret_protected(env, shift, 0, addend, GETPC());
}
void helper_sysenter(CPUX86State *env)
{
if (env->sysenter_cs == 0) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
@@ -2343,7 +2299,7 @@ void helper_sysexit(CPUX86State *env, int dflag)
cpl = env->hflags & HF_CPL_MASK;
if (env->sysenter_cs == 0 || cpl != 0) {
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
#ifdef TARGET_X86_64
if (dflag == 2) {
@@ -2387,7 +2343,7 @@ target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(env, &e1, &e2, selector) != 0) {
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
goto fail;
}
rpl = selector & 3;
@@ -2434,7 +2390,7 @@ target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(env, &e1, &e2, selector) != 0) {
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
goto fail;
}
rpl = selector & 3;
@@ -2483,7 +2439,7 @@ void helper_verr(CPUX86State *env, target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(env, &e1, &e2, selector) != 0) {
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
goto fail;
}
if (!(e2 & DESC_S_MASK)) {
@@ -2521,7 +2477,7 @@ void helper_verw(CPUX86State *env, target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(env, &e1, &e2, selector) != 0) {
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
goto fail;
}
if (!(e2 & DESC_S_MASK)) {
@@ -2562,7 +2518,8 @@ void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
#endif
/* check if Port I/O is allowed in TSS */
-static inline void check_io(CPUX86State *env, int addr, int size)
+static inline void check_io(CPUX86State *env, int addr, int size,
+ uintptr_t retaddr)
{
int io_offset, val, mask;
@@ -2572,33 +2529,33 @@ static inline void check_io(CPUX86State *env, int addr, int size)
env->tr.limit < 103) {
goto fail;
}
- io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
+ io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
io_offset += (addr >> 3);
/* Note: the check needs two bytes */
if ((io_offset + 1) > env->tr.limit) {
goto fail;
}
- val = cpu_lduw_kernel(env, env->tr.base + io_offset);
+ val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
val >>= (addr & 7);
mask = (1 << size) - 1;
/* all bits must be zero to allow the I/O */
if ((val & mask) != 0) {
fail:
- raise_exception_err(env, EXCP0D_GPF, 0);
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
}
}
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
- check_io(env, t0, 1);
+ check_io(env, t0, 1, GETPC());
}
void helper_check_iow(CPUX86State *env, uint32_t t0)
{
- check_io(env, t0, 2);
+ check_io(env, t0, 2, GETPC());
}
void helper_check_iol(CPUX86State *env, uint32_t t0)
{
- check_io(env, t0, 4);
+ check_io(env, t0, 4, GETPC());
}
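The permission test itself is unchanged by the retaddr plumbing: the TSS stores a 16-bit I/O map base at offset 0x66, each port is one bit in the map, and any set bit within the size-wide mask raises #GP(0). A worked example for a 1-byte access to port 0x3F8 (the map base value is illustrative):

    int io_map_base = 0x68;                      /* word at tr.base + 0x66 */
    int io_offset = io_map_base + (0x3F8 >> 3);  /* = 0x68 + 0x7F = 0xE7   */
    int bit  = 0x3F8 & 7;                        /* = 0                    */
    int mask = (1 << 1) - 1;                     /* size 1 -> 1 bit        */
    /* allowed iff (((u16 at tr.base + io_offset) >> bit) & mask) == 0 */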
diff --git a/qemu/target-i386/smm_helper.c b/qemu/target-i386/smm_helper.c
index 02e24b923..4dd6a2c54 100644
--- a/qemu/target-i386/smm_helper.c
+++ b/qemu/target-i386/smm_helper.c
@@ -17,8 +17,10 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "exec/log.h"
/* SMM support */
@@ -97,6 +99,10 @@ void do_smm_enter(X86CPU *cpu)
x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+ /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
+ is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has
+ 7EA0-7ED7 as "reserved". What's this, and what's really
+ supposed to happen? */
x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
@@ -266,7 +272,7 @@ void helper_rsm(CPUX86State *env)
val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
if (val & 0x20000) {
- env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00) & ~0x7fff;
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
}
#else
cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
@@ -319,7 +325,7 @@ void helper_rsm(CPUX86State *env)
val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
if (val & 0x20000) {
- env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8) & ~0x7fff;
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
}
#endif
if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
diff --git a/qemu/target-i386/svm_helper.c b/qemu/target-i386/svm_helper.c
index f1fabf54e..ab472f6ee 100644
--- a/qemu/target-i386/svm_helper.c
+++ b/qemu/target-i386/svm_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
diff --git a/qemu/target-i386/translate.c b/qemu/target-i386/translate.c
index 82e2245bf..1a1214dcb 100644
--- a/qemu/target-i386/translate.c
+++ b/qemu/target-i386/translate.c
@@ -16,12 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
-#include <signal.h>
+#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "cpu.h"
@@ -33,6 +28,7 @@
#include "exec/helper-gen.h"
#include "trace-tcg.h"
+#include "exec/log.h"
#define PREFIX_REPZ 0x01
@@ -60,24 +56,37 @@
# define clztl clz32
#endif
+/* For a switch indexed by MODRM, match all memory operands for a given OP. */
+#define CASE_MODRM_MEM_OP(OP) \
+ case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+ case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+ case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
+
+#define CASE_MODRM_OP(OP) \
+ case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+ case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+ case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
+ case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
+
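These macros rely on GCC's case-range extension (already used elsewhere in QEMU) to match every ModRM byte whose middle reg/op field equals OP; the _MEM_ variant leaves out mod == 3, i.e. register operands. Expanding CASE_MODRM_MEM_OP(4) by hand:

    /* CASE_MODRM_MEM_OP(4) matches: */
    case 0x20 ... 0x27:    /* mod = 0, op = 4, rm = 0..7 */
    case 0x60 ... 0x67:    /* mod = 1                    */
    case 0xa0 ... 0xa7:    /* mod = 2; 0xe0..0xe7 (mod = 3)
                              is matched only by CASE_MODRM_OP(4) */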
//#define MACRO_TEST 1
/* global register indexes */
-static TCGv_ptr cpu_env;
+static TCGv_env cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
+static TCGv cpu_seg_base[6];
+static TCGv_i64 cpu_bndl[4];
+static TCGv_i64 cpu_bndu[4];
/* local temps */
-static TCGv cpu_T[2];
+static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
-static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
-
#include "exec/gen-icount.h"
#ifdef TARGET_X86_64
@@ -90,6 +99,7 @@ typedef struct DisasContext {
int prefix;
TCGMemOp aflag;
TCGMemOp dflag;
+ target_ulong pc_start;
target_ulong pc; /* pc = eip + cs_base */
int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
static state change (stop translation) */
@@ -126,6 +136,7 @@ typedef struct DisasContext {
int cpuid_ext2_features;
int cpuid_ext3_features;
int cpuid_7_0_ebx_features;
+ int cpuid_xsave_features;
} DisasContext;
static void gen_eob(DisasContext *s);
@@ -310,6 +321,12 @@ static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
}
}
+/* Select the size of the stack pointer. */
+static inline TCGMemOp mo_stacksize(DisasContext *s)
+{
+ return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+}
+
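mo_stacksize folds the CODE64/ss32 test that push/pop code previously open-coded at each site. A hedged sketch of a caller (the real gen_push_v in this file may differ in detail); the stack-pointer width and the operand width are selected independently:

    TCGMemOp a_ot = mo_stacksize(s);          /* width of SP updates  */
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);  /* width of pushed data */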
/* Select only size 64 else 32. Used for SSE operand sizes. */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
@@ -372,34 +389,12 @@ static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
}
}
-static inline void gen_op_movl_A0_reg(int reg)
-{
- tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
-}
-
-static inline void gen_op_addl_A0_im(int32_t val)
-{
- tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
-#ifdef TARGET_X86_64
- tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
-#endif
-}
-
-#ifdef TARGET_X86_64
-static inline void gen_op_addq_A0_im(int64_t val)
-{
- tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
-}
-#endif
-
static void gen_add_A0_im(DisasContext *s, int val)
{
-#ifdef TARGET_X86_64
- if (CODE64(s))
- gen_op_addq_A0_im(val);
- else
-#endif
- gen_op_addl_A0_im(val);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ }
}
static inline void gen_op_jmp_v(TCGv dest)
@@ -415,68 +410,10 @@ static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
- tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
+ tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
gen_op_mov_reg_v(size, reg, cpu_tmp0);
}
-static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
-{
- tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
- if (shift != 0)
- tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- /* For x86_64, this sets the higher half of register to zero.
- For i386, this is equivalent to a nop. */
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
-}
-
-static inline void gen_op_movl_A0_seg(int reg)
-{
- tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
-}
-
-static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
-{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- } else {
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
- }
-#else
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
-#endif
-}
-
-#ifdef TARGET_X86_64
-static inline void gen_op_movq_A0_seg(int reg)
-{
- tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
-}
-
-static inline void gen_op_addq_A0_seg(int reg)
-{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
-}
-
-static inline void gen_op_movq_A0_reg(int reg)
-{
- tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
-}
-
-static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
-{
- tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
- if (shift != 0)
- tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
-}
-#endif
-
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
@@ -490,9 +427,9 @@ static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
if (d == OR_TMP0) {
- gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, idx, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(idx, d, cpu_T[0]);
+ gen_op_mov_reg_v(idx, d, cpu_T0);
}
}
@@ -502,74 +439,77 @@ static inline void gen_jmp_im(target_ulong pc)
gen_op_jmp_v(cpu_tmp0);
}
-static inline void gen_string_movl_A0_ESI(DisasContext *s)
+/* Compute SEG:REG into A0. SEG is selected from the override segment
+ (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
+ indicate no override. */
+static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
+ int def_seg, int ovr_seg)
{
- int override;
-
- override = s->override;
- switch (s->aflag) {
+ switch (aflag) {
#ifdef TARGET_X86_64
case MO_64:
- if (override >= 0) {
- gen_op_movq_A0_seg(override);
- gen_op_addq_A0_reg_sN(0, R_ESI);
- } else {
- gen_op_movq_A0_reg(R_ESI);
+ if (ovr_seg < 0) {
+ tcg_gen_mov_tl(cpu_A0, a0);
+ return;
}
break;
#endif
case MO_32:
/* 32 bit address */
- if (s->addseg && override < 0)
- override = R_DS;
- if (override >= 0) {
- gen_op_movl_A0_seg(override);
- gen_op_addl_A0_reg_sN(0, R_ESI);
- } else {
- gen_op_movl_A0_reg(R_ESI);
+ if (ovr_seg < 0) {
+ if (s->addseg) {
+ ovr_seg = def_seg;
+ } else {
+ tcg_gen_ext32u_tl(cpu_A0, a0);
+ return;
+ }
}
break;
case MO_16:
- /* 16 address, always override */
- if (override < 0)
- override = R_DS;
- tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
- gen_op_addl_A0_seg(s, override);
+ /* 16 bit address */
+ tcg_gen_ext16u_tl(cpu_A0, a0);
+ a0 = cpu_A0;
+ if (ovr_seg < 0) {
+ if (s->addseg) {
+ ovr_seg = def_seg;
+ } else {
+ return;
+ }
+ }
break;
default:
tcg_abort();
}
-}
-static inline void gen_string_movl_A0_EDI(DisasContext *s)
-{
- switch (s->aflag) {
-#ifdef TARGET_X86_64
- case MO_64:
- gen_op_movq_A0_reg(R_EDI);
- break;
-#endif
- case MO_32:
- if (s->addseg) {
- gen_op_movl_A0_seg(R_ES);
- gen_op_addl_A0_reg_sN(0, R_EDI);
+ if (ovr_seg >= 0) {
+ TCGv seg = cpu_seg_base[ovr_seg];
+
+ if (aflag == MO_64) {
+ tcg_gen_add_tl(cpu_A0, a0, seg);
+ } else if (CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, a0);
+ tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
} else {
- gen_op_movl_A0_reg(R_EDI);
+ tcg_gen_add_tl(cpu_A0, a0, seg);
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
- break;
- case MO_16:
- tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
- gen_op_addl_A0_seg(s, R_ES);
- break;
- default:
- tcg_abort();
}
}
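gen_lea_v_seg centralizes what the deleted gen_op_{movl,addl,movq,addq}_A0_* helpers did piecemeal: truncate the base value to the address size, then optionally add a segment base, now held in the cpu_seg_base[] TCG globals. The string-op wrappers just below become one-liners on top of it. As a worked example, a 16-bit access through a segment outside 64-bit mode reduces to:

    /* 16-bit address with a segment base (not CODE64): */
    A0 = (uint16_t)a0;        /* tcg_gen_ext16u_tl               */
    A0 = A0 + seg_base;       /* tcg_gen_add_tl                  */
    A0 = (uint32_t)A0;        /* tcg_gen_ext32u_tl: wrap into the
                                 32-bit linear address space     */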
+static inline void gen_string_movl_A0_ESI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
+}
+
+static inline void gen_string_movl_A0_EDI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
+}
+
static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
- tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
+ tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
+ tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
};
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
@@ -664,15 +604,10 @@ static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
uint32_t svm_flags)
{
- int state_saved;
target_ulong next_eip;
- state_saved = 0;
if (s->pe && (s->cpl > s->iopl || s->vm86)) {
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
- state_saved = 1;
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
switch (ot) {
case MO_8:
gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
@@ -688,13 +623,11 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
}
}
if(s->flags & HF_SVMI_MASK) {
- if (!state_saved) {
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
- }
+ gen_update_cc_op(s);
+ gen_jmp_im(cur_eip);
svm_flags |= (1 << (4 + ot));
next_eip = s->pc - s->cs_base;
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
tcg_const_i32(svm_flags),
tcg_const_i32(next_eip - cur_eip));
@@ -704,9 +637,9 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_ESI(s);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
gen_string_movl_A0_EDI(s);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
gen_op_add_reg_T0(s->aflag, R_EDI);
@@ -714,31 +647,31 @@ static inline void gen_movs(DisasContext *s, TCGMemOp ot)
static void gen_op_update1_cc(void)
{
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static void gen_op_update2_cc(void)
{
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static void gen_op_update3_cc(TCGv reg)
{
tcg_gen_mov_tl(cpu_cc_src2, reg);
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static inline void gen_op_testl_T0_T1_cc(void)
{
- tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
+ tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
}
static void gen_op_update_neg_cc(void)
{
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
tcg_gen_movi_tl(cpu_cc_srcT, 0);
}
@@ -1080,11 +1013,11 @@ static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
- CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
+ CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
if (cc.mask != -1) {
- tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
- cc.reg = cpu_T[0];
+ tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+ cc.reg = cpu_T0;
}
if (cc.use_reg2) {
tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
@@ -1098,12 +1031,12 @@ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
- CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
+ CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
gen_update_cc_op(s);
if (cc.mask != -1) {
- tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
- cc.reg = cpu_T[0];
+ tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+ cc.reg = cpu_T0;
}
set_cc_op(s, CC_OP_DYNAMIC);
if (cc.use_reg2) {
@@ -1128,9 +1061,9 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
gen_string_movl_A0_EDI(s);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
}
@@ -1138,8 +1071,8 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot)
static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_ESI(s);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
}
@@ -1147,7 +1080,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot)
static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_EDI(s);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_op(s, OP_CMPL, ot, R_EAX);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
@@ -1156,7 +1089,7 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot)
static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_EDI(s);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_string_movl_A0_ESI(s);
gen_op(s, OP_CMPL, ot, OR_TMP0);
gen_op_movl_T0_Dshift(ot);
@@ -1164,6 +1097,19 @@ static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
gen_op_add_reg_T0(s->aflag, R_EDI);
}
+static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
+{
+ if (s->flags & HF_IOBPT_MASK) {
+ TCGv_i32 t_size = tcg_const_i32(1 << ot);
+ TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
+
+ gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
+ tcg_temp_free_i32(t_size);
+ tcg_temp_free(t_next);
+ }
+}
+
+
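gen_bpt_io is the translator-side hook for the new I/O breakpoint support (see bpt_helper.c in the diffstat): when HF_IOBPT_MASK is set, it emits a call to the bpt_io helper with the port, the access width in bytes, and the EIP of the next instruction. The callers added below in gen_ins/gen_outs show the intended pattern:

    /* After the port access, with the port number in a temp: */
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    /* ... emit the in/out itself ... */
    gen_bpt_io(s, cpu_tmp2_i32, ot);   /* no-op unless HF_IOBPT_MASK */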
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
if (s->tb->cflags & CF_USE_ICOUNT) {
@@ -1172,14 +1118,15 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot)
gen_string_movl_A0_EDI(s);
/* Note: we must do this dummy write first to be restartable in
case of page fault. */
- tcg_gen_movi_tl(cpu_T[0], 0);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ tcg_gen_movi_tl(cpu_T0, 0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
- gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
@@ -1191,15 +1138,15 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot)
gen_io_start();
}
gen_string_movl_A0_ESI(s);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
-
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
@@ -1309,63 +1256,63 @@ static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
if (d != OR_TMP0) {
- gen_op_mov_v_reg(ot, cpu_T[0], d);
+ gen_op_mov_v_reg(ot, cpu_T0, d);
} else {
- gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
}
switch(op) {
case OP_ADCL:
gen_compute_eflags_c(s1, cpu_tmp4);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update3_cc(cpu_tmp4);
set_cc_op(s1, CC_OP_ADCB + ot);
break;
case OP_SBBL:
gen_compute_eflags_c(s1, cpu_tmp4);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update3_cc(cpu_tmp4);
set_cc_op(s1, CC_OP_SBBB + ot);
break;
case OP_ADDL:
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update2_cc();
set_cc_op(s1, CC_OP_ADDB + ot);
break;
case OP_SUBL:
- tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update2_cc();
set_cc_op(s1, CC_OP_SUBB + ot);
break;
default:
case OP_ANDL:
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update1_cc();
set_cc_op(s1, CC_OP_LOGICB + ot);
break;
case OP_ORL:
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update1_cc();
set_cc_op(s1, CC_OP_LOGICB + ot);
break;
case OP_XORL:
- tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update1_cc();
set_cc_op(s1, CC_OP_LOGICB + ot);
break;
case OP_CMPL:
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
set_cc_op(s1, CC_OP_SUBB + ot);
break;
}
@@ -1375,20 +1322,20 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
if (d != OR_TMP0) {
- gen_op_mov_v_reg(ot, cpu_T[0], d);
+ gen_op_mov_v_reg(ot, cpu_T0, d);
} else {
- gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
}
gen_compute_eflags_c(s1, cpu_cc_src);
if (c > 0) {
- tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
+ tcg_gen_addi_tl(cpu_T0, cpu_T0, 1);
set_cc_op(s1, CC_OP_INCB + ot);
} else {
- tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
+ tcg_gen_addi_tl(cpu_T0, cpu_T0, -1);
set_cc_op(s1, CC_OP_DECB + ot);
}
gen_op_st_rm_T0_A0(s1, ot, d);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
@@ -1443,33 +1390,33 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
- tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
- tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
+ tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
if (is_right) {
if (is_arith) {
- gen_exts(ot, cpu_T[0]);
- tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_exts(ot, cpu_T0);
+ tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
- gen_extu(ot, cpu_T[0]);
- tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
}
} else {
- tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
}
/* store */
gen_op_st_rm_T0_A0(s, ot, op1);
- gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
+ gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
}
static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
@@ -1479,25 +1426,25 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
/* load */
if (op1 == OR_TMP0)
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
else
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
op2 &= mask;
if (op2 != 0) {
if (is_right) {
if (is_arith) {
- gen_exts(ot, cpu_T[0]);
- tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
+ gen_exts(ot, cpu_T0);
+ tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
} else {
- gen_extu(ot, cpu_T[0]);
- tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
}
} else {
- tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
+ tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
}
}
@@ -1507,7 +1454,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
/* update eflags if non zero shift */
if (op2 != 0) {
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
}
}
@@ -1519,41 +1466,41 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
- tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
switch (ot) {
case MO_8:
/* Replicate the 8-bit input so that a 32-bit rotate works. */
- tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
goto do_long;
case MO_16:
/* Replicate the 16-bit input so that a 32-bit rotate works. */
- tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
+ tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
goto do_long;
do_long:
#ifdef TARGET_X86_64
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
if (is_right) {
tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
} else {
tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
}
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
break;
#endif
default:
if (is_right) {
- tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
- tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
}
break;
}
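The MO_8/MO_16 replication trick lets one 32-bit rotate produce a correct narrow rotate for any count, because the replicated bit pattern is periodic in the operand width. A quick worked check for an 8-bit rotate-left by 3:

    uint32_t v = 0xB1 * 0x01010101;     /* 0xB1B1B1B1                */
    uint32_t r = (v << 3) | (v >> 29);  /* 32-bit rotl -> 0x8D8D8D8D */
    uint8_t res = r & 0xff;             /* 0x8D == rol8(0xB1, 3)     */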
@@ -1569,12 +1516,12 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
since we've computed the flags into CC_SRC, these variables are
currently dead. */
if (is_right) {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
- tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
} else {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
- tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
}
tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
@@ -1585,7 +1532,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
exactly as we computed above. */
t0 = tcg_const_i32(0);
t1 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(t1, cpu_T1);
tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
@@ -1605,9 +1552,9 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
op2 &= mask;
@@ -1615,20 +1562,20 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
switch (ot) {
#ifdef TARGET_X86_64
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
if (is_right) {
tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
} else {
tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
}
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
break;
#endif
default:
if (is_right) {
- tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
+ tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
} else {
- tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
+ tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
}
break;
case MO_8:
@@ -1641,10 +1588,10 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
if (is_right) {
shift = mask + 1 - shift;
}
- gen_extu(ot, cpu_T[0]);
- tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
}
}
@@ -1661,12 +1608,12 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
since we've computed the flags into CC_SRC, these variables are
currently dead. */
if (is_right) {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
- tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
} else {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
- tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
}
tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
@@ -1683,24 +1630,24 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* load */
if (op1 == OR_TMP0)
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
else
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
if (is_right) {
switch (ot) {
case MO_8:
- gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_16:
- gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_32:
- gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#endif
default:
@@ -1709,17 +1656,17 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
} else {
switch (ot) {
case MO_8:
- gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_16:
- gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_32:
- gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#endif
default:
@@ -1739,9 +1686,9 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
count = tcg_temp_new();
@@ -1753,11 +1700,11 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
portion by constructing it as a 32-bit value. */
if (is_right) {
- tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
- tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
- tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
+ tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
+ tcg_gen_mov_tl(cpu_T1, cpu_T0);
+ tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
} else {
- tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
+ tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
}
/* FALLTHRU */
#ifdef TARGET_X86_64
@@ -1765,57 +1712,57 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* Concatenate the two 32-bit values and use a 64-bit shift. */
tcg_gen_subi_tl(cpu_tmp0, count, 1);
if (is_right) {
- tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
+ tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
} else {
- tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
- tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
+ tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
+ tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
- tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
+ tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
}
break;
#endif
default:
tcg_gen_subi_tl(cpu_tmp0, count, 1);
if (is_right) {
- tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
+ tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
- tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
+ tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
} else {
- tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
if (ot == MO_16) {
/* Only needed if count > 16, for Intel behaviour. */
tcg_gen_subfi_tl(cpu_tmp4, 33, count);
- tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
+ tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
}
tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
- tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
- tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
+ tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
}
tcg_gen_movi_tl(cpu_tmp4, 0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
- cpu_tmp4, cpu_T[1]);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
+ cpu_tmp4, cpu_T1);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
break;
}
/* store */
gen_op_st_rm_T0_A0(s, ot, op1);
- gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
+ gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
tcg_temp_free(count);
}
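/* Aside: a minimal C model (assumptions mine) of the MO_16 strategy above.
   Packing the operands as A:B:A lets one wide right shift produce the SHRD
   result, and counts 17..31 naturally pull bits of A back in, matching the
   A:B:A description in the comment. */
#include <stdint.h>

static uint16_t shrd16_model(uint16_t a, uint16_t b, unsigned count)
{
    uint64_t v = ((uint64_t)a << 32) | ((uint64_t)b << 16) | a;  /* A:B:A */
    return (uint16_t)(v >> (count & 31));
}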
static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
if (s != OR_TMP1)
- gen_op_mov_v_reg(ot, cpu_T[1], s);
+ gen_op_mov_v_reg(ot, cpu_T1, s);
switch(op) {
case OP_ROL:
gen_rot_rm_T1(s1, ot, d, 0);
@@ -1863,48 +1810,58 @@ static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
break;
default:
/* currently not optimized */
- tcg_gen_movi_tl(cpu_T[1], c);
+ tcg_gen_movi_tl(cpu_T1, c);
gen_shift(s1, op, ot, d, OR_TMP1);
break;
}
}
-static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
-{
- target_long disp;
- int havesib;
+/* Decompose an address. */
+
+typedef struct AddressParts {
+ int def_seg;
int base;
int index;
int scale;
- int mod, rm, code, override, must_add_seg;
- TCGv sum;
+ target_long disp;
+} AddressParts;
+
+static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
+ int modrm)
+{
+ int def_seg, base, index, scale, mod, rm;
+ target_long disp;
+ bool havesib;
+
+ def_seg = R_DS;
+ index = -1;
+ scale = 0;
+ disp = 0;
- override = s->override;
- must_add_seg = s->addseg;
- if (override >= 0)
- must_add_seg = 1;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
+ base = rm | REX_B(s);
+
+ if (mod == 3) {
+ /* Normally filtered out earlier, but including this path
+ simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
+ goto done;
+ }
switch (s->aflag) {
case MO_64:
case MO_32:
havesib = 0;
- base = rm;
- index = -1;
- scale = 0;
-
- if (base == 4) {
- havesib = 1;
- code = cpu_ldub_code(env, s->pc++);
+ if (rm == 4) {
+ int code = cpu_ldub_code(env, s->pc++);
scale = (code >> 6) & 3;
index = ((code >> 3) & 7) | REX_X(s);
if (index == 4) {
index = -1; /* no index */
}
- base = (code & 7);
+ base = (code & 7) | REX_B(s);
+ havesib = 1;
}
- base |= REX_B(s);
switch (mod) {
case 0:
@@ -1913,10 +1870,9 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
disp = (int32_t)cpu_ldl_code(env, s->pc);
s->pc += 4;
if (CODE64(s) && !havesib) {
+ base = -2;
disp += s->pc + s->rip_offset;
}
- } else {
- disp = 0;
}
break;
case 1:
@@ -1933,204 +1889,132 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
if (base == R_ESP && s->popl_esp_hack) {
disp += s->popl_esp_hack;
}
-
- /* Compute the address, with a minimum number of TCG ops. */
- TCGV_UNUSED(sum);
- if (index >= 0) {
- if (scale == 0) {
- sum = cpu_regs[index];
- } else {
- tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
- sum = cpu_A0;
- }
- if (base >= 0) {
- tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
- sum = cpu_A0;
- }
- } else if (base >= 0) {
- sum = cpu_regs[base];
- }
- if (TCGV_IS_UNUSED(sum)) {
- tcg_gen_movi_tl(cpu_A0, disp);
- } else {
- tcg_gen_addi_tl(cpu_A0, sum, disp);
- }
-
- if (must_add_seg) {
- if (override < 0) {
- if (base == R_EBP || base == R_ESP) {
- override = R_SS;
- } else {
- override = R_DS;
- }
- }
-
- tcg_gen_ld_tl(cpu_tmp0, cpu_env,
- offsetof(CPUX86State, segs[override].base));
- if (CODE64(s)) {
- if (s->aflag == MO_32) {
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
- }
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- return;
- }
-
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- }
-
- if (s->aflag == MO_32) {
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ if (base == R_EBP || base == R_ESP) {
+ def_seg = R_SS;
}
break;
case MO_16:
- switch (mod) {
- case 0:
+ if (mod == 0) {
if (rm == 6) {
+ base = -1;
disp = cpu_lduw_code(env, s->pc);
s->pc += 2;
- tcg_gen_movi_tl(cpu_A0, disp);
- rm = 0; /* avoid SS override */
- goto no_rm;
- } else {
- disp = 0;
+ break;
}
- break;
- case 1:
+ } else if (mod == 1) {
disp = (int8_t)cpu_ldub_code(env, s->pc++);
- break;
- default:
- case 2:
+ } else {
disp = (int16_t)cpu_lduw_code(env, s->pc);
s->pc += 2;
- break;
}
- sum = cpu_A0;
switch (rm) {
case 0:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
+ base = R_EBX;
+ index = R_ESI;
break;
case 1:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
+ base = R_EBX;
+ index = R_EDI;
break;
case 2:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
+ base = R_EBP;
+ index = R_ESI;
+ def_seg = R_SS;
break;
case 3:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
+ base = R_EBP;
+ index = R_EDI;
+ def_seg = R_SS;
break;
case 4:
- sum = cpu_regs[R_ESI];
+ base = R_ESI;
break;
case 5:
- sum = cpu_regs[R_EDI];
+ base = R_EDI;
break;
case 6:
- sum = cpu_regs[R_EBP];
+ base = R_EBP;
+ def_seg = R_SS;
break;
default:
case 7:
- sum = cpu_regs[R_EBX];
+ base = R_EBX;
break;
}
- tcg_gen_addi_tl(cpu_A0, sum, disp);
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- no_rm:
- if (must_add_seg) {
- if (override < 0) {
- if (rm == 2 || rm == 3 || rm == 6) {
- override = R_SS;
- } else {
- override = R_DS;
- }
- }
- gen_op_addl_A0_seg(s, override);
- }
break;
default:
tcg_abort();
}
+
+ done:
+ return (AddressParts){ def_seg, base, index, scale, disp };
}
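/* Aside: the byte layouts gen_lea_modrm_0() walks, as a standalone sketch.
   The modrm byte is mod:reg:rm (2:3:3 bits); in 32/64-bit address modes,
   rm == 4 escapes to an SIB byte laid out scale:index:base (2:3:3), which
   is exactly what the rm == 4 branch above reads. */
#include <stdint.h>

static void decode_modrm_sib(uint8_t modrm, uint8_t sib)
{
    int mod   = (modrm >> 6) & 3;  /* 0..2 select memory forms, 3 register */
    int rm    = modrm & 7;         /* base register, or 4 = SIB follows */
    int scale = (sib >> 6) & 3;    /* index register shifted left by scale */
    int index = (sib >> 3) & 7;    /* index == 4 means "no index" */
    int base  = sib & 7;
    (void)mod; (void)rm; (void)scale; (void)index; (void)base;
}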
-static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
+/* Compute the address, with a minimum number of TCG ops. */
+static TCGv gen_lea_modrm_1(AddressParts a)
{
- int mod, rm, base, code;
+ TCGv ea;
- mod = (modrm >> 6) & 3;
- if (mod == 3)
- return;
- rm = modrm & 7;
+ TCGV_UNUSED(ea);
+ if (a.index >= 0) {
+ if (a.scale == 0) {
+ ea = cpu_regs[a.index];
+ } else {
+ tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
+ ea = cpu_A0;
+ }
+ if (a.base >= 0) {
+ tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
+ ea = cpu_A0;
+ }
+ } else if (a.base >= 0) {
+ ea = cpu_regs[a.base];
+ }
+ if (TCGV_IS_UNUSED(ea)) {
+ tcg_gen_movi_tl(cpu_A0, a.disp);
+ ea = cpu_A0;
+ } else if (a.disp != 0) {
+ tcg_gen_addi_tl(cpu_A0, ea, a.disp);
+ ea = cpu_A0;
+ }
- switch (s->aflag) {
- case MO_64:
- case MO_32:
- base = rm;
+ return ea;
+}
- if (base == 4) {
- code = cpu_ldub_code(env, s->pc++);
- base = (code & 7);
- }
+static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(a);
+ gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
+}
- switch (mod) {
- case 0:
- if (base == 5) {
- s->pc += 4;
- }
- break;
- case 1:
- s->pc++;
- break;
- default:
- case 2:
- s->pc += 4;
- break;
- }
- break;
+static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ (void)gen_lea_modrm_0(env, s, modrm);
+}
- case MO_16:
- switch (mod) {
- case 0:
- if (rm == 6) {
- s->pc += 2;
- }
- break;
- case 1:
- s->pc++;
- break;
- default:
- case 2:
- s->pc += 2;
- break;
- }
- break;
+/* Used for BNDCL, BNDCU, BNDCN. */
+static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
+ TCGCond cond, TCGv_i64 bndv)
+{
+ TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
- default:
- tcg_abort();
+ tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
}
+ tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
+ tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
+ gen_helper_bndck(cpu_env, cpu_tmp2_i32);
}
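/* Aside: the architectural check that gen_bndck() funnels to the helper,
   sketched directly. BNDCL faults when the address is below the lower
   bound; BNDCU and BNDCN fault when it is above the upper bound (BNDCU's
   operand carries the upper bound in one's-complement form, which is why
   cond and bndv vary per instruction above). */
#include <stdint.h>

static int bnd_violation(uint64_t addr, uint64_t lb, uint64_t ub)
{
    return addr < lb || addr > ub;    /* nonzero would raise #BR */
}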
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
- int override, must_add_seg;
- must_add_seg = s->addseg;
- override = R_DS;
- if (s->override >= 0) {
- override = s->override;
- must_add_seg = 1;
- }
- if (must_add_seg) {
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- gen_op_addq_A0_seg(override);
- } else
-#endif
- {
- gen_op_addl_A0_seg(s, override);
- }
- }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
@@ -2145,23 +2029,23 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
if (mod == 3) {
if (is_store) {
if (reg != OR_TMP0)
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
if (reg != OR_TMP0)
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
}
} else {
gen_lea_modrm(env, s, modrm);
if (is_store) {
if (reg != OR_TMP0)
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
if (reg != OR_TMP0)
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
}
}
}
@@ -2258,7 +2142,7 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- cc = gen_prepare_cc(s, b, cpu_T[1]);
+ cc = gen_prepare_cc(s, b, cpu_T1);
if (cc.mask != -1) {
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cc.reg, cc.mask);
@@ -2268,9 +2152,9 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
cc.reg2 = tcg_const_tl(cc.imm);
}
- tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
- cpu_T[0], cpu_regs[reg]);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
+ cpu_T0, cpu_regs[reg]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
if (cc.mask != -1) {
tcg_temp_free(cc.reg);
@@ -2282,29 +2166,24 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
static inline void gen_op_movl_T0_seg(int seg_reg)
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
}
static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
- tcg_gen_st32_tl(cpu_T[0], cpu_env,
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
- tcg_gen_st_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,segs[seg_reg].base));
+ tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
}
/* move T0 to seg_reg and compute if the CPU state may change. Never
call this function with seg_reg == R_CS */
-static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
+static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
{
if (s->pe && !s->vm86) {
- /* XXX: optimize by finding processor state dynamically */
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
@@ -2345,44 +2224,25 @@ gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
static inline void gen_stack_update(DisasContext *s, int addend)
{
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- gen_op_add_reg_im(MO_64, R_ESP, addend);
- } else
-#endif
- if (s->ss32) {
- gen_op_add_reg_im(MO_32, R_ESP, addend);
- } else {
- gen_op_add_reg_im(MO_16, R_ESP, addend);
- }
+ gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
}
/* Generate a push. It depends on ss32, addseg and dflag. */
static void gen_push_v(DisasContext *s, TCGv val)
{
- TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = mo_stacksize(s);
int size = 1 << d_ot;
TCGv new_esp = cpu_A0;
tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
- if (CODE64(s)) {
- a_ot = MO_64;
- } else if (s->ss32) {
- a_ot = MO_32;
+ if (!CODE64(s)) {
if (s->addseg) {
new_esp = cpu_tmp4;
tcg_gen_mov_tl(new_esp, cpu_A0);
- gen_op_addl_A0_seg(s, R_SS);
- } else {
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
- } else {
- a_ot = MO_16;
- new_esp = cpu_tmp4;
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(new_esp, cpu_A0);
- gen_op_addl_A0_seg(s, R_SS);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
}
gen_op_st_v(s, d_ot, val, cpu_A0);
@@ -2393,127 +2253,112 @@ static void gen_push_v(DisasContext *s, TCGv val)
static TCGMemOp gen_pop_T0(DisasContext *s)
{
TCGMemOp d_ot = mo_pushpop(s, s->dflag);
- TCGv addr = cpu_A0;
- if (CODE64(s)) {
- addr = cpu_regs[R_ESP];
- } else if (!s->ss32) {
- tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
- gen_op_addl_A0_seg(s, R_SS);
- } else if (s->addseg) {
- tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
- gen_op_addl_A0_seg(s, R_SS);
- } else {
- tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
- }
+ gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
- gen_op_ld_v(s, d_ot, cpu_T[0], addr);
return d_ot;
}
-static void gen_pop_update(DisasContext *s, TCGMemOp ot)
+static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
{
gen_stack_update(s, 1 << ot);
}
-static void gen_stack_A0(DisasContext *s)
+static inline void gen_stack_A0(DisasContext *s)
{
- gen_op_movl_A0_reg(R_ESP);
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
+ gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
-/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
+ TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ TCGMemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
int i;
- gen_op_movl_A0_reg(R_ESP);
- gen_op_addl_A0_im(-8 << s->dflag);
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
- for(i = 0;i < 8; i++) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
- gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
- gen_op_addl_A0_im(1 << s->dflag);
+
+ for (i = 0; i < 8; i++) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
+ gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
}
- gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
+
+ gen_stack_update(s, -8 * size);
}
-/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
+ TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ TCGMemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
int i;
- gen_op_movl_A0_reg(R_ESP);
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
- for(i = 0;i < 8; i++) {
+
+ for (i = 0; i < 8; i++) {
/* ESP is not reloaded */
- if (i != 3) {
- gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
+ if (7 - i == R_ESP) {
+ continue;
}
- gen_op_addl_A0_im(1 << s->dflag);
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
+ gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
}
- gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
+
+ gen_stack_update(s, 8 * size);
}
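/* Aside: a plain C model of the PUSHA layout the loop above produces
   (store32 stands in for the SS-relative store). EAX lands nearest the
   old stack top and EDI lowest, and the stored ESP is the pre-PUSHA value
   because the single gen_stack_update() runs after all eight stores. */
#include <stdint.h>

static void pusha_model(uint32_t *esp, const uint32_t regs[8],
                        void (*store32)(uint32_t addr, uint32_t val))
{
    /* regs[]: 0=EAX 1=ECX 2=EDX 3=EBX 4=ESP 5=EBP 6=ESI 7=EDI */
    for (int i = 0; i < 8; i++) {
        store32(*esp - 4 * (i + 1), regs[i]);
    }
    *esp -= 32;                       /* one update, at the end */
}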
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
- TCGMemOp ot = mo_pushpop(s, s->dflag);
- int opsize = 1 << ot;
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+ int size = 1 << d_ot;
- level &= 0x1f;
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- gen_op_movl_A0_reg(R_ESP);
- gen_op_addq_A0_im(-opsize);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
-
- /* push bp */
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
- if (level) {
- /* XXX: must save state */
- gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
- tcg_const_i32((ot == MO_64)),
- cpu_T[1]);
- }
- gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
- tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
- gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
- } else
-#endif
- {
- gen_op_movl_A0_reg(R_ESP);
- gen_op_addl_A0_im(-opsize);
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
- /* push bp */
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
- if (level) {
- /* XXX: must save state */
- gen_helper_enter_level(cpu_env, tcg_const_i32(level),
- tcg_const_i32(s->dflag - 1),
- cpu_T[1]);
- }
- gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
- tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
- gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
+ /* Push BP; compute FrameTemp into T1. */
+ tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
+ gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
+
+ level &= 31;
+ if (level != 0) {
+ int i;
+
+ /* Copy level-1 pointers from the previous frame. */
+ for (i = 1; i < level; ++i) {
+ tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
+
+ tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
+ }
+
+ /* Push the current FrameTemp as the last level. */
+ tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
}
+
+ /* Copy the FrameTemp value to EBP. */
+ gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
+
+ /* Compute the final value of ESP. */
+ tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
+ gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
+}
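/* Aside: the ENTER sequence above as word-granular pseudocode (a sketch:
   mem[] is a toy 32-bit stack addressed in 4-byte units and frame_size is
   the first immediate operand; names are illustrative). */
#include <stdint.h>

static void enter_model(uint32_t *esp, uint32_t *ebp, uint32_t mem[],
                        uint16_t frame_size, int level)
{
    uint32_t frame_temp = *esp - 4;
    mem[frame_temp / 4] = *ebp;                   /* push BP */
    level &= 31;
    for (int i = 1; i < level; i++) {             /* copy old pointers */
        mem[(frame_temp - 4 * i) / 4] = mem[(*ebp - 4 * i) / 4];
    }
    if (level) {                                  /* push FrameTemp last */
        mem[(frame_temp - 4 * level) / 4] = frame_temp;
    }
    *ebp = frame_temp;
    *esp = frame_temp - frame_size - 4 * level;
}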
+
+static void gen_leave(DisasContext *s)
+{
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = mo_stacksize(s);
+
+ gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+
+ tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
+
+ gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
+ gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
}
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
@@ -2524,6 +2369,30 @@ static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
s->is_jmp = DISAS_TB_JUMP;
}
+/* Generate #UD for the current instruction. The assumption here is that
+ the instruction is known, but it isn't allowed in the current cpu mode. */
+static void gen_illegal_opcode(DisasContext *s)
+{
+ gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
+}
+
+/* Similarly, except that the assumption here is that we don't decode
+ the instruction at all -- either a missing opcode, an unimplemented
+ feature, or just a bogus instruction stream. */
+static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
+{
+ gen_illegal_opcode(s);
+
+ if (qemu_loglevel_mask(LOG_UNIMP)) {
+ target_ulong pc = s->pc_start, end = s->pc;
+ qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
+ for (; pc < end; ++pc) {
+ qemu_log(" %02x", cpu_ldub_code(env, pc));
+ }
+ qemu_log("\n");
+ }
+}
+
/* an interrupt is different from an exception because of the
privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
@@ -2544,14 +2413,56 @@ static void gen_debug(DisasContext *s, target_ulong cur_eip)
s->is_jmp = DISAS_TB_JUMP;
}
-/* generate a generic end of block. Trace exception is also generated
- if needed */
-static void gen_eob(DisasContext *s)
+static void gen_set_hflag(DisasContext *s, uint32_t mask)
+{
+ if ((s->flags & mask) == 0) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_ori_i32(t, t, mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags |= mask;
+ }
+}
+
+static void gen_reset_hflag(DisasContext *s, uint32_t mask)
+{
+ if (s->flags & mask) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_andi_i32(t, t, ~mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags &= ~mask;
+ }
+}
+
+/* Clear BND registers during legacy branches. */
+static void gen_bnd_jmp(DisasContext *s)
+{
+ /* Clear the registers only if BND prefix is missing, MPX is enabled,
+ and if the BNDREGs are known to be in use (non-zero) already.
+ The helper itself will check BNDPRESERVE at runtime. */
+ if ((s->prefix & PREFIX_REPNZ) == 0
+ && (s->flags & HF_MPX_EN_MASK) != 0
+ && (s->flags & HF_MPX_IU_MASK) != 0) {
+ gen_helper_bnd_jmp(cpu_env);
+ }
+}
+
+/* Generate an end of block. Trace exception is also generated if needed.
+   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
+static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
gen_update_cc_op(s);
- if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
- gen_helper_reset_inhibit_irq(cpu_env);
+
+ /* If several instructions disable interrupts, only the first does it. */
+ if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
+ gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
+ } else {
+ gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
}
+
if (s->tb->flags & HF_RF_MASK) {
gen_helper_reset_rf(cpu_env);
}
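/* Aside: the interrupt-shadow rule encoded just above, reduced to a pure
   function (the bit position here is illustrative only). Back-to-back
   shadowing instructions such as MOV SS or STI must not extend the
   one-instruction window, so the flag is set only when it is clear. */
static unsigned update_inhibit_irq(unsigned hflags, int inhibit)
{
    const unsigned HF_INHIBIT_IRQ = 1u << 3;      /* assumed bit, for show */
    if (inhibit && !(hflags & HF_INHIBIT_IRQ)) {
        return hflags | HF_INHIBIT_IRQ;
    }
    return hflags & ~HF_INHIBIT_IRQ;
}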
@@ -2565,6 +2476,12 @@ static void gen_eob(DisasContext *s)
s->is_jmp = DISAS_TB_JUMP;
}
+/* End of block, resetting the inhibit irq flag. */
+static void gen_eob(DisasContext *s)
+{
+ gen_eob_inhibit_irq(s, false);
+}
+
/* generate a jump to eip. No segment change must happen before as a
direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
@@ -2601,28 +2518,28 @@ static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
int mem_index = s->mem_index;
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
- tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
- tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}
static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
int mem_index = s->mem_index;
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
}
static inline void gen_op_movo(int d_offset, int s_offset)
{
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
}
static inline void gen_op_movq(int d_offset, int s_offset)
@@ -2995,7 +2912,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
b1 = 0;
sse_fn_epp = sse_op_table1[b][b1];
if (!sse_fn_epp) {
- goto illegal_op;
+ goto unknown_op;
}
if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
is_xmm = 1;
@@ -3014,15 +2931,19 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
if (s->flags & HF_EM_MASK) {
illegal_op:
- gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+ gen_illegal_opcode(s);
return;
}
- if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
- if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
- goto illegal_op;
+ if (is_xmm
+ && !(s->flags & HF_OSFXSR_MASK)
+ && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
+ goto unknown_op;
+ }
if (b == 0x0e) {
- if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
- goto illegal_op;
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
+ /* If we were fully decoding this we might use illegal_op. */
+ goto unknown_op;
+ }
/* femms */
gen_helper_emms(cpu_env);
return;
@@ -3047,8 +2968,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
b |= (b1 << 8);
switch(b) {
case 0x0e7: /* movntq */
- if (mod == 3)
+ if (mod == 3) {
goto illegal_op;
+ }
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
break;
@@ -3073,25 +2995,25 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_lea_modrm(env, s, modrm);
if (b1 & 1) {
gen_stq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
+ xmm_regs[reg].ZMM_Q(0)));
} else {
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(0)));
- gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(0)));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
}
break;
case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
}
break;
@@ -3101,14 +3023,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
- gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
+ gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
}
break;
@@ -3142,30 +3064,30 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x210: /* movss xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
}
break;
case 0x310: /* movsd xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ xmm_regs[reg].ZMM_Q(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
break;
case 0x012: /* movlps */
@@ -3173,12 +3095,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
+ xmm_regs[reg].ZMM_Q(0)));
} else {
/* movhlps */
rm = (modrm & 7) | REX_B(s);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
}
break;
case 0x212: /* movsldup */
@@ -3187,40 +3109,40 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
}
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
break;
case 0x312: /* movddup */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
+ xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
- gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
break;
case 0x016: /* movhps */
case 0x116: /* movhpd */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(1)));
+ xmm_regs[reg].ZMM_Q(1)));
} else {
/* movlhps */
rm = (modrm & 7) | REX_B(s);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
break;
case 0x216: /* movshdup */
@@ -3229,15 +3151,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
}
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
- gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
break;
case 0x178:
case 0x378:
@@ -3263,13 +3185,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
@@ -3277,14 +3199,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
- offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ tcg_gen_ld_i64(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
break;
@@ -3292,13 +3214,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
+ xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
- offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
- gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
break;
case 0x7f: /* movq ea, mm */
if (mod != 3) {
@@ -3328,23 +3250,23 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x211: /* movss ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
- gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
}
break;
case 0x311: /* movsd ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
+ xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
}
break;
case 0x013: /* movlps */
@@ -3352,7 +3274,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
+ xmm_regs[reg].ZMM_Q(0)));
} else {
goto illegal_op;
}
@@ -3362,7 +3284,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(1)));
+ xmm_regs[reg].ZMM_Q(1)));
} else {
goto illegal_op;
}
@@ -3374,26 +3296,26 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x172:
case 0x173:
if (b1 >= 2) {
- goto illegal_op;
+ goto unknown_op;
}
val = cpu_ldub_code(env, s->pc++);
if (is_xmm) {
- tcg_gen_movi_tl(cpu_T[0], val);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
+ tcg_gen_movi_tl(cpu_T0, val);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
op1_offset = offsetof(CPUX86State,xmm_t0);
} else {
- tcg_gen_movi_tl(cpu_T[0], val);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
+ tcg_gen_movi_tl(cpu_T0, val);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
op1_offset = offsetof(CPUX86State,mmx_t0);
}
sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
(((modrm >> 3)) & 7)][b1];
if (!sse_fn_epp) {
- goto illegal_op;
+ goto unknown_op;
}
if (is_xmm) {
rm = (modrm & 7) | REX_B(s);
@@ -3452,12 +3374,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
if (ot == MO_32) {
SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
- sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
+ sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
#else
goto illegal_op;
#endif
@@ -3502,10 +3424,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if ((b >> 8) & 1) {
- gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
+ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
} else {
- gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
}
op2_offset = offsetof(CPUX86State,xmm_t0);
} else {
@@ -3517,17 +3439,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
SSEFunc_i_ep sse_fn_i_ep =
sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_l_ep sse_fn_l_ep =
sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
- sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
+ sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
#else
goto illegal_op;
#endif
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0xc4: /* pinsrw */
case 0x1c4:
@@ -3536,11 +3458,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
- tcg_gen_st16_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
+ tcg_gen_st16_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
} else {
val &= 3;
- tcg_gen_st16_tl(cpu_T[0], cpu_env,
+ tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
}
break;
@@ -3553,41 +3475,41 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (b1) {
val &= 7;
rm = (modrm & 7) | REX_B(s);
- tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
} else {
val &= 3;
rm = (modrm & 7);
- tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
}
reg = ((modrm >> 3) & 7) | rex_r;
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1d6: /* movq ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(0)));
+ xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
- offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
- gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
}
break;
case 0x2d6: /* movq2dq */
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7);
- gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,fpregs[rm].mmx));
- gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
break;
case 0x3d6: /* movdq2q */
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
- offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
break;
case 0xd7: /* pmovmskb */
case 0x1d7:
@@ -3617,12 +3539,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
- goto illegal_op;
+ goto unknown_op;
}
sse_fn_epp = sse_op_table6[b].op[b1];
if (!sse_fn_epp) {
- goto illegal_op;
+ goto unknown_op;
}
if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
goto illegal_op;
@@ -3639,20 +3561,20 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
gen_ldq_env_A0(s, op2_offset +
- offsetof(XMMReg, XMM_Q(0)));
+ offsetof(ZMMReg, ZMM_Q(0)));
break;
case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
- offsetof(XMMReg, XMM_L(0)));
+ offsetof(ZMMReg, ZMM_L(0)));
break;
case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
s->mem_index, MO_LEUW);
tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
- offsetof(XMMReg, XMM_W(0)));
+ offsetof(ZMMReg, ZMM_W(0)));
break;
case 0x2a: /* movntqda */
gen_ldo_env_A0(s, op1_offset);
@@ -3672,7 +3594,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
}
if (sse_fn_epp == SSE_SPECIAL) {
- goto illegal_op;
+ goto unknown_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
@@ -3709,11 +3631,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
- cpu_T[0], tcg_const_i32(8 << ot));
+ gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
+ cpu_T0, tcg_const_i32(8 << ot));
ot = mo_64_32(s->dflag);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1f0: /* crc32 or movbe */
@@ -3738,9 +3660,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_lea_modrm(env, s, modrm);
if ((b & 1) == 0) {
- tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, ot | MO_BE);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
s->mem_index, ot | MO_BE);
@@ -3755,8 +3677,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
@@ -3775,12 +3697,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Extract START, and shift the operand.
Shifts larger than operand size get zeros. */
tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
zero = tcg_const_tl(0);
- tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
- cpu_T[0], zero);
+ tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
+ cpu_T0, zero);
tcg_temp_free(zero);
/* Extract the LEN into a mask. Lengths larger than
@@ -3790,12 +3712,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
cpu_A0, bound);
tcg_temp_free(bound);
- tcg_gen_movi_tl(cpu_T[1], 1);
- tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
- tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_movi_tl(cpu_T1, 1);
+ tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
+ tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
}
@@ -3809,21 +3731,21 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
{
TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
/* Note that since we're using BMILG (in order to get O
cleared) we need to store the inverse into C. */
tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
- cpu_T[1], bound);
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
- bound, bound, cpu_T[1]);
+ cpu_T1, bound);
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
+ bound, bound, cpu_T1);
tcg_temp_free(bound);
}
tcg_gen_movi_tl(cpu_A0, -1);
- tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
- tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
+ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
@@ -3838,7 +3760,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
switch (ot) {
default:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
@@ -3847,8 +3769,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
break;
#ifdef TARGET_X86_64
case MO_64:
- tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
- cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
+ cpu_T0, cpu_regs[R_EDX]);
+ tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
+ tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
break;
#endif
}
@@ -3865,11 +3789,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Note that by zero-extending the mask operand, we
automatically handle zero-extending the result. */
if (ot == MO_64) {
- tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
- tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
- gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
+ gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
break;
case 0x2f5: /* pext Gy, By, Ey */
@@ -3883,11 +3807,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Note that by zero-extending the mask operand, we
automatically handle zero-extending the result. */
if (ot == MO_64) {
- tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
- tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
- gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
+ gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
break;
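/* Aside: a reference bit-loop for the PEXT semantics invoked above (QEMU
   itself goes through a C helper via gen_helper_pext; this is behaviour
   only). PDEP is the inverse, scattering src bits into mask positions. */
#include <stdint.h>

static uint64_t pext64_model(uint64_t src, uint64_t mask)
{
    uint64_t dst = 0;
    unsigned j = 0;
    for (unsigned i = 0; i < 64; i++) {
        if (mask & (1ULL << i)) {
            dst |= ((src >> i) & 1ULL) << j++;
        }
    }
    return dst;
}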
case 0x1f6: /* adcx Gy, Ey */
@@ -3946,22 +3870,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* If we know TL is 64-bit, and we want a 32-bit
result, just do everything in 64-bit arithmetic. */
tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
- tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
- tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
- tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
- tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
- tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
+ tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
+ tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
+ tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
+ tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
+ tcg_gen_shri_i64(carry_out, cpu_T0, 32);
break;
#endif
default:
/* Otherwise compute the carry-out in two steps. */
zero = tcg_const_tl(0);
- tcg_gen_add2_tl(cpu_T[0], carry_out,
- cpu_T[0], zero,
+ tcg_gen_add2_tl(cpu_T0, carry_out,
+ cpu_T0, zero,
carry_in, zero);
tcg_gen_add2_tl(cpu_regs[reg], carry_out,
cpu_regs[reg], carry_out,
- cpu_T[0], zero);
+ cpu_T0, zero);
tcg_temp_free(zero);
break;
}
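/* Aside: the two-step carry-out trick above, modelled in plain C for the
   case without a double-width type (sketch). At most one of the partial
   carries can be set, so combining them is exact. */
#include <stdint.h>

static uint32_t adcx_model(uint32_t dst, uint32_t src, unsigned *carry)
{
    uint32_t t = src + *carry;
    unsigned c1 = t < src;            /* carry out of adding carry-in */
    uint32_t r = dst + t;
    unsigned c2 = r < dst;            /* carry out of the main add */
    *carry = c1 | c2;
    return r;
}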
@@ -3980,24 +3904,24 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (ot == MO_64) {
- tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
+ tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
} else {
- tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
+ tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
}
if (b == 0x1f7) {
- tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
} else if (b == 0x2f7) {
if (ot != MO_64) {
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
- tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
if (ot != MO_64) {
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
}
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x0f3:
@@ -4014,36 +3938,36 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
switch (reg & 7) {
case 1: /* blsr By,Ey */
- tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
+ tcg_gen_neg_tl(cpu_T1, cpu_T0);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
gen_op_update2_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 2: /* blsmsk By,Ey */
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
- tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
- tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 3: /* blsi By, Ey */
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
- tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
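
Note: the group above implements the BMI1 "lowest set bit" ops, which are all
two-instruction identities. Per the SDM definitions, BLSR clears the lowest
set bit, BLSMSK masks up to and including it, and BLSI isolates it; a scalar
reference of those definitions:

    #include <stdint.h>

    static uint64_t blsr(uint64_t x)   { return x & (x - 1); } /* clear lowest set bit */
    static uint64_t blsmsk(uint64_t x) { return x ^ (x - 1); } /* mask through lowest set bit */
    static uint64_t blsi(uint64_t x)   { return x & -x; }      /* isolate lowest set bit */
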
@@ -4055,12 +3979,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
- goto illegal_op;
+ goto unknown_op;
}
sse_fn_eppi = sse_op_table7[b].op[b1];
if (!sse_fn_eppi) {
- goto illegal_op;
+ goto unknown_op;
}
if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
goto illegal_op;
@@ -4074,22 +3998,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
val = cpu_ldub_code(env, s->pc++);
switch (b) {
case 0x14: /* pextrb */
- tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_B(val & 15)));
+ tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_B(val & 15)));
if (mod == 3) {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
break;
case 0x15: /* pextrw */
- tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_W(val & 7)));
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_W(val & 7)));
if (mod == 3) {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUW);
}
break;
@@ -4097,7 +4021,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (ot == MO_32) { /* pextrd */
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(val & 3)));
+ xmm_regs[reg].ZMM_L(val & 3)));
if (mod == 3) {
tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
} else {
@@ -4108,7 +4032,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
#ifdef TARGET_X86_64
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(val & 1)));
+ xmm_regs[reg].ZMM_Q(val & 1)));
if (mod == 3) {
tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
} else {
@@ -4121,53 +4045,53 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
break;
case 0x17: /* extractps */
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(val & 3)));
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(val & 3)));
if (mod == 3) {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUL);
}
break;
case 0x20: /* pinsrb */
if (mod == 3) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
+ gen_op_mov_v_reg(MO_32, cpu_T0, rm);
} else {
- tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
- tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_B(val & 15)));
+ tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_B(val & 15)));
break;
case 0x21: /* insertps */
if (mod == 3) {
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]
- .XMM_L((val >> 6) & 3)));
+ .ZMM_L((val >> 6) & 3)));
} else {
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]
- .XMM_L((val >> 4) & 3)));
+ .ZMM_L((val >> 4) & 3)));
if ((val >> 0) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(0)));
+ xmm_regs[reg].ZMM_L(0)));
if ((val >> 1) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(1)));
+ xmm_regs[reg].ZMM_L(1)));
if ((val >> 2) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(2)));
+ xmm_regs[reg].ZMM_L(2)));
if ((val >> 3) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(3)));
+ xmm_regs[reg].ZMM_L(3)));
break;
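
Note: insertps copies one dword lane and then zeroes the lanes named by the
low four immediate bits, which is what the four conditional stores above spell
out. Scalar reference for the register-source form (a sketch):

    #include <stdint.h>

    /* INSERTPS: imm[7:6] selects the source lane, imm[5:4] the
       destination lane, imm[3:0] zeroes destination lanes. */
    static void insertps(uint32_t dst[4], const uint32_t src[4], uint8_t imm)
    {
        dst[(imm >> 4) & 3] = src[(imm >> 6) & 3];
        for (int i = 0; i < 4; i++) {
            if (imm & (1u << i)) {
                dst[i] = 0;
            }
        }
    }
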
case 0x22:
if (ot == MO_32) { /* pinsrd */
@@ -4179,7 +4103,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
- xmm_regs[reg].XMM_L(val & 3)));
+ xmm_regs[reg].ZMM_L(val & 3)));
} else { /* pinsrq */
#ifdef TARGET_X86_64
if (mod == 3) {
@@ -4190,7 +4114,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
- xmm_regs[reg].XMM_Q(val & 1)));
+ xmm_regs[reg].ZMM_Q(val & 1)));
#else
goto illegal_op;
#endif
@@ -4252,22 +4176,24 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
b = cpu_ldub_code(env, s->pc++);
if (ot == MO_64) {
- tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
+ tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
} else {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
default:
- goto illegal_op;
+ unknown_op:
+ gen_unknown_opcode(env, s);
+ return;
}
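
Note: this is the heart of the illegal_op/unknown_op split. illegal_op keeps
meaning "decodes, but is forbidden here" (missing CPUID bit, wrong mode),
while unknown_op means the bytes did not decode at all. Both raise #UD; the
new path can additionally log the undecoded bytes. A sketch of what
gen_unknown_opcode plausibly does (compare its definition earlier in
translate.c; details may differ):

    static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
    {
        gen_illegal_opcode(s);                 /* queue the #UD */
        if (qemu_loglevel_mask(LOG_UNIMP)) {   /* only under -d unimp */
            target_ulong pc = s->pc_start, end = s->pc;
            qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                qemu_log(" %02x", cpu_ldub_code(env, pc));
            }
            qemu_log("\n");
        }
    }
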
} else {
/* generic MMX or SSE operation */
@@ -4313,13 +4239,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
switch (sz) {
case 2:
/* 32 bit access */
- gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
break;
case 3:
/* 64 bit access */
- gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0)));
+ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
break;
default:
/* 128 bit access */
@@ -4343,11 +4269,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
switch(b) {
case 0x0f: /* 3DNow! data insns */
- if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
- goto illegal_op;
val = cpu_ldub_code(env, s->pc++);
sse_fn_epp = sse_op_table5[val];
if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
goto illegal_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
@@ -4367,7 +4294,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* compare insns */
val = cpu_ldub_code(env, s->pc++);
if (val >= 8)
- goto illegal_op;
+ goto unknown_op;
sse_fn_epp = sse_op_table4[val][b1];
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
@@ -4412,10 +4339,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
target_ulong next_eip, tval;
int rex_w, rex_r;
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(pc_start);
- }
- s->pc = pc_start;
+ s->pc_start = s->pc = pc_start;
prefixes = 0;
s->override = -1;
rex_w = -1;
@@ -4528,7 +4452,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
b = 0x13a;
break;
default: /* Reserved for future use. */
- goto illegal_op;
+ goto unknown_op;
}
}
s->vex_v = (~vex3 >> 3) & 0xf;
@@ -4608,13 +4532,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
xor_zero:
/* xor reg, reg optimisation */
set_cc_op(s, CC_OP_CLR);
- tcg_gen_movi_tl(cpu_T[0], 0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, 0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
} else {
opreg = rm;
}
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
@@ -4624,17 +4548,17 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
} else {
- gen_op_mov_v_reg(ot, cpu_T[1], rm);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
}
gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, OR_EAX);
break;
}
@@ -4679,7 +4603,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
val = (int8_t)insn_get(env, s, MO_8);
break;
}
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, opreg);
}
break;
@@ -4706,32 +4630,32 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (op == 0)
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* test */
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 2: /* not */
- tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_not_tl(cpu_T0, cpu_T0);
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
break;
case 3: /* neg */
- tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_neg_tl(cpu_T0, cpu_T0);
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
gen_op_update_neg_cc();
set_cc_op(s, CC_OP_SUBB + ot);
@@ -4739,32 +4663,32 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 4: /* mul */
switch(ot) {
case MO_8:
- gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
- tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
- gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
- gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
@@ -4777,7 +4701,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
- cpu_T[0], cpu_regs[R_EAX]);
+ cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
@@ -4788,34 +4712,34 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 5: /* imul */
switch(ot) {
case MO_8:
- gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
- tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
- gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
- gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
@@ -4830,7 +4754,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
- cpu_T[0], cpu_regs[R_EAX]);
+ cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
@@ -4842,22 +4766,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 6: /* div */
switch(ot) {
case MO_8:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_divb_AL(cpu_env, cpu_T[0]);
+ gen_helper_divb_AL(cpu_env, cpu_T0);
break;
case MO_16:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_divw_AX(cpu_env, cpu_T[0]);
+ gen_helper_divw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_divl_EAX(cpu_env, cpu_T[0]);
+ gen_helper_divl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_divq_EAX(cpu_env, cpu_T[0]);
+ gen_helper_divq_EAX(cpu_env, cpu_T0);
break;
#endif
}
@@ -4865,28 +4785,24 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 7: /* idiv */
switch(ot) {
case MO_8:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_idivb_AL(cpu_env, cpu_T[0]);
+ gen_helper_idivb_AL(cpu_env, cpu_T0);
break;
case MO_16:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_idivw_AX(cpu_env, cpu_T[0]);
+ gen_helper_idivw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
+ gen_helper_idivl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
+ gen_helper_idivq_EAX(cpu_env, cpu_T0);
break;
#endif
}
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
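
Note: all eight gen_jmp_im(pc_start - s->cs_base) calls around the div/idiv
helpers can go because the helpers now raise their exceptions with a return
address: raise_exception_ra(env, ..., GETPC()) lets the faulting EIP be
recomputed from the TB, so the translator no longer pre-synchronizes it. A
sketch of the helper-side pattern (compare int_helper.c; exact bodies may
differ):

    void helper_divb_AL(CPUX86State *env, target_ulong t0)
    {
        int num = env->regs[R_EAX] & 0xffff;
        int den = t0 & 0xff;
        int q;

        if (den == 0) {
            /* GETPC() identifies the faulting TB, so EIP is recovered
               without an explicit gen_jmp_im in the translator. */
            raise_exception_ra(env, EXCP00_DIVZ, GETPC());
        }
        q = num / den;
        if (q > 0xff) {
            raise_exception_ra(env, EXCP00_DIVZ, GETPC());
        }
        env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff)
                           | (q & 0xff) | ((num % den) & 0xff) << 8;
    }
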
@@ -4899,7 +4815,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (op >= 2 && b == 0xfe) {
- goto illegal_op;
+ goto unknown_op;
}
if (CODE64(s)) {
if (op == 2 || op == 4) {
@@ -4915,9 +4831,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if (op >= 2 && op != 3 && op != 5)
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
@@ -4938,29 +4854,28 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 2: /* call Ev */
/* XXX: optimize if memory (no 'and' is necessary) */
if (dflag == MO_16) {
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
next_eip = s->pc - s->cs_base;
- tcg_gen_movi_tl(cpu_T[1], next_eip);
- gen_push_v(s, cpu_T[1]);
- gen_op_jmp_v(cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T1, next_eip);
+ gen_push_v(s, cpu_T1);
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
case 3: /* lcall Ev */
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
- gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
- tcg_const_i32(s->pc - pc_start));
+ tcg_const_tl(s->pc - s->cs_base));
} else {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
}
@@ -4968,33 +4883,32 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
break;
case 4: /* jmp Ev */
if (dflag == MO_16) {
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
case 5: /* ljmp Ev */
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
- gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(s->pc - pc_start));
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
+ tcg_const_tl(s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(R_CS);
- gen_op_jmp_v(cpu_T[1]);
+ gen_op_jmp_v(cpu_T1);
}
gen_eob(s);
break;
case 6: /* push Ev */
- gen_push_v(s, cpu_T[0]);
+ gen_push_v(s, cpu_T0);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
@@ -5006,7 +4920,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
@@ -5016,8 +4930,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
ot = mo_b_d(b, dflag);
val = insn_get(env, s, ot);
- gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
- tcg_gen_movi_tl(cpu_T[1], val);
+ gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
@@ -5026,20 +4940,20 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
break;
#endif
case MO_32:
- gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
break;
case MO_16:
- gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
- tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
tcg_abort();
@@ -5049,22 +4963,22 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
- gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
- gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
+ gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
break;
#endif
case MO_32:
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
- gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
+ gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
break;
case MO_16:
- gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
- gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
break;
default:
tcg_abort();
@@ -5083,25 +4997,25 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
} else if (b == 0x6b) {
val = (int8_t)insn_get(env, s, MO_8);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
} else {
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_64:
- tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
+ tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
- tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
break;
#endif
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
@@ -5111,14 +5025,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
break;
default:
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
}
set_cc_op(s, CC_OP_MULB + ot);
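
Note: the cc_src computation above encodes the imul overflow test: the
truncated product minus its own sign-extension is non-zero exactly when
significant bits were lost, and that drives CF/OF. The same test in scalar
form:

    #include <stdint.h>

    /* 16-bit IMUL: keep the truncated product, set CF/OF when the full
       product no longer fits (product != sext16(product)). */
    static int16_t imul16(int16_t a, int16_t b, int *cf_of)
    {
        int32_t prod = (int32_t)a * b;
        *cf_of = (prod != (int16_t)prod);
        return (int16_t)prod;
    }
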
@@ -5131,18 +5045,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_mov_v_reg(ot, cpu_T[1], rm);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
}
gen_op_update2_cc();
set_cc_op(s, CC_OP_ADDB + ot);
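
Note: xadd swaps and adds in one go; writing the sum to rm last, as above,
also keeps the rm == reg corner case correct (the register ends up holding
the doubled value). Scalar equivalent:

    #include <stdint.h>

    /* XADD dst, src: src receives the old dst, dst receives the sum. */
    static void xadd32(uint32_t *dst, uint32_t *src)
    {
        uint32_t sum = *dst + *src;
        *src = *dst;
        *dst = sum;
    }
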
@@ -5212,8 +5126,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (dflag == MO_64) {
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
- gen_jmp_im(pc_start - s->cs_base);
- gen_update_cc_op(s);
gen_lea_modrm(env, s, modrm);
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else
@@ -5221,8 +5133,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
- gen_jmp_im(pc_start - s->cs_base);
- gen_update_cc_op(s);
gen_lea_modrm(env, s, modrm);
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
}
@@ -5232,14 +5142,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/**************************/
/* push/pop */
case 0x50 ... 0x57: /* push */
- gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
- gen_push_v(s, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
+ gen_push_v(s, cpu_T0);
break;
case 0x58 ... 0x5f: /* pop */
ot = gen_pop_T0(s);
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
- gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
+ gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
break;
case 0x60: /* pusha */
if (CODE64(s))
@@ -5258,8 +5168,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
val = insn_get(env, s, ot);
else
val = (int8_t)insn_get(env, s, MO_8);
- tcg_gen_movi_tl(cpu_T[0], val);
- gen_push_v(s, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_push_v(s, cpu_T0);
break;
case 0x8f: /* pop Ev */
modrm = cpu_ldub_code(env, s->pc++);
@@ -5269,7 +5179,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
rm = (modrm & 7) | REX_B(s);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
@@ -5288,20 +5198,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
break;
case 0xc9: /* leave */
- /* XXX: exception not precise (ESP is updated before potential exception) */
- if (CODE64(s)) {
- gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
- gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
- } else if (s->ss32) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
- gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
- } else {
- gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
- gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
- }
- ot = gen_pop_T0(s);
- gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
- gen_pop_update(s, ot);
+ gen_leave(s);
break;
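
Note: the open-coded LEAVE sequence is folded into gen_leave(), which also
resolves the deleted XXX about imprecise exceptions: the new helper performs
the potentially faulting load from [EBP] before either register is written.
Roughly (names per the patch series; treat as a sketch):

    static void gen_leave(DisasContext *s)
    {
        TCGMemOp d_ot = mo_pushpop(s, s->dflag);
        TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;

        gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
        gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);    /* may fault: done first */

        tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);

        gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);   /* EBP = popped value */
        gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);   /* ESP = old EBP + size */
    }
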
case 0x06: /* push es */
case 0x0e: /* push cs */
@@ -5310,12 +5207,12 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (CODE64(s))
goto illegal_op;
gen_op_movl_T0_seg(b >> 3);
- gen_push_v(s, cpu_T[0]);
+ gen_push_v(s, cpu_T0);
break;
case 0x1a0: /* push fs */
case 0x1a8: /* push gs */
gen_op_movl_T0_seg((b >> 3) & 7);
- gen_push_v(s, cpu_T[0]);
+ gen_push_v(s, cpu_T0);
break;
case 0x07: /* pop es */
case 0x17: /* pop ss */
@@ -5324,25 +5221,23 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
goto illegal_op;
reg = b >> 3;
ot = gen_pop_T0(s);
- gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
- if (reg == R_SS) {
- /* if reg == SS, inhibit interrupts/trace. */
- /* If several instructions disable interrupts, only the
- _first_ does it */
- if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
- gen_helper_set_inhibit_irq(cpu_env);
- s->tf = 0;
- }
+ /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
+ if (reg == R_SS) {
+ s->tf = 0;
+ gen_eob_inhibit_irq(s, true);
+ } else {
+ gen_eob(s);
+ }
}
break;
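
Note: gen_movl_seg_T0() drops its cur_eip argument and now always flags a TB
end for SS loads, so the interrupt-shadow boilerplate collapses into
gen_eob_inhibit_irq(). A sketch of that helper's core (compare its definition
earlier in the file; details may differ):

    static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
    {
        gen_update_cc_op(s);
        /* If several insns disable interrupts, only the first does it. */
        if (inhibit && !(s->tb->flags & HF_INHIBIT_IRQ_MASK)) {
            gen_helper_set_inhibit_irq(cpu_env);
        } else {
            gen_helper_reset_inhibit_irq(cpu_env);
        }
        /* ...then the usual end-of-TB tail: single-step checks and
           tcg_gen_exit_tb(0), as in gen_eob(). */
    }
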
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
ot = gen_pop_T0(s);
- gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
@@ -5371,11 +5266,11 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_lea_modrm(env, s, modrm);
}
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[0], val);
+ tcg_gen_movi_tl(cpu_T0, val);
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
+ gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
}
break;
case 0x8a:
@@ -5385,7 +5280,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x8e: /* mov seg, Gv */
modrm = cpu_ldub_code(env, s->pc++);
@@ -5393,18 +5288,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
- if (reg == R_SS) {
- /* if reg == SS, inhibit interrupts/trace */
- /* If several instructions disable interrupts, only the
- _first_ does it */
- if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
- gen_helper_set_inhibit_irq(cpu_env);
- s->tf = 0;
- }
+ gen_movl_seg_T0(s, reg);
+ /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
+ if (reg == R_SS) {
+ s->tf = 0;
+ gen_eob_inhibit_irq(s, true);
+ } else {
+ gen_eob(s);
+ }
}
break;
case 0x8c: /* mov Gv, seg */
@@ -5439,45 +5332,42 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
switch (s_ot) {
case MO_UB:
- tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
break;
case MO_SB:
- tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
break;
case MO_UW:
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
break;
default:
case MO_SW:
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
break;
}
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
}
break;
case 0x8d: /* lea */
- ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
- /* we must ensure that no segment is added */
- s->override = -1;
- val = s->addseg;
- s->addseg = 0;
- gen_lea_modrm(env, s, modrm);
- s->addseg = val;
- gen_op_mov_reg_v(ot, reg, cpu_A0);
+ {
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(a);
+ gen_op_mov_reg_v(dflag, reg, ea);
+ }
break;
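
Note: lea stops abusing gen_lea_modrm() with the segment suppressed.
Addressing is now decoded into an AddressParts value (gen_lea_modrm_0) and
only then materialized (gen_lea_modrm_1), so no segment base ever enters the
sum. The decomposition, roughly (field layout is an assumption from the
patch):

    typedef struct AddressParts {
        int def_seg;        /* default segment, or -1 for none */
        int base;           /* base register index, or -1 */
        int index;          /* index register index, or -1 */
        int scale;          /* log2 of the index scale */
        target_long disp;   /* displacement */
    } AddressParts;

gen_lea_modrm_1() folds base + (index << scale) + disp into a TCGv; ordinary
memory operands add the segment base in a separate step.
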
case 0xa0: /* mov EAX, Ov */
@@ -5502,27 +5392,27 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_gen_movi_tl(cpu_A0, offset_addr);
gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
}
break;
case 0xd7: /* xlat */
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
- tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
+ tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
- gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
+ gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
val = insn_get(env, s, MO_8);
- tcg_gen_movi_tl(cpu_T[0], val);
- gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
break;
case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
@@ -5532,16 +5422,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tmp = cpu_ldq_code(env, s->pc);
s->pc += 8;
reg = (b & 7) | REX_B(s);
- tcg_gen_movi_tl(cpu_T[0], tmp);
- gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, tmp);
+ gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
ot = dflag;
val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
- tcg_gen_movi_tl(cpu_T[0], val);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
}
break;
@@ -5560,21 +5450,21 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
do_xchg_reg:
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_mov_v_reg(ot, cpu_T[1], rm);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
/* for xchg, lock is implicit */
if (!(prefixes & PREFIX_LOCK))
gen_helper_lock();
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
if (!(prefixes & PREFIX_LOCK))
gen_helper_unlock();
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
}
break;
case 0xc4: /* les Gv */
@@ -5601,13 +5491,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
- gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
- gen_movl_seg_T0(s, op, pc_start - s->cs_base);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+ gen_movl_seg_T0(s, op);
/* then put the data */
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
@@ -5686,7 +5576,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
opreg = rm;
}
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
if (shift) {
TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
@@ -5837,8 +5727,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
break;
case 0x0c: /* fldenv mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0d: /* fldcw mem */
@@ -5847,8 +5735,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
break;
case 0x0e: /* fnstenv mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0f: /* fnstcw mem */
@@ -5857,24 +5743,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
s->mem_index, MO_LEUW);
break;
case 0x1d: /* fldt mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fldt_ST0(cpu_env, cpu_A0);
break;
case 0x1f: /* fstpt mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fstt_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* frstor mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2e: /* fnsave mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2f: /* fnstsw mem */
@@ -5883,13 +5761,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
s->mem_index, MO_LEUW);
break;
case 0x3c: /* fbld */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fbld_ST0(cpu_env, cpu_A0);
break;
case 0x3e: /* fbstp */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fbst_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
@@ -5903,7 +5777,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_helper_fpop(cpu_env);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
} else {
/* register float ops */
@@ -5924,12 +5798,10 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
switch(rm) {
case 0: /* fnop */
/* check exceptions (FreeBSD FPU probe) */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fwait(cpu_env);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
case 0x0c: /* grp d9/4 */
@@ -5948,7 +5820,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_helper_fxam_ST0(cpu_env);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
case 0x0d: /* grp d9/5 */
@@ -5983,7 +5855,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_helper_fldz_ST0(cpu_env);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
}
break;
@@ -6083,7 +5955,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_helper_fpop(cpu_env);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
case 0x1c:
@@ -6101,7 +5973,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 4: /* fsetpm (287 only, just do nop here) */
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
case 0x1d: /* fucomi */
@@ -6153,7 +6025,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_helper_fpop(cpu_env);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
case 0x38: /* ffreep sti, undocumented op */
@@ -6164,11 +6036,11 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
switch(rm) {
case 0:
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
case 0x3d: /* fucomip */
@@ -6214,7 +6086,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
}
break;
@@ -6275,7 +6147,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x6c: /* insS */
case 0x6d:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
@@ -6290,7 +6162,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x6e: /* outsS */
case 0x6f:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
@@ -6310,15 +6182,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xe5:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
- tcg_gen_movi_tl(cpu_T[0], val);
+ tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
- gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
+ gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
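
Note: every IN/OUT path gains a gen_bpt_io() call after the access so that the
new I/O breakpoint support (bpt_helper.c: DR7 I/O read/write breakpoints with
CR4.DE set) sees the port, the access size, and the EIP of the next insn. A
sketch of the hook (compare its definition earlier in the file; the flag name
is from the same series):

    static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
    {
        if (s->flags & HF_IOBPT_MASK) {    /* any I/O breakpoint armed */
            TCGv_i32 t_size = tcg_const_i32(1 << ot);
            TCGv t_next = tcg_const_tl(s->pc - s->cs_base);

            gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
            tcg_temp_free_i32(t_size);
            tcg_temp_free(t_next);
        }
    }
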
@@ -6328,17 +6201,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xe7:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
- tcg_gen_movi_tl(cpu_T[0], val);
+ tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
- gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
+ gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
@@ -6347,15 +6221,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xec:
case 0xed:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
@@ -6364,17 +6239,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xee:
case 0xef:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
- gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
+ gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
@@ -6389,14 +6265,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
ot = gen_pop_T0(s);
gen_stack_update(s, val + (1 << ot));
/* Note that gen_pop_T0 uses a zero-extending load. */
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
case 0xc3: /* ret */
ot = gen_pop_T0(s);
gen_pop_update(s, ot);
/* Note that gen_pop_T0 uses a zero-extending load. */
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
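
Note: gen_bnd_jmp() shows up after every near call, ret, jmp and jcc in this
patch: MPX requires branches without a BND (F2) prefix to invalidate the bound
registers. The check is cheap at translate time; a sketch (flag names per the
MPX series):

    static void gen_bnd_jmp(DisasContext *s)
    {
        /* Clear BND0-3 only if MPX is enabled, the regs are known to be
           in use, and no BND prefix is present; the helper still honours
           BNDCFG.BNDPRESERVE at run time. */
        if ((s->prefix & PREFIX_REPNZ) == 0
            && (s->flags & HF_MPX_EN_MASK) != 0
            && (s->flags & HF_MPX_IU_MASK) != 0) {
            gen_helper_bnd_jmp(cpu_env);
        }
    }
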
case 0xca: /* lret im */
@@ -6411,13 +6289,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_stack_A0(s);
/* pop offset */
- gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
/* NOTE: keeping EIP updated is not a problem in case of
exception */
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
/* pop selector */
- gen_op_addl_A0_im(1 << dflag);
- gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
+ gen_add_A0_im(s, 1 << dflag);
+ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
gen_op_movl_seg_T0_vm(R_CS);
/* add stack offset */
gen_stack_update(s, val + (2 << dflag));
@@ -6441,8 +6319,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
set_cc_op(s, CC_OP_EFLAGS);
}
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
@@ -6463,8 +6339,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
- tcg_gen_movi_tl(cpu_T[0], next_eip);
- gen_push_v(s, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, next_eip);
+ gen_push_v(s, cpu_T0);
+ gen_bnd_jmp(s);
gen_jmp(s, tval);
}
break;
@@ -6478,8 +6355,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
- tcg_gen_movi_tl(cpu_T[0], selector);
- tcg_gen_movi_tl(cpu_T[1], offset);
+ tcg_gen_movi_tl(cpu_T0, selector);
+ tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_lcall;
case 0xe9: /* jmp im */
@@ -6494,6 +6371,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
+ gen_bnd_jmp(s);
gen_jmp(s, tval);
break;
case 0xea: /* ljmp im */
@@ -6506,8 +6384,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
- tcg_gen_movi_tl(cpu_T[0], selector);
- tcg_gen_movi_tl(cpu_T[1], offset);
+ tcg_gen_movi_tl(cpu_T0, selector);
+ tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
@@ -6533,12 +6411,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (dflag == MO_16) {
tval &= 0xffff;
}
+ gen_bnd_jmp(s);
gen_jcc(s, b, tval, next_eip);
break;
case 0x190 ... 0x19f: /* setcc Gv */
modrm = cpu_ldub_code(env, s->pc++);
- gen_setcc1(s, b, cpu_T[0]);
+ gen_setcc1(s, b, cpu_T0);
gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
@@ -6559,8 +6438,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
- gen_helper_read_eflags(cpu_T[0], cpu_env);
- gen_push_v(s, cpu_T[0]);
+ gen_helper_read_eflags(cpu_T0, cpu_env);
+ gen_push_v(s, cpu_T0);
}
break;
case 0x9d: /* popf */
@@ -6571,13 +6450,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
ot = gen_pop_T0(s);
if (s->cpl == 0) {
if (dflag != MO_16) {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK |
IOPL_MASK)));
} else {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK | IOPL_MASK)
@@ -6586,14 +6465,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
if (s->cpl <= s->iopl) {
if (dflag != MO_16) {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)));
} else {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
@@ -6603,11 +6482,11 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
} else {
if (dflag != MO_16) {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)));
} else {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)
& 0xffff));
@@ -6624,19 +6503,19 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x9e: /* sahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
- gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
+ gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
- tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
break;
case 0x9f: /* lahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_compute_eflags(s);
/* Note: gen_compute_eflags() only gives the condition codes */
- tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
- gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
+ tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
+ gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
break;
case 0xf5: /* cmc */
gen_compute_eflags(s);
@@ -6670,15 +6549,15 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod != 3) {
s->rip_offset = 1;
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
/* load shift */
val = cpu_ldub_code(env, s->pc++);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
if (op < 4)
- goto illegal_op;
+ goto unknown_op;
op -= 4;
goto bt_op;
case 0x1a3: /* bt Gv, Ev */
@@ -6698,46 +6577,46 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
- gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
+ gen_op_mov_v_reg(MO_32, cpu_T1, reg);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
/* specific case: we need to add a displacement */
- gen_exts(ot, cpu_T[1]);
- tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
+ gen_exts(ot, cpu_T1);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
bt_op:
- tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
- tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
+ tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
switch(op) {
case 0:
break;
case 1:
tcg_gen_movi_tl(cpu_tmp0, 1);
- tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
case 2:
tcg_gen_movi_tl(cpu_tmp0, 1);
- tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
- tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
default:
case 3:
tcg_gen_movi_tl(cpu_tmp0, 1);
- tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
- tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
}
if (op != 0) {
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
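
Note: bt/bts/btr/btc share the shift into cpu_tmp4 (the tested bit becomes CF)
and differ only in the single-bit update. Scalar summary:

    #include <stdint.h>

    /* op: 0 = bt, 1 = bts, 2 = btr, 3 = btc; *cf gets the tested bit. */
    static uint64_t bit_op(uint64_t x, unsigned n, int op, int *cf)
    {
        uint64_t bit = (uint64_t)1 << (n & 63);
        *cf = (x >> (n & 63)) & 1;
        switch (op) {
        case 1:  return x | bit;     /* bts: set */
        case 2:  return x & ~bit;    /* btr: reset */
        case 3:  return x ^ bit;     /* btc: complement */
        default: return x;           /* bt: test only */
        }
    }
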
@@ -6777,7 +6656,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_extu(ot, cpu_T[0]);
+ gen_extu(ot, cpu_T0);
/* Note that lzcnt and tzcnt are in different extensions. */
if ((prefixes & PREFIX_REPZ)
@@ -6785,18 +6664,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
? s->cpuid_ext3_features & CPUID_EXT3_ABM
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
int size = 8 << ot;
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
if (b & 1) {
/* For lzcnt, reduce the target_ulong result by the
number of zeros that we expect to find at the top. */
- gen_helper_clz(cpu_T[0], cpu_T[0]);
- tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
+ gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
} else {
/* For tzcnt, a zero input must return the operand size:
force all bits outside the operand size to 1. */
target_ulong mask = (target_ulong)-2 << (size - 1);
- tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
- gen_helper_ctz(cpu_T[0], cpu_T[0]);
+ tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
+ gen_helper_ctz(cpu_T0, cpu_T0);
}
/* For lzcnt/tzcnt, C and Z bits are defined and are
related to the result. */
@@ -6805,24 +6684,24 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
/* For bsr/bsf, only the Z bit is defined and it is related
to the input and not the result. */
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_LOGICB + ot);
if (b & 1) {
/* For bsr, return the bit index of the first 1 bit,
not the count of leading zeros. */
- gen_helper_clz(cpu_T[0], cpu_T[0]);
- tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
+ gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
} else {
- gen_helper_ctz(cpu_T[0], cpu_T[0]);
+ gen_helper_ctz(cpu_T0, cpu_T0);
}
/* ??? The manual says that the output is undefined when the
input is zero, but real hardware leaves it unchanged, and
real programs appear to depend on that. */
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
- cpu_regs[reg], cpu_T[0]);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
+ cpu_regs[reg], cpu_T0);
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
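
Note: both adjustments above reduce lzcnt/tzcnt to the generic clz/ctz
helpers: lzcnt subtracts the leading zeros a full-width target_ulong has
beyond the operand, and tzcnt forces all bits outside the operand to 1 so a
zero input yields the operand size. The same reductions for a 16-bit operand
(a sketch using GCC/Clang builtins):

    #include <stdint.h>

    static unsigned clz64(uint64_t x) { return x ? __builtin_clzll(x) : 64; }
    static unsigned ctz64(uint64_t x) { return x ? __builtin_ctzll(x) : 64; }

    static unsigned lzcnt16(uint16_t x)
    {
        return clz64(x) - (64 - 16);              /* drop surplus zeros */
    }

    static unsigned tzcnt16(uint16_t x)
    {
        uint64_t mask = (uint64_t)-2 << (16 - 1); /* ones above bit 15 */
        return ctz64(x | mask);                   /* zero input -> 16 */
    }
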
/************************/
/* bcd */
@@ -6895,8 +6774,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
(HF_MP_MASK | HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fwait(cpu_env);
}
break;
@@ -6946,27 +6823,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
break;
case 0xfb: /* sti */
- if (!s->vm86) {
- if (s->cpl <= s->iopl) {
- gen_sti:
- gen_helper_sti(cpu_env);
- /* interruptions are enabled only the first insn after sti */
- /* If several instructions disable interrupts, only the
- _first_ does it */
- if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
- gen_helper_set_inhibit_irq(cpu_env);
- /* give a chance to handle pending irqs */
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
- } else {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- }
+ if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
+ gen_helper_sti(cpu_env);
+ /* interruptions are enabled only the first insn after sti */
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob_inhibit_irq(s, true);
} else {
- if (s->iopl == 3) {
- goto gen_sti;
- } else {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- }
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
break;
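
Note: the sti rewrite folds the vm86 and protected-mode privilege checks into
a single predicate (deleting the gen_sti label) and reuses
gen_eob_inhibit_irq() for the one-insn interrupt shadow. The predicate in
isolation:

    #include <stdbool.h>

    /* STI is allowed with IOPL == 3 in vm86 mode, else with CPL <= IOPL. */
    static bool sti_allowed(bool vm86, int cpl, int iopl)
    {
        return vm86 ? iopl == 3 : cpl <= iopl;
    }
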
case 0x62: /* bound */
@@ -6978,10 +6841,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_lea_modrm(env, s, modrm);
- gen_jmp_im(pc_start - s->cs_base);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
if (ot == MO_16) {
gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
} else {
@@ -6992,24 +6854,24 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
if (dflag == MO_64) {
- gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
- tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
+ gen_op_mov_v_reg(MO_64, cpu_T0, reg);
+ tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
- gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, reg);
+ tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
+ tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_32, reg, cpu_T0);
}
break;
case 0xd6: /* salc */
if (CODE64(s))
goto illegal_op;
- gen_compute_eflags_c(s, cpu_T[0]);
- tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
+ gen_compute_eflags_c(s, cpu_T0);
+ tcg_gen_neg_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
@@ -7094,8 +6956,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysenter(cpu_env);
gen_eob(s);
}
@@ -7107,8 +6967,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
gen_eob(s);
}
@@ -7125,8 +6983,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (s->lma) {
@@ -7160,7 +7016,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
@@ -7172,8 +7029,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_jmp_im(pc_start - s->cs_base);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
break;
@@ -7181,7 +7037,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
@@ -7193,8 +7050,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_jmp_im(pc_start - s->cs_base);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
break;
@@ -7205,279 +7061,347 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_update_cc_op(s);
if (op == 4) {
- gen_helper_verr(cpu_env, cpu_T[0]);
+ gen_helper_verr(cpu_env, cpu_T0);
} else {
- gen_helper_verw(cpu_env, cpu_T[0]);
+ gen_helper_verw(cpu_env, cpu_T0);
}
set_cc_op(s, CC_OP_EFLAGS);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
+
case 0x101:
modrm = cpu_ldub_code(env, s->pc++);
- mod = (modrm >> 6) & 3;
- op = (modrm >> 3) & 7;
- rm = modrm & 7;
- switch(op) {
- case 0: /* sgdt */
- if (mod == 3)
- goto illegal_op;
+ switch (modrm) {
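+        /* The CASE_MODRM_*_OP macros, defined earlier in this file,
+           expand to the modrm bytes with the given reg field; the MEM
+           variant matches only mod != 3. */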
+ CASE_MODRM_MEM_OP(0): /* sgdt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
- gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0,
+ cpu_env, offsetof(CPUX86State, gdt.limit));
+ gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
- gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
- case 1:
- if (mod == 3) {
- switch (rm) {
- case 0: /* monitor */
- if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
- s->cpl != 0)
- goto illegal_op;
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
- gen_extu(s->aflag, cpu_A0);
- gen_add_A0_ds_seg(s);
- gen_helper_monitor(cpu_env, cpu_A0);
- break;
- case 1: /* mwait */
- if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
- s->cpl != 0)
- goto illegal_op;
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
- gen_eob(s);
- break;
- case 2: /* clac */
- if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
- s->cpl != 0) {
- goto illegal_op;
- }
- gen_helper_clac(cpu_env);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
- break;
- case 3: /* stac */
- if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
- s->cpl != 0) {
- goto illegal_op;
- }
- gen_helper_stac(cpu_env);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
- break;
- default:
- goto illegal_op;
- }
- } else { /* sidt */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
- gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
- gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
- gen_add_A0_im(s, 2);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
- if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
- }
- gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
+
+ case 0xc8: /* monitor */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
+ gen_extu(s->aflag, cpu_A0);
+ gen_add_A0_ds_seg(s);
+ gen_helper_monitor(cpu_env, cpu_A0);
+ break;
+
+ case 0xc9: /* mwait */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ goto illegal_op;
}
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
+ gen_eob(s);
break;
- case 2: /* lgdt */
- case 3: /* lidt */
- if (mod == 3) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- switch(rm) {
- case 0: /* VMRUN */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
- tcg_const_i32(s->pc - pc_start));
- tcg_gen_exit_tb(0);
- s->is_jmp = DISAS_TB_JUMP;
- }
- break;
- case 1: /* VMMCALL */
- if (!(s->flags & HF_SVME_MASK))
- goto illegal_op;
- gen_helper_vmmcall(cpu_env);
- break;
- case 2: /* VMLOAD */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
- }
- break;
- case 3: /* VMSAVE */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
- }
- break;
- case 4: /* STGI */
- if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
- !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_stgi(cpu_env);
- }
- break;
- case 5: /* CLGI */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_clgi(cpu_env);
- }
- break;
- case 6: /* SKINIT */
- if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
- !s->pe)
- goto illegal_op;
- gen_helper_skinit(cpu_env);
- break;
- case 7: /* INVLPGA */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_invlpga(cpu_env,
- tcg_const_i32(s->aflag - 1));
- }
- break;
- default:
- goto illegal_op;
- }
- } else if (s->cpl != 0) {
+
+ case 0xca: /* clac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_clac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xcb: /* stac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_stac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(1): /* sidt */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
+ gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+ gen_add_A0_im(s, 2);
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ break;
+
+ case 0xd0: /* xgetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
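+            /* xgetbv: ECX selects the XCR; the 64-bit value is
+               returned in EDX:EAX. */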
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
+ break;
+
+ case 0xd1: /* xsetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_svm_check_intercept(s, pc_start,
- op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
- gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
- gen_add_A0_im(s, 2);
- gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
- if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
- }
- if (op == 2) {
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
- tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
- } else {
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
- tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
- }
+ break;
}
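+            /* xsetbv: write EDX:EAX into the XCR selected by ECX. */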
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
+ /* End TB because translation flags may change. */
+            gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
break;
- case 4: /* smsw */
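+        /* Record eip and cc_op so restore_state_to_opc() can recover them. */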
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
-#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
-#else
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
-#endif
- gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
+
+ case 0xd8: /* VMRUN */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
+ tcg_const_i32(s->pc - pc_start));
+ tcg_gen_exit_tb(0);
+ s->is_jmp = DISAS_TB_JUMP;
break;
- case 6: /* lmsw */
+
+ case 0xd9: /* VMMCALL */
+ if (!(s->flags & HF_SVME_MASK)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmmcall(cpu_env);
+ break;
+
+ case 0xda: /* VMLOAD */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ case 0xdb: /* VMSAVE */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ case 0xdc: /* STGI */
+ if ((!(s->flags & HF_SVME_MASK)
+ && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_stgi(cpu_env);
+ break;
+
+ case 0xdd: /* CLGI */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_clgi(cpu_env);
+ break;
+
+ case 0xde: /* SKINIT */
+ if ((!(s->flags & HF_SVME_MASK)
+ && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !s->pe) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_skinit(cpu_env);
+ break;
+
+ case 0xdf: /* INVLPGA */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ CASE_MODRM_MEM_OP(2): /* lgdt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
+ break;
+
+ CASE_MODRM_MEM_OP(3): /* lidt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
+ break;
+
+ CASE_MODRM_OP(4): /* smsw */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
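+            /* smsw stores CR0[15:0]; a register destination in 64-bit
+               mode may use the full operand size. */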
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
+ if (CODE64(s)) {
+ mod = (modrm >> 6) & 3;
+ ot = (mod != 3 ? MO_16 : s->dflag);
} else {
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
- gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_helper_lmsw(cpu_env, cpu_T[0]);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
+ ot = MO_16;
}
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
- case 7:
- if (mod != 3) { /* invlpg */
+ case 0xee: /* rdpkru */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
+ break;
+ case 0xef: /* wrpkru */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
+ break;
+ CASE_MODRM_OP(6): /* lmsw */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ gen_helper_lmsw(cpu_env, cpu_T0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(7): /* invlpg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_invlpg(cpu_env, cpu_A0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xf8: /* swapgs */
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- gen_lea_modrm(env, s, modrm);
- gen_helper_invlpg(cpu_env, cpu_A0);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
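+                /* swapgs: exchange GS base with MSR_KERNEL_GS_BASE. */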
+ tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
+ tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
+ tcg_gen_st_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
}
- } else {
- switch (rm) {
- case 0: /* swapgs */
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- tcg_gen_ld_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,segs[R_GS].base));
- tcg_gen_ld_tl(cpu_T[1], cpu_env,
- offsetof(CPUX86State,kernelgsbase));
- tcg_gen_st_tl(cpu_T[1], cpu_env,
- offsetof(CPUX86State,segs[R_GS].base));
- tcg_gen_st_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,kernelgsbase));
- }
- } else
+ break;
+ }
#endif
- {
- goto illegal_op;
- }
- break;
- case 1: /* rdtscp */
- if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
- goto illegal_op;
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- if (s->tb->cflags & CF_USE_ICOUNT) {
- gen_io_start();
- }
- gen_helper_rdtscp(cpu_env);
- if (s->tb->cflags & CF_USE_ICOUNT) {
- gen_io_end();
- gen_jmp(s, s->pc - s->cs_base);
- }
- break;
- default:
- goto illegal_op;
- }
+ goto illegal_op;
+
+ case 0xf9: /* rdtscp */
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdtscp(cpu_env);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
}
break;
+
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
+
case 0x108: /* invd */
case 0x109: /* wbinvd */
if (s->cpl != 0) {
@@ -7500,16 +7424,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
+ gen_op_mov_v_reg(MO_32, cpu_T0, rm);
/* sign extend */
if (d_ot == MO_64) {
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
} else
#endif
@@ -7574,9 +7498,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
t0 = tcg_temp_local_new();
gen_update_cc_op(s);
if (b == 0x102) {
- gen_helper_lar(t0, cpu_env, cpu_T[0]);
+ gen_helper_lar(t0, cpu_env, cpu_T0);
} else {
- gen_helper_lsl(t0, cpu_env, cpu_T[0]);
+ gen_helper_lsl(t0, cpu_env, cpu_T0);
}
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
@@ -7598,7 +7522,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 3: /* prefetchnt0 */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(env, s, modrm);
+ gen_nop_modrm(env, s, modrm);
/* nothing more to do */
break;
default: /* nop (multi byte) */
@@ -7606,7 +7530,199 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
break;
}
break;
- case 0x119 ... 0x11f: /* nop (multi byte) */
+ case 0x11a:
+ modrm = cpu_ldub_code(env, s->pc++);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (prefixes & PREFIX_REPZ) {
+ /* bndcl */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcu */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
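+                /* cpu_bndu holds the upper bound in one's-complement
+                   form, so invert it before the unsigned compare. */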
+ TCGv_i64 notu = tcg_temp_new_i64();
+ tcg_gen_not_i64(notu, cpu_bndu[reg]);
+ gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
+ tcg_temp_free_i64(notu);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- from reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
+ tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ } else if (mod != 3) {
+ /* bndldx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
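+                /* a.base < -1 rejects rip-relative addressing, which
+                   generates #UD here, as for bndmk below. */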
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(cpu_A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(cpu_T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
+ tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
+ offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
+ } else {
+ gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
+ tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
+ }
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ }
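+    /* bnd insns execute as nops when MPX is not enabled. */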
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x11b:
+ modrm = cpu_ldub_code(env, s->pc++);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (mod != 3 && (prefixes & PREFIX_REPZ)) {
+ /* bndmk */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (a.base >= 0) {
+ tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
+ }
+ } else if (a.base == -1) {
+                    /* with no base register, the lower bound is 0 */
+ tcg_gen_movi_i64(cpu_bndl[reg], 0);
+ } else {
+ /* rip-relative generates #ud */
+ goto illegal_op;
+ }
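+                /* The upper bound is stored in one's-complement form,
+                   i.e. ~lea, truncated outside 64-bit mode. */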
+ tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ }
+ tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ break;
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcn */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- to reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
+ tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ }
+ } else if (mod != 3) {
+ /* bndstx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(cpu_A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(cpu_T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ } else {
+ gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ }
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
modrm = cpu_ldub_code(env, s->pc++);
gen_nop_modrm(env, s, modrm);
break;
@@ -7640,18 +7756,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
- cpu_T[0]);
+ cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
- gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
}
break;
@@ -7672,19 +7788,21 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
ot = MO_64;
else
ot = MO_32;
- /* XXX: do it dynamically with CR4.DE bit */
- if (reg == 4 || reg == 5 || reg >= 8)
+ if (reg >= 8) {
goto illegal_op;
+ }
if (b & 2) {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
- gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ tcg_gen_movi_i32(cpu_tmp2_i32, reg);
+ gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ tcg_gen_movi_i32(cpu_tmp2_i32, reg);
+ gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
@@ -7714,83 +7832,195 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
break;
case 0x1ae:
modrm = cpu_ldub_code(env, s->pc++);
- mod = (modrm >> 6) & 3;
- op = (modrm >> 3) & 7;
- switch(op) {
- case 0: /* fxsave */
- if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
- (s->prefix & PREFIX_LOCK))
+ switch (modrm) {
+ CASE_MODRM_MEM_OP(0): /* fxsave */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
+ }
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
+ gen_helper_fxsave(cpu_env, cpu_A0);
break;
- case 1: /* fxrstor */
- if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
- (s->prefix & PREFIX_LOCK))
+
+ CASE_MODRM_MEM_OP(1): /* fxrstor */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
+ }
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
+ gen_helper_fxrstor(cpu_env, cpu_A0);
break;
- case 2: /* ldmxcsr */
- case 3: /* stmxcsr */
+
+ CASE_MODRM_MEM_OP(2): /* ldmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+ goto illegal_op;
+ }
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
- mod == 3)
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
+ gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
+ break;
+
+ CASE_MODRM_MEM_OP(3): /* stmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
+ }
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
gen_lea_modrm(env, s, modrm);
- if (op == 2) {
- tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
- s->mem_index, MO_LEUL);
- gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
- } else {
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
- gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+ break;
+
+ CASE_MODRM_MEM_OP(4): /* xsave */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
}
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
break;
- case 5: /* lfence */
- case 6: /* mfence */
- if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
+
+ CASE_MODRM_MEM_OP(5): /* xrstor */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
+ /* XRSTOR is how MPX is enabled, which changes how
+ we translate. Thus we need to end the TB. */
+ gen_update_cc_op(s);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
break;
- case 7: /* sfence / clflush */
- if ((modrm & 0xc7) == 0xc0) {
- /* sfence */
- /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
- if (!(s->cpuid_features & CPUID_SSE))
+
+ CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
+ /* clwb */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
goto illegal_op;
+ }
+ gen_nop_modrm(env, s, modrm);
} else {
- /* clflush */
- if (!(s->cpuid_features & CPUID_CLFLUSH))
+ /* xsaveopt */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
+ || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
+ }
gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
+ }
+ break;
+
+ CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
+ /* clflushopt */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
+ goto illegal_op;
+ }
+ } else {
+ /* clflush */
+ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
+ || !(s->cpuid_features & CPUID_CLFLUSH)) {
+ goto illegal_op;
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+
+ case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
+        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
+ case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
+        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
+ if (CODE64(s)
+ && (prefixes & PREFIX_REPZ)
+ && !(prefixes & PREFIX_LOCK)
+ && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
+ TCGv base, treg, src, dst;
+
+ /* Preserve hflags bits by testing CR4 at runtime. */
+ tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
+ gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
+
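+                /* modrm bit 3 chooses GS over FS; bit 4 chooses write over read. */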
+ base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
+ treg = cpu_regs[(modrm & 7) | REX_B(s)];
+
+ if (modrm & 0x10) {
+ /* wr*base */
+ dst = base, src = treg;
+ } else {
+ /* rd*base */
+ dst = treg, src = base;
+ }
+
+ if (s->dflag == MO_32) {
+ tcg_gen_ext32u_tl(dst, src);
+ } else {
+ tcg_gen_mov_tl(dst, src);
+ }
+ break;
+ }
+ goto unknown_op;
+
+ case 0xf8: /* sfence / pcommit */
+ if (prefixes & PREFIX_DATA) {
+ /* pcommit */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ break;
+ }
+ /* fallthru */
+ case 0xf9 ... 0xff: /* sfence */
+ case 0xe8 ... 0xef: /* lfence */
+ case 0xf0 ... 0xf7: /* mfence */
+ if (!(s->cpuid_features & CPUID_SSE2)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
}
break;
+
default:
- goto illegal_op;
+ goto unknown_op;
}
break;
+
case 0x10d: /* 3DNow! prefetch(w) */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(env, s, modrm);
- /* ignore for now */
+ gen_nop_modrm(env, s, modrm);
break;
case 0x1aa: /* rsm */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
@@ -7818,8 +8048,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
set_cc_op(s, CC_OP_EFLAGS);
break;
@@ -7837,7 +8067,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_sse(env, s, b, pc_start, rex_r);
break;
default:
- goto illegal_op;
+ goto unknown_op;
}
/* lock generation */
if (s->prefix & PREFIX_LOCK)
@@ -7847,11 +8077,17 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (s->prefix & PREFIX_LOCK)
gen_helper_unlock();
/* XXX: ensure that no lock was generated */
- gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+ gen_illegal_opcode(s);
+ return s->pc;
+ unknown_op:
+ if (s->prefix & PREFIX_LOCK)
+ gen_helper_unlock();
+ /* XXX: ensure that no lock was generated */
+ gen_unknown_opcode(env, s);
return s->pc;
}
-void optimize_flags_init(void)
+void tcg_x86_init(void)
{
static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
@@ -7882,38 +8118,66 @@ void optimize_flags_init(void)
[R_ESP] = "esp",
#endif
};
+ static const char seg_base_names[6][8] = {
+ [R_CS] = "cs_base",
+ [R_DS] = "ds_base",
+ [R_ES] = "es_base",
+ [R_FS] = "fs_base",
+ [R_GS] = "gs_base",
+ [R_SS] = "ss_base",
+ };
+ static const char bnd_regl_names[4][8] = {
+ "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
+ };
+ static const char bnd_regu_names[4][8] = {
+ "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
+ };
int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUX86State, cc_op), "cc_op");
- cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
+ cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
"cc_dst");
- cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
+ cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
"cc_src");
- cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
+ cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
"cc_src2");
for (i = 0; i < CPU_NB_REGS; ++i) {
- cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
+ cpu_regs[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUX86State, regs[i]),
reg_names[i]);
}
+
+ for (i = 0; i < 6; ++i) {
+ cpu_seg_base[i]
+ = tcg_global_mem_new(cpu_env,
+ offsetof(CPUX86State, segs[i].base),
+ seg_base_names[i]);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ cpu_bndl[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].lb),
+ bnd_regl_names[i]);
+ cpu_bndu[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].ub),
+ bnd_regu_names[i]);
+ }
+
+ helper_lock_init();
}
-/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
- basic block 'tb'. If search_pc is TRUE, also generate PC
- information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(X86CPU *cpu,
- TranslationBlock *tb,
- bool search_pc)
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- CPUX86State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
- CPUBreakpoint *bp;
- int j, lj;
uint64_t flags;
target_ulong pc_start;
target_ulong cs_base;
@@ -7943,13 +8207,14 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
- dc->mem_index = cpu_mmu_index(env);
+ dc->mem_index = cpu_mmu_index(env, false);
}
dc->cpuid_features = env->features[FEAT_1_EDX];
dc->cpuid_ext_features = env->features[FEAT_1_ECX];
dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
+ dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
dc->lma = (flags >> HF_LMA_SHIFT) & 1;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
@@ -7978,8 +8243,8 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
printf("ERROR addseg\n");
#endif
- cpu_T[0] = tcg_temp_new();
- cpu_T[1] = tcg_temp_new();
+ cpu_T0 = tcg_temp_new();
+ cpu_T1 = tcg_temp_new();
cpu_A0 = tcg_temp_new();
cpu_tmp0 = tcg_temp_new();
@@ -7993,40 +8258,37 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
- lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0)
+ if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
gen_tb_start(tb);
for(;;) {
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == pc_ptr &&
- !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
- gen_debug(dc, pc_ptr - dc->cs_base);
- goto done_generating;
- }
- }
- }
- if (search_pc) {
- j = tcg_op_buf_count();
- if (lj < j) {
- lj++;
- while (lj < j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
- tcg_ctx.gen_opc_pc[lj] = pc_ptr;
- gen_opc_cc_op[lj] = dc->cc_op;
- tcg_ctx.gen_opc_instr_start[lj] = 1;
- tcg_ctx.gen_opc_icount[lj] = num_insns;
- }
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ tcg_gen_insn_start(pc_ptr, dc->cc_op);
+ num_insns++;
+
+ /* If RF is set, suppress an internally generated breakpoint. */
+ if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
+ tb->flags & HF_RF_MASK
+ ? BP_GDB : BP_ANY))) {
+ gen_debug(dc, pc_ptr - dc->cs_base);
+            /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+               properly cleared -- thus we increment the PC here so that
+               the logic setting tb->size below does the right thing. */
+ pc_ptr += 1;
+ goto done_generating;
+ }
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
+ }
pc_ptr = disas_insn(env, dc, pc_ptr);
- num_insns++;
/* stop translation if indicated */
if (dc->is_jmp)
break;
@@ -8074,14 +8336,6 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
done_generating:
gen_tb_end(tb, num_insns);
- /* we don't forget to fill the last values */
- if (search_pc) {
- j = tcg_op_buf_count();
- lj++;
- while (lj <= j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
-
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
int disas_flags;
@@ -8098,42 +8352,16 @@ done_generating:
}
#endif
- if (!search_pc) {
- tb->size = pc_ptr - pc_start;
- tb->icount = num_insns;
- }
+ tb->size = pc_ptr - pc_start;
+ tb->icount = num_insns;
}
-void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
+ target_ulong *data)
{
- gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
-}
-
-void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
-{
- gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
-}
-
-void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
-{
- int cc_op;
-#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
- int i;
- qemu_log("RESTORE:\n");
- for(i = 0;i <= pc_pos; i++) {
- if (tcg_ctx.gen_opc_instr_start[i]) {
- qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
- tcg_ctx.gen_opc_pc[i]);
- }
- }
- qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
- pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
- (uint32_t)tb->cs_base);
- }
-#endif
- env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
- cc_op = gen_opc_cc_op[pc_pos];
- if (cc_op != CC_OP_DYNAMIC)
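+    /* data[] was filled by tcg_gen_insn_start(pc_ptr, cc_op). */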
+ int cc_op = data[1];
+ env->eip = data[0] - tb->cs_base;
+ if (cc_op != CC_OP_DYNAMIC) {
env->cc_op = cc_op;
+ }
}