path: root/kernel/arch/x86/kvm
author    Don Dugger <donald.d.dugger@intel.com>    2015-10-29 22:15:25 +0000
committer Gerrit Code Review <gerrit@172.30.200.206>    2015-10-29 22:15:25 +0000
commit    afc76d554ed517e38d46b6b182a7016406a1323f (patch)
tree      06133cb33a43488837ea67667458e1582f2f40ce /kernel/arch/x86/kvm
parent    41f827bfbb10e03c4d228fbcc801dd51fb9983b0 (diff)
parent    ec0a2ed6d8a5e555edef907895c041e285fdb495 (diff)
Merge "These changes are a raw update to a vanilla kernel 4.1.10, with the recently announced rt patch patch-4.1.10-rt10.patch. No further changes needed."
Diffstat (limited to 'kernel/arch/x86/kvm')
-rw-r--r--    kernel/arch/x86/kvm/mmu.c    45
-rw-r--r--    kernel/arch/x86/kvm/x86.c    2
2 files changed, 1 insertion(+), 46 deletions(-)
diff --git a/kernel/arch/x86/kvm/mmu.c b/kernel/arch/x86/kvm/mmu.c
index b73337634..554e877e0 100644
--- a/kernel/arch/x86/kvm/mmu.c
+++ b/kernel/arch/x86/kvm/mmu.c
@@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep)
{
return ACCESS_ONCE(*sptep);
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- /* It is valid if the spte is zapped. */
- return spte == 0ull;
-}
#else
union split_spte {
struct {
@@ -478,23 +472,6 @@ retry:
return spte.spte;
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- union split_spte sspte = (union split_spte)spte;
- u32 high_mmio_mask = shadow_mmio_mask >> 32;
-
- /* It is valid if the spte is zapped. */
- if (spte == 0ull)
- return true;
-
- /* It is valid if the spte is being zapped. */
- if (sspte.spte_low == 0ull &&
- (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
- return true;
-
- return false;
-}
#endif
static bool spte_is_locklessly_modifiable(u64 spte)
@@ -3343,21 +3320,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
return vcpu_match_mmio_gva(vcpu, addr);
}
-
-/*
- * On direct hosts, the last spte is only allows two states
- * for mmio page fault:
- * - It is the mmio spte
- * - It is zapped or it is being zapped.
- *
- * This function completely checks the spte when the last spte
- * is not the mmio spte.
- */
-static bool check_direct_spte_mmio_pf(u64 spte)
-{
- return __check_direct_spte_mmio_pf(spte);
-}
-
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
struct kvm_shadow_walk_iterator iterator;
@@ -3400,13 +3362,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
}
/*
- * It's ok if the gva is remapped by other cpus on shadow guest,
- * it's a BUG if the gfn is not a mmio page.
- */
- if (direct && !check_direct_spte_mmio_pf(spte))
- return RET_MMIO_PF_BUG;
-
- /*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
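
The PAE-path helper removed above decides whether a direct MMIO fault hit a spte that is fully zapped, or is half-way through being zapped: the check treats a spte whose low 32-bit word is already zero but whose high word still carries the MMIO mask bits as "being zapped". Below is a minimal standalone sketch of that check, based only on the removed lines; it is a userspace model, not kernel code, and the shadow_mmio_mask value is an arbitrary stand-in chosen for illustration.

/* Standalone model of the removed PAE-path check (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t shadow_mmio_mask = 0xffull << 49;   /* stand-in value, not the kernel's */

union split_spte {
    struct {
        uint32_t spte_low;
        uint32_t spte_high;
    };
    uint64_t spte;
};

/* True if the spte is zapped, or the low word is cleared while the
 * high word still carries the MMIO mask bits (zap in progress). */
static bool check_direct_spte_mmio_pf(uint64_t spte)
{
    union split_spte sspte = { .spte = spte };
    uint32_t high_mmio_mask = shadow_mmio_mask >> 32;

    if (spte == 0ull)
        return true;                 /* fully zapped */

    if (sspte.spte_low == 0u &&
        (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
        return true;                 /* being zapped half-word by half-word */

    return false;
}

int main(void)
{
    printf("%d\n", check_direct_spte_mmio_pf(0));                 /* 1: zapped */
    printf("%d\n", check_direct_spte_mmio_pf(shadow_mmio_mask));  /* 1: zap in progress */
    printf("%d\n", check_direct_spte_mmio_pf(0x1234));            /* 0: ordinary spte */
    return 0;
}
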
diff --git a/kernel/arch/x86/kvm/x86.c b/kernel/arch/x86/kvm/x86.c
index 6cceb2cb2..37d79a026 100644
--- a/kernel/arch/x86/kvm/x86.c
+++ b/kernel/arch/x86/kvm/x86.c
@@ -2192,7 +2192,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
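
The x86.c hunk keeps the IA32_TSC_ADJUST semantics visible in the surrounding lines: a guest write moves the guest-visible TSC offset by the delta between the new value and the previously stored ia32_tsc_adjust_msr, while a host-initiated write only updates the stored value; the change routes the adjustment through adjust_tsc_offset_guest() rather than calling kvm_x86_ops->adjust_tsc_offset() directly. The sketch below models only that bookkeeping in plain userspace C; the struct and its fields are stand-ins, not the kernel's.

/* Standalone model of the TSC_ADJUST write path shown above (illustrative only). */
#include <stdint.h>
#include <stdio.h>

struct vcpu_model {
    int64_t tsc_offset;            /* offset applied to the guest-visible TSC */
    int64_t ia32_tsc_adjust_msr;   /* last value written to IA32_TSC_ADJUST */
};

static void adjust_tsc_offset_guest(struct vcpu_model *v, int64_t adj)
{
    v->tsc_offset += adj;
}

static void set_tsc_adjust(struct vcpu_model *v, int64_t data, int host_initiated)
{
    if (!host_initiated) {
        /* Guest write: apply only the delta from the previously stored value. */
        int64_t adj = data - v->ia32_tsc_adjust_msr;
        adjust_tsc_offset_guest(v, adj);
    }
    v->ia32_tsc_adjust_msr = data;
}

int main(void)
{
    struct vcpu_model v = { .tsc_offset = 0, .ia32_tsc_adjust_msr = 0 };

    set_tsc_adjust(&v, 1000, 0);   /* guest write: offset follows the +1000 delta */
    set_tsc_adjust(&v, 400, 0);    /* guest write: offset drops by 600 */
    set_tsc_adjust(&v, 0, 1);      /* host-initiated write: offset unchanged */

    printf("offset=%lld adjust=%lld\n",
           (long long)v.tsc_offset, (long long)v.ia32_tsc_adjust_msr);
    /* prints: offset=400 adjust=0 */
    return 0;
}
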