author     Jiang, Yunhong <yunhong.jiang@intel.com>    2016-07-21 17:36:18 +0000
committer  Gerrit Code Review <gerrit@172.30.200.206>  2016-07-21 17:36:18 +0000
commit     bceff3ff72bd362baf8a764f75a0e2a1f5aa0870 (patch)
tree       f09e369ded8c6d27338bb24d98f3ff224a7acf9d /kernel/arch
parent     3a1e86d637c095effee0cff87c8ef67a2924a5ff (diff)
parent     ebcdab040619575a47ad19b18e7ed5c38d287336 (diff)
Merge "KVM: x86: support using the vmx preemption timer for tsc deadline timer"
Diffstat (limited to 'kernel/arch')
-rw-r--r--  kernel/arch/x86/include/asm/kvm_host.h  |  3
-rw-r--r--  kernel/arch/x86/kvm/lapic.c             | 73
-rw-r--r--  kernel/arch/x86/kvm/lapic.h             |  5
-rw-r--r--  kernel/arch/x86/kvm/trace.h             | 15
-rw-r--r--  kernel/arch/x86/kvm/x86.c               |  5
5 files changed, 100 insertions(+), 1 deletion(-)
diff --git a/kernel/arch/x86/include/asm/kvm_host.h b/kernel/arch/x86/include/asm/kvm_host.h
index 30cfd6429..21637c9e9 100644
--- a/kernel/arch/x86/include/asm/kvm_host.h
+++ b/kernel/arch/x86/include/asm/kvm_host.h
@@ -911,6 +911,9 @@ struct kvm_x86_ops {
void (*post_block)(struct kvm_vcpu *vcpu);
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+ void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
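The vendor-side implementation of these two hooks is not in this diffstat; it lives in the vendor module (e.g. vmx.c) in a separate patch of the series. As a rough, hypothetical sketch of the contract only — my_set_hv_timer(), my_hw_program_timer(), my_hw_disarm_timer(), and MY_HW_TIMER_MAX are illustrative names, not from the patch — an implementation could look like:

/*
 * Hypothetical sketch of the hook contract; not part of this merge.
 * set_hv_timer() converts the guest TSC deadline into a delta the
 * hardware timer can count, returning nonzero on failure so the
 * lapic code falls back to the hrtimer.
 */
static int my_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
{
	u64 guest_tscl = kvm_read_l1_tsc(vcpu, rdtsc());
	u64 delta = guest_deadline_tsc > guest_tscl ?
			guest_deadline_tsc - guest_tscl : 0;

	if (delta > MY_HW_TIMER_MAX)		/* beyond what the hardware can count */
		return -ERANGE;			/* caller falls back to the hrtimer */

	my_hw_program_timer(vcpu, delta);	/* arm a timer that fires in guest mode */
	return 0;
}

static void my_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
	my_hw_disarm_timer(vcpu);
}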
diff --git a/kernel/arch/x86/kvm/lapic.c b/kernel/arch/x86/kvm/lapic.c
index 1e1e7eb8e..701f91234 100644
--- a/kernel/arch/x86/kvm/lapic.c
+++ b/kernel/arch/x86/kvm/lapic.c
@@ -1288,6 +1288,68 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
local_irq_restore(flags);
}
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
+
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ WARN_ON(swait_active(&vcpu->wq));
+ kvm_x86_ops->cancel_hv_timer(vcpu);
+ apic->lapic_timer.hv_timer_in_use = false;
+ apic_timer_expired(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(apic->lapic_timer.hv_timer_in_use);
+
+ if (apic_lvtt_tscdeadline(apic) &&
+ !atomic_read(&apic->lapic_timer.pending)) {
+ u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+ if (!kvm_x86_ops->set_hv_timer(vcpu, tscdeadline)) {
+ apic->lapic_timer.hv_timer_in_use = true;
+ hrtimer_cancel(&apic->lapic_timer.timer);
+
+ /* In case the sw timer triggered in the window */
+ if (atomic_read(&apic->lapic_timer.pending)) {
+ apic->lapic_timer.hv_timer_in_use = false;
+ kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+ }
+ }
+ trace_kvm_hv_timer_state(vcpu->vcpu_id,
+ apic->lapic_timer.hv_timer_in_use);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
+
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ /* Possibly the TSC deadline timer is not enabled yet */
+ if (!apic->lapic_timer.hv_timer_in_use)
+ return;
+
+ kvm_x86_ops->cancel_hv_timer(vcpu);
+ apic->lapic_timer.hv_timer_in_use = false;
+
+ if (atomic_read(&apic->lapic_timer.pending))
+ return;
+
+ start_sw_tscdeadline(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
+
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
@@ -1334,7 +1396,16 @@ static void start_apic_timer(struct kvm_lapic *apic)
ktime_to_ns(ktime_add_ns(now,
apic->lapic_timer.period)));
} else if (apic_lvtt_tscdeadline(apic)) {
- start_sw_tscdeadline(apic);
+ /* lapic timer in tsc deadline mode */
+ u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+ if (kvm_x86_ops->set_hv_timer &&
+ !kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
+ apic->lapic_timer.hv_timer_in_use = true;
+ trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
+ apic->lapic_timer.hv_timer_in_use);
+ } else
+ start_sw_tscdeadline(apic);
}
}
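The hardware timer only counts while the vCPU runs in guest mode, so the natural callers of the two switch functions are the vendor pre_block/post_block hooks declared in kvm_x86_ops just above the new ones. A hypothetical caller — my_pre_block() and my_post_block() are illustrative names, not from this patch — would pair them like this:

static int my_pre_block(struct kvm_vcpu *vcpu)
{
	/* About to halt: hand the deadline back to the hrtimer. */
	if (kvm_lapic_hv_timer_in_use(vcpu))
		kvm_lapic_switch_to_sw_timer(vcpu);
	return 0;
}

static void my_post_block(struct kvm_vcpu *vcpu)
{
	/* Back from halt: try to re-arm the hardware timer. */
	kvm_lapic_switch_to_hv_timer(vcpu);
}

Note how kvm_lapic_switch_to_hv_timer() itself re-checks lapic_timer.pending after cancelling the hrtimer, so a software expiry that races with the hand-off is not lost: the hardware timer is cancelled again and the pending interrupt is delivered normally.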
diff --git a/kernel/arch/x86/kvm/lapic.h b/kernel/arch/x86/kvm/lapic.h
index fde8e35d5..640ad275b 100644
--- a/kernel/arch/x86/kvm/lapic.h
+++ b/kernel/arch/x86/kvm/lapic.h
@@ -16,6 +16,7 @@ struct kvm_timer {
u64 tscdeadline;
u64 expired_tscdeadline;
atomic_t pending; /* accumulated triggered timers */
+ bool hv_timer_in_use;
};
struct kvm_lapic {
@@ -170,4 +171,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu);
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu);
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
#endif
diff --git a/kernel/arch/x86/kvm/trace.h b/kernel/arch/x86/kvm/trace.h
index ab9ae67a8..b41f7a013 100644
--- a/kernel/arch/x86/kvm/trace.h
+++ b/kernel/arch/x86/kvm/trace.h
@@ -1025,6 +1025,21 @@ TRACE_EVENT(kvm_pi_irte_update,
__entry->pi_desc_addr)
);
+TRACE_EVENT(kvm_hv_timer_state,
+ TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
+ TP_ARGS(vcpu_id, hv_timer_in_use),
+ TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
+ __field(unsigned int, hv_timer_in_use)
+ ),
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->hv_timer_in_use = hv_timer_in_use;
+ ),
+ TP_printk("vcpu_id %x hv_timer %x",
+ __entry->vcpu_id,
+ __entry->hv_timer_in_use)
+);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
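Once applied, the new event sits alongside the other tracepoints this header declares under the kvm trace system, so it can be enabled through tracefs in the usual way (e.g. /sys/kernel/debug/tracing/events/kvm/kvm_hv_timer_state/enable) and prints one line per hand-off between the hardware and software timers.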
diff --git a/kernel/arch/x86/kvm/x86.c b/kernel/arch/x86/kvm/x86.c
index 27419ba5a..e3e1a8c1b 100644
--- a/kernel/arch/x86/kvm/x86.c
+++ b/kernel/arch/x86/kvm/x86.c
@@ -2718,6 +2718,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdtsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
+
+ if (kvm_lapic_hv_timer_in_use(vcpu) &&
+ kvm_x86_ops->set_hv_timer(vcpu,
+ kvm_get_lapic_tscdeadline_msr(vcpu)))
+ kvm_lapic_switch_to_sw_timer(vcpu);
if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
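Not shown in these five files is the consumer of kvm_lapic_expired_hv_timer(): when the hardware timer fires it causes a VM exit, and the vendor exit handler is expected to hand the expiry back to the lapic code, which disarms the timer and delivers the interrupt. A hypothetical handler — my_handle_timer_exit() is an illustrative name — reduces to:

static int my_handle_timer_exit(struct kvm_vcpu *vcpu)
{
	/* Cancels the hardware timer and raises the lapic timer interrupt. */
	kvm_lapic_expired_hv_timer(vcpu);
	return 1;	/* keep running the guest */
}

The kvm_arch_vcpu_load() hunk above covers the remaining corner: after the vCPU moves to a CPU with a different raw TSC, the deadline is reprogrammed from the guest's TSC-deadline MSR, and if set_hv_timer() now fails the vCPU degrades gracefully to the hrtimer via kvm_lapic_switch_to_sw_timer().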