author	Yunhong Jiang <yunhong.jiang@gmail.com>	2016-06-13 14:20:01 -0700
committer	Yunhong Jiang <yunhong.jiang@linux.intel.com>	2016-07-18 08:06:50 -0700
commit	ebcdab040619575a47ad19b18e7ed5c38d287336 (patch)
tree	d806a3a9443fcf29245e37aa0efc5f4a4de5230c /kernel/arch
parent	ab9c81f473b3feff08f1e51428fbaf6a93d4896a (diff)
KVM: x86: support using the vmx preemption timer for tsc deadline timer
The VMX preemption timer can be used to virtualize the TSC deadline timer.
The VMX preemption timer is armed when the vCPU is running, and a VM exit
will happen if the virtual TSC deadline timer expires.

When the vCPU thread is blocked because of HLT, KVM switches to an hrtimer,
and then goes back to the VMX preemption timer when the vCPU thread is
unblocked.

This solution avoids the complexity of the OS's hrtimer system and the cost
of host timer interrupt handling, replacing them with a little math (for the
guest->host TSC and host TSC->preemption timer conversions) and a cheaper
VM exit. This benefits latency for isolated pCPUs.

[A word about performance... Yunhong reported a 30% reduction in average
latency from cyclictest. I made a similar test with tscdeadline_latency
from kvm-unit-tests, and measured:

- ~20 clock cycles loss (out of ~3200, so less than 1% but still
  statistically significant) in the worst case, where the test halts just
  after programming the TSC deadline timer

- ~800 clock cycles gain (a 25% reduction in latency) in the best case,
  where the test busy waits.

I removed the VMX bits from Yunhong's patch, to concentrate them in the
next patch - Paolo]

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Change-Id: I4aa1ecfa3463d1cbfb317511b45d2074b33d9b6f
upstream-status: backport
Signed-off-by: Yunhong Jiang <yunhong.jiang@linux.intel.com>
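[Editor's note] The "little math" above is the crux of the approach: an
absolute guest-TSC deadline has to be turned into a host-TSC delta, and
that delta into a preemption-timer count. The VMX bits land in the next
patch; the sketch below only illustrates the conversion. The function
name is hypothetical, TSC scaling is ignored for brevity, and
cpu_preemption_timer_multi is assumed to hold the TSC-to-preemption-timer
shift the CPU advertises in bits 4:0 of MSR_IA32_VMX_MISC.

/*
 * Illustrative sketch only -- the real implementation arrives in the
 * next patch. Converts an absolute guest-TSC deadline into a count for
 * the VMX preemption timer, which ticks at host-TSC >> shift.
 */
static int example_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
{
	u64 host_tsc = rdtsc();
	/* guest->host TSC conversion: apply the vCPU's TSC offset */
	u64 guest_tsc = kvm_read_l1_tsc(vcpu, host_tsc);
	u64 delta_tsc = max(guest_deadline_tsc, guest_tsc) - guest_tsc;

	/* host TSC -> preemption timer conversion */
	delta_tsc >>= cpu_preemption_timer_multi;

	/*
	 * The timer field is 32 bits wide; a deadline too far out must
	 * make the caller fall back to the hrtimer path.
	 */
	if (delta_tsc >> 32)
		return -ERANGE;

	/*
	 * Here the VMX code would program VMX_PREEMPTION_TIMER_VALUE and
	 * enable the "activate VMX-preemption timer" pin-based control.
	 */
	return 0;
}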
Diffstat (limited to 'kernel/arch')
-rw-r--r--	kernel/arch/x86/include/asm/kvm_host.h	3
-rw-r--r--	kernel/arch/x86/kvm/lapic.c	73
-rw-r--r--	kernel/arch/x86/kvm/lapic.h	5
-rw-r--r--	kernel/arch/x86/kvm/trace.h	15
-rw-r--r--	kernel/arch/x86/kvm/x86.c	5
5 files changed, 100 insertions, 1 deletion
diff --git a/kernel/arch/x86/include/asm/kvm_host.h b/kernel/arch/x86/include/asm/kvm_host.h
index 30cfd6429..21637c9e9 100644
--- a/kernel/arch/x86/include/asm/kvm_host.h
+++ b/kernel/arch/x86/include/asm/kvm_host.h
@@ -911,6 +911,9 @@ struct kvm_x86_ops {
void (*post_block)(struct kvm_vcpu *vcpu);
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+ void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
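[Editor's note] The contract for the two new hooks: ->set_hv_timer()
returns 0 when a hypervisor timer was armed for the given guest-TSC
deadline and nonzero when it cannot be used, in which case callers fall
back to the hrtimer; ->cancel_hv_timer() disarms it. A minimal sketch of
how a vendor module might wire them up, with hypothetical names (leaving
both NULL, as a backend without a suitable hardware timer would, keeps
the pure software-timer path):

/* hypothetical backend; example_set_hv_timer is sketched above */
static void example_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
	/* here: clear the "activate VMX-preemption timer" control */
}

static struct kvm_x86_ops example_x86_ops = {
	/* ...existing hooks... */
	.set_hv_timer    = example_set_hv_timer,
	.cancel_hv_timer = example_cancel_hv_timer,
};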
diff --git a/kernel/arch/x86/kvm/lapic.c b/kernel/arch/x86/kvm/lapic.c
index 1e1e7eb8e..701f91234 100644
--- a/kernel/arch/x86/kvm/lapic.c
+++ b/kernel/arch/x86/kvm/lapic.c
@@ -1288,6 +1288,68 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
local_irq_restore(flags);
}
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
+
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ WARN_ON(swait_active(&vcpu->wq));
+ kvm_x86_ops->cancel_hv_timer(vcpu);
+ apic->lapic_timer.hv_timer_in_use = false;
+ apic_timer_expired(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(apic->lapic_timer.hv_timer_in_use);
+
+ if (apic_lvtt_tscdeadline(apic) &&
+ !atomic_read(&apic->lapic_timer.pending)) {
+ u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+ if (!kvm_x86_ops->set_hv_timer(vcpu, tscdeadline)) {
+ apic->lapic_timer.hv_timer_in_use = true;
+ hrtimer_cancel(&apic->lapic_timer.timer);
+
+ /* In case the sw timer triggered in the window */
+ if (atomic_read(&apic->lapic_timer.pending)) {
+ apic->lapic_timer.hv_timer_in_use = false;
+ kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+ }
+ }
+ trace_kvm_hv_timer_state(vcpu->vcpu_id,
+ apic->lapic_timer.hv_timer_in_use);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
+
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ /* Possibly the TSC deadline timer is not enabled yet */
+ if (!apic->lapic_timer.hv_timer_in_use)
+ return;
+
+ kvm_x86_ops->cancel_hv_timer(vcpu);
+ apic->lapic_timer.hv_timer_in_use = false;
+
+ if (atomic_read(&apic->lapic_timer.pending))
+ return;
+
+ start_sw_tscdeadline(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
+
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
@@ -1334,7 +1396,16 @@ static void start_apic_timer(struct kvm_lapic *apic)
ktime_to_ns(ktime_add_ns(now,
apic->lapic_timer.period)));
} else if (apic_lvtt_tscdeadline(apic)) {
- start_sw_tscdeadline(apic);
+ /* lapic timer in tsc deadline mode */
+ u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+ if (kvm_x86_ops->set_hv_timer &&
+ !kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
+ apic->lapic_timer.hv_timer_in_use = true;
+ trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
+ apic->lapic_timer.hv_timer_in_use);
+ } else
+ start_sw_tscdeadline(apic);
}
}
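[Editor's note] Two subtleties in the switching code above deserve a
note. In kvm_lapic_switch_to_hv_timer(), the hrtimer is cancelled only
after the hypervisor timer is armed, and lapic_timer.pending is
re-checked afterwards: if the hrtimer fired in that window, the hv timer
is torn down again so the already-pending interrupt is not delivered
twice. The callers of these helpers arrive in the next patch; roughly,
the preemption-timer VM exit feeds kvm_lapic_expired_hv_timer(), and the
vCPU blocking hooks flip between the two implementations because the
preemption timer only counts while the vCPU runs in guest mode. A sketch
with illustrative names:

/*
 * VM-exit handler for an expired preemption timer (per the follow-up
 * patch): inject the deadline interrupt and resume the guest.
 */
static int example_handle_preemption_timer(struct kvm_vcpu *vcpu)
{
	kvm_lapic_expired_hv_timer(vcpu);
	return 1;
}

static int example_pre_block(struct kvm_vcpu *vcpu)
{
	/*
	 * About to HLT: the preemption timer stops ticking, so hand the
	 * deadline over to an hrtimer.
	 */
	if (kvm_lapic_hv_timer_in_use(vcpu))
		kvm_lapic_switch_to_sw_timer(vcpu);
	return 0;
}

static void example_post_block(struct kvm_vcpu *vcpu)
{
	/* unblocked: move the deadline back onto the hardware timer */
	if (kvm_x86_ops->set_hv_timer)
		kvm_lapic_switch_to_hv_timer(vcpu);
}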
diff --git a/kernel/arch/x86/kvm/lapic.h b/kernel/arch/x86/kvm/lapic.h
index fde8e35d5..640ad275b 100644
--- a/kernel/arch/x86/kvm/lapic.h
+++ b/kernel/arch/x86/kvm/lapic.h
@@ -16,6 +16,7 @@ struct kvm_timer {
u64 tscdeadline;
u64 expired_tscdeadline;
atomic_t pending; /* accumulated triggered timers */
+ bool hv_timer_in_use;
};
struct kvm_lapic {
@@ -170,4 +171,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu);
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu);
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
#endif
diff --git a/kernel/arch/x86/kvm/trace.h b/kernel/arch/x86/kvm/trace.h
index ab9ae67a8..b41f7a013 100644
--- a/kernel/arch/x86/kvm/trace.h
+++ b/kernel/arch/x86/kvm/trace.h
@@ -1025,6 +1025,21 @@ TRACE_EVENT(kvm_pi_irte_update,
__entry->pi_desc_addr)
);
+TRACE_EVENT(kvm_hv_timer_state,
+ TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
+ TP_ARGS(vcpu_id, hv_timer_in_use),
+ TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
+ __field(unsigned int, hv_timer_in_use)
+ ),
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->hv_timer_in_use = hv_timer_in_use;
+ ),
+ TP_printk("vcpu_id %x hv_timer %x\n",
+ __entry->vcpu_id,
+ __entry->hv_timer_in_use)
+);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/kernel/arch/x86/kvm/x86.c b/kernel/arch/x86/kvm/x86.c
index 27419ba5a..e3e1a8c1b 100644
--- a/kernel/arch/x86/kvm/x86.c
+++ b/kernel/arch/x86/kvm/x86.c
@@ -2718,6 +2718,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdtsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
+
+ if (kvm_lapic_hv_timer_in_use(vcpu) &&
+ kvm_x86_ops->set_hv_timer(vcpu,
+ kvm_get_lapic_tscdeadline_msr(vcpu)))
+ kvm_lapic_switch_to_sw_timer(vcpu);
if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);