author | Jiang, Yunhong <yunhong.jiang@intel.com> | 2016-07-21 17:36:18 +0000
committer | Gerrit Code Review <gerrit@172.30.200.206> | 2016-07-21 17:36:18 +0000
commit | bceff3ff72bd362baf8a764f75a0e2a1f5aa0870 (patch)
tree | f09e369ded8c6d27338bb24d98f3ff224a7acf9d /kernel/arch/x86/kvm/lapic.c
parent | 3a1e86d637c095effee0cff87c8ef67a2924a5ff (diff)
parent | ebcdab040619575a47ad19b18e7ed5c38d287336 (diff)
Merge "KVM: x86: support using the vmx preemption timer for tsc deadline timer"
Diffstat (limited to 'kernel/arch/x86/kvm/lapic.c')
-rw-r--r-- | kernel/arch/x86/kvm/lapic.c | 73
1 file changed, 72 insertions(+), 1 deletion(-)
diff --git a/kernel/arch/x86/kvm/lapic.c b/kernel/arch/x86/kvm/lapic.c
index 1e1e7eb8e..701f91234 100644
--- a/kernel/arch/x86/kvm/lapic.c
+++ b/kernel/arch/x86/kvm/lapic.c
@@ -1288,6 +1288,68 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
 	local_irq_restore(flags);
 }
 
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
+
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+	WARN_ON(swait_active(&vcpu->wq));
+	kvm_x86_ops->cancel_hv_timer(vcpu);
+	apic->lapic_timer.hv_timer_in_use = false;
+	apic_timer_expired(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	WARN_ON(apic->lapic_timer.hv_timer_in_use);
+
+	if (apic_lvtt_tscdeadline(apic) &&
+	    !atomic_read(&apic->lapic_timer.pending)) {
+		u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+		if (!kvm_x86_ops->set_hv_timer(vcpu, tscdeadline)) {
+			apic->lapic_timer.hv_timer_in_use = true;
+			hrtimer_cancel(&apic->lapic_timer.timer);
+
+			/* In case the sw timer triggered in the window */
+			if (atomic_read(&apic->lapic_timer.pending)) {
+				apic->lapic_timer.hv_timer_in_use = false;
+				kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+			}
+		}
+		trace_kvm_hv_timer_state(vcpu->vcpu_id,
+				apic->lapic_timer.hv_timer_in_use);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
+
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	/* Possibly the TSC deadline timer is not enabled yet */
+	if (!apic->lapic_timer.hv_timer_in_use)
+		return;
+
+	kvm_x86_ops->cancel_hv_timer(vcpu);
+	apic->lapic_timer.hv_timer_in_use = false;
+
+	if (atomic_read(&apic->lapic_timer.pending))
+		return;
+
+	start_sw_tscdeadline(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
+
 static void start_apic_timer(struct kvm_lapic *apic)
 {
 	ktime_t now;
@@ -1334,7 +1396,16 @@ static void start_apic_timer(struct kvm_lapic *apic)
 			   ktime_to_ns(ktime_add_ns(now,
 					apic->lapic_timer.period)));
 	} else if (apic_lvtt_tscdeadline(apic)) {
-		start_sw_tscdeadline(apic);
+		/* lapic timer in tsc deadline mode */
+		u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+		if (kvm_x86_ops->set_hv_timer &&
+		    !kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
+			apic->lapic_timer.hv_timer_in_use = true;
+			trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
+				apic->lapic_timer.hv_timer_in_use);
+		} else
+			start_sw_tscdeadline(apic);
 	}
 }
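
The four exported helpers above form a small state machine around lapic_timer.hv_timer_in_use: start_apic_timer() now tries the hardware timer first and falls back to start_sw_tscdeadline() when set_hv_timer is unimplemented or fails. Purely as an illustration (this sketch is not part of the diff; the example_* function names are hypothetical, and only the kvm_lapic_* helpers and kvm_x86_ops hooks referenced here come from the patch above), a hardware-timer backend such as the VMX preemption timer would be expected to drive these helpers roughly as follows:

/*
 * Illustrative sketch only -- not part of this commit.
 */

/* Before the vCPU blocks, the hardware timer cannot fire a VM exit,
 * so hand the pending TSC deadline back to the hrtimer-based sw timer. */
static void example_pre_block(struct kvm_vcpu *vcpu)
{
	if (kvm_lapic_hv_timer_in_use(vcpu))
		kvm_lapic_switch_to_sw_timer(vcpu);
}

/* After unblocking, try to move the deadline back onto the hardware
 * timer; the helper leaves the sw timer in place if programming fails
 * or the deadline has already fired. */
static void example_post_block(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->set_hv_timer)
		kvm_lapic_switch_to_hv_timer(vcpu);
}

/* VM-exit handler for the hardware timer: kvm_lapic_expired_hv_timer()
 * cancels the hv timer and injects the APIC timer interrupt via
 * apic_timer_expired(). */
static int example_handle_hv_timer_expiry(struct kvm_vcpu *vcpu)
{
	kvm_lapic_expired_hv_timer(vcpu);
	return 1;	/* resume guest execution */
}

The explicit fallback to start_sw_tscdeadline() keeps behaviour unchanged on hardware where no usable hv timer is available.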