Diffstat (limited to 'kernel/virt/kvm')
-rw-r--r--  kernel/virt/kvm/Kconfig             |   5
-rw-r--r--  kernel/virt/kvm/arm/arch_timer.c    | 210
-rw-r--r--  kernel/virt/kvm/arm/trace.h         |  63
-rw-r--r--  kernel/virt/kvm/arm/vgic-v2.c       |  22
-rw-r--r--  kernel/virt/kvm/arm/vgic-v3-emul.c  |  56
-rw-r--r--  kernel/virt/kvm/arm/vgic-v3.c       |  29
-rw-r--r--  kernel/virt/kvm/arm/vgic.c          | 651
-rw-r--r--  kernel/virt/kvm/async_pf.c          |  10
-rw-r--r--  kernel/virt/kvm/async_pf.h          |   4
-rw-r--r--  kernel/virt/kvm/coalesced_mmio.h    |   4
-rw-r--r--  kernel/virt/kvm/eventfd.c           | 308
-rw-r--r--  kernel/virt/kvm/irqchip.c           |  67
-rw-r--r--  kernel/virt/kvm/kvm_main.c          | 597
-rw-r--r--  kernel/virt/kvm/vfio.c              |   5
14 files changed, 1438 insertions, 593 deletions
diff --git a/kernel/virt/kvm/Kconfig b/kernel/virt/kvm/Kconfig
index e2c876d5a..7a79b6853 100644
--- a/kernel/virt/kvm/Kconfig
+++ b/kernel/virt/kvm/Kconfig
@@ -46,4 +46,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
config KVM_COMPAT
def_bool y
- depends on COMPAT && !S390
+ depends on KVM && COMPAT && !S390
+
+config HAVE_KVM_IRQ_BYPASS
+ bool
diff --git a/kernel/virt/kvm/arm/arch_timer.c b/kernel/virt/kvm/arm/arch_timer.c
index 98c95f2fc..ea6064696 100644
--- a/kernel/virt/kvm/arm/arch_timer.c
+++ b/kernel/virt/kvm/arm/arch_timer.c
@@ -28,6 +28,8 @@
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
+#include "trace.h"
+
static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
@@ -59,18 +61,6 @@ static void timer_disarm(struct arch_timer_cpu *timer)
}
}
-static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
-{
- int ret;
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
- timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
- ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- timer->irq->irq,
- timer->irq->level);
- WARN_ON(ret);
-}
-
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -111,13 +101,20 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
return HRTIMER_NORESTART;
}
+static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+ (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
+}
+
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
cycle_t cval, now;
- if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
- !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
+ if (!kvm_timer_irq_can_fire(vcpu))
return false;
cval = timer->cntv_cval;
@@ -126,68 +123,159 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
return cval <= now;
}
+static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
+{
+ int ret;
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ BUG_ON(!vgic_initialized(vcpu->kvm));
+
+ timer->irq.level = new_level;
+ trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
+ timer->irq.level);
+ ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer->map,
+ timer->irq.level);
+ WARN_ON(ret);
+}
+
+/*
+ * Check if there was a change in the timer state (should we raise or lower
+ * the line level to the GIC).
+ */
+static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ /*
+ * If userspace modified the timer registers via SET_ONE_REG before
+ * the vgic was initialized, we mustn't set the timer->irq.level value
+ * because the guest would never see the interrupt. Instead wait
+ * until we call this function from kvm_timer_flush_hwstate.
+ */
+ if (!vgic_initialized(vcpu->kvm))
+ return -ENODEV;
+
+ if (kvm_timer_should_fire(vcpu) != timer->irq.level)
+ kvm_timer_update_irq(vcpu, !timer->irq.level);
+
+ return 0;
+}
+
+/*
+ * Schedule the background timer before calling kvm_vcpu_block, so that this
+ * thread is removed from its waitqueue and made runnable when there's a timer
+ * interrupt to handle.
+ */
+void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 ns;
+ cycle_t cval, now;
+
+ BUG_ON(timer_is_armed(timer));
+
+ /*
+ * No need to schedule a background timer if the guest timer has
+ * already expired, because kvm_vcpu_block will return before putting
+ * the thread to sleep.
+ */
+ if (kvm_timer_should_fire(vcpu))
+ return;
+
+ /*
+ * If the timer is not capable of raising interrupts (disabled or
+ * masked), then there's no more work for us to do.
+ */
+ if (!kvm_timer_irq_can_fire(vcpu))
+ return;
+
+ /* The timer has not yet expired, schedule a background timer */
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ ns = cyclecounter_cyc2ns(timecounter->cc,
+ cval - now,
+ timecounter->mask,
+ &timecounter->frac);
+ timer_arm(timer, ns);
+}
+
+void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ timer_disarm(timer);
+}
+
/**
* kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
* @vcpu: The vcpu pointer
*
- * Disarm any pending soft timers, since the world-switch code will write the
- * virtual timer state back to the physical CPU.
+ * Check if the virtual timer has expired while we were running in the host,
+ * and inject an interrupt if that was the case.
*/
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ bool phys_active;
+ int ret;
- /*
- * We're about to run this vcpu again, so there is no need to
- * keep the background timer running, as we're about to
- * populate the CPU timer again.
- */
- timer_disarm(timer);
+ if (kvm_timer_update_state(vcpu))
+ return;
/*
- * If the timer expired while we were not scheduled, now is the time
- * to inject it.
- */
- if (kvm_timer_should_fire(vcpu))
- kvm_timer_inject_irq(vcpu);
+ * If we enter the guest with the virtual input level to the VGIC
+ * asserted, then we have already told the VGIC what we need to, and
+ * we don't need to exit from the guest until the guest deactivates
+ * the already injected interrupt, so therefore we should set the
+ * hardware active state to prevent unnecessary exits from the guest.
+ *
+ * Also, if we enter the guest with the virtual timer interrupt active,
+ * then it must be active on the physical distributor, because we set
+ * the HW bit and the guest must be able to deactivate the virtual and
+ * physical interrupt at the same time.
+ *
+ * Conversely, if the virtual input level is deasserted and the virtual
+ * interrupt is not active, then always clear the hardware active state
+ * to ensure that hardware interrupts from the timer trigger a guest
+ * exit.
+ */
+ if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
+ phys_active = true;
+ else
+ phys_active = false;
+
+ ret = irq_set_irqchip_state(timer->map->irq,
+ IRQCHIP_STATE_ACTIVE,
+ phys_active);
+ WARN_ON(ret);
}
/**
* kvm_timer_sync_hwstate - sync timer state from cpu
* @vcpu: The vcpu pointer
*
- * Check if the virtual timer was armed and either schedule a corresponding
- * soft timer or inject directly if already expired.
+ * Check if the virtual timer has expired while we were running in the guest,
+ * and inject an interrupt if that was the case.
*/
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- cycle_t cval, now;
- u64 ns;
BUG_ON(timer_is_armed(timer));
- if (kvm_timer_should_fire(vcpu)) {
- /*
- * Timer has already expired while we were not
- * looking. Inject the interrupt and carry on.
- */
- kvm_timer_inject_irq(vcpu);
- return;
- }
-
- cval = timer->cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
- ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
- &timecounter->frac);
- timer_arm(timer, ns);
+ /*
+ * The guest could have modified the timer registers or the timer
+ * could have expired, update the timer state.
+ */
+ kvm_timer_update_state(vcpu);
}
-void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *irq)
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct irq_phys_map *map;
/*
* The vcpu timer irq number cannot be determined in
@@ -195,7 +283,27 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
* kvm_vcpu_set_target(). To handle this, we determine
* vcpu timer irq number when the vcpu is reset.
*/
- timer->irq = irq;
+ timer->irq.irq = irq->irq;
+
+ /*
+ * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
+ * and to 0 for ARMv7. We provide an implementation that always
+ * resets the timer to be disabled and unmasked and is compliant with
+ * the ARMv7 architecture.
+ */
+ timer->cntv_ctl = 0;
+ kvm_timer_update_state(vcpu);
+
+ /*
+ * Tell the VGIC that the virtual interrupt is tied to a
+ * physical interrupt. We do that once per VCPU.
+ */
+ map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
+ if (WARN_ON(IS_ERR(map)))
+ return PTR_ERR(map);
+
+ timer->map = map;
+ return 0;
}
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
@@ -229,6 +337,8 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
default:
return -1;
}
+
+ kvm_timer_update_state(vcpu);
return 0;
}
@@ -335,6 +445,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
timer_disarm(timer);
+ if (timer->map)
+ kvm_vgic_unmap_phys_irq(vcpu, timer->map);
}
void kvm_timer_enable(struct kvm *kvm)
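[Aside, not part of the patch] The arch_timer.c hunks above replace the old "inject on expiry" path with two predicates, kvm_timer_irq_can_fire() and kvm_timer_should_fire(), plus kvm_timer_update_state() to drive the line level into the VGIC. The minimal user-space sketch below models just those two predicates; the control-register bits, struct layout and the cycle source are stand-in assumptions, not the kernel's definitions.

/* Standalone sketch of the timer-fire predicates; assumed constants, not kernel headers. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_ENABLE  (1u << 0)   /* assumed stand-in for ARCH_TIMER_CTRL_ENABLE */
#define CTRL_IT_MASK (1u << 1)   /* assumed stand-in for ARCH_TIMER_CTRL_IT_MASK */

struct vtimer {
	uint32_t cntv_ctl;   /* virtual timer control register */
	uint64_t cntv_cval;  /* virtual timer compare value */
};

/* Mirrors kvm_timer_irq_can_fire(): the timer is enabled and not masked. */
static bool timer_irq_can_fire(const struct vtimer *t)
{
	return !(t->cntv_ctl & CTRL_IT_MASK) && (t->cntv_ctl & CTRL_ENABLE);
}

/* Mirrors kvm_timer_should_fire(): the counter has reached the compare value. */
static bool timer_should_fire(const struct vtimer *t, uint64_t now)
{
	return timer_irq_can_fire(t) && t->cntv_cval <= now;
}

int main(void)
{
	struct vtimer t = { .cntv_ctl = CTRL_ENABLE, .cntv_cval = 100 };

	printf("now=50:  fire=%d\n", timer_should_fire(&t, 50));   /* 0: not yet expired */
	printf("now=150: fire=%d\n", timer_should_fire(&t, 150));  /* 1: expired */

	t.cntv_ctl |= CTRL_IT_MASK;                                /* mask the output */
	printf("masked:  fire=%d\n", timer_should_fire(&t, 150));  /* 0: masked */
	return 0;
}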
diff --git a/kernel/virt/kvm/arm/trace.h b/kernel/virt/kvm/arm/trace.h
new file mode 100644
index 000000000..37d8b9886
--- /dev/null
+++ b/kernel/virt/kvm/arm/trace.h
@@ -0,0 +1,63 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+/*
+ * Tracepoints for vgic
+ */
+TRACE_EVENT(vgic_update_irq_pending,
+ TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
+ TP_ARGS(vcpu_id, irq, level),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_id )
+ __field( __u32, irq )
+ __field( bool, level )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->irq = irq;
+ __entry->level = level;
+ ),
+
+ TP_printk("VCPU: %ld, IRQ %d, level: %d",
+ __entry->vcpu_id, __entry->irq, __entry->level)
+);
+
+/*
+ * Tracepoints for arch_timer
+ */
+TRACE_EVENT(kvm_timer_update_irq,
+ TP_PROTO(unsigned long vcpu_id, __u32 irq, int level),
+ TP_ARGS(vcpu_id, irq, level),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_id )
+ __field( __u32, irq )
+ __field( int, level )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->irq = irq;
+ __entry->level = level;
+ ),
+
+ TP_printk("VCPU: %ld, IRQ %d, level %d",
+ __entry->vcpu_id, __entry->irq, __entry->level)
+);
+
+#endif /* _TRACE_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/virt/kvm/arm/vgic-v2.c b/kernel/virt/kvm/arm/vgic-v2.c
index f9b9c7c51..ff02f08df 100644
--- a/kernel/virt/kvm/arm/vgic-v2.c
+++ b/kernel/virt/kvm/arm/vgic-v2.c
@@ -48,6 +48,10 @@ static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
lr_desc.state |= LR_STATE_ACTIVE;
if (val & GICH_LR_EOI)
lr_desc.state |= LR_EOI_INT;
+ if (val & GICH_LR_HW) {
+ lr_desc.state |= LR_HW;
+ lr_desc.hwirq = (val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT;
+ }
return lr_desc;
}
@@ -55,7 +59,9 @@ static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
struct vgic_lr lr_desc)
{
- u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;
+ u32 lr_val;
+
+ lr_val = lr_desc.irq;
if (lr_desc.state & LR_STATE_PENDING)
lr_val |= GICH_LR_PENDING_BIT;
@@ -64,12 +70,16 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
if (lr_desc.state & LR_EOI_INT)
lr_val |= GICH_LR_EOI;
+ if (lr_desc.state & LR_HW) {
+ lr_val |= GICH_LR_HW;
+ lr_val |= (u32)lr_desc.hwirq << GICH_LR_PHYSID_CPUID_SHIFT;
+ }
+
+ if (lr_desc.irq < VGIC_NR_SGIS)
+ lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
+
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-}
-static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
else
@@ -144,6 +154,7 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
* anyway.
*/
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;
/* Get the show on the road... */
vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
@@ -152,7 +163,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v2_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
- .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
.get_elrsr = vgic_v2_get_elrsr,
.get_eisr = vgic_v2_get_eisr,
.clear_eisr = vgic_v2_clear_eisr,
diff --git a/kernel/virt/kvm/arm/vgic-v3-emul.c b/kernel/virt/kvm/arm/vgic-v3-emul.c
index e9c3a7a83..e661e7fb9 100644
--- a/kernel/virt/kvm/arm/vgic-v3-emul.c
+++ b/kernel/virt/kvm/arm/vgic-v3-emul.c
@@ -76,8 +76,6 @@ static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
vgic_reg_access(mmio, &reg, offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
if (mmio->is_write) {
- if (reg & GICD_CTLR_ENABLE_SS_G0)
- kvm_info("guest tried to enable unsupported Group0 interrupts\n");
vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
vgic_update_state(vcpu->kvm);
return true;
@@ -173,6 +171,32 @@ static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
return false;
}
+static bool handle_mmio_set_active_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_active_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
@@ -428,13 +452,13 @@ static const struct vgic_io_range vgic_v3_dist_ranges[] = {
.base = GICD_ISACTIVER,
.len = 0x80,
.bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
+ .handle_mmio = handle_mmio_set_active_reg_dist,
},
{
.base = GICD_ICACTIVER,
.len = 0x80,
.bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
+ .handle_mmio = handle_mmio_clear_active_reg_dist,
},
{
.base = GICD_IPRIORITYR,
@@ -561,6 +585,26 @@ static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
ACCESS_WRITE_CLEARBIT);
}
+static bool handle_mmio_set_active_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_active_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id);
+}
+
static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
@@ -674,13 +718,13 @@ static const struct vgic_io_range vgic_redist_ranges[] = {
.base = SGI_base(GICR_ISACTIVER0),
.len = 0x04,
.bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
+ .handle_mmio = handle_mmio_set_active_reg_redist,
},
{
.base = SGI_base(GICR_ICACTIVER0),
.len = 0x04,
.bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
+ .handle_mmio = handle_mmio_clear_active_reg_redist,
},
{
.base = SGI_base(GICR_IPRIORITYR0),
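[Aside, not part of the patch] The new GICv3 active-register handlers above forward shared (SPI) accesses and keep the private (SGI/PPI) part of the distributor RAZ/WI, using the byte-offset check "offset >= VGIC_NR_PRIVATE_IRQS / 8". The sketch below only illustrates that offset arithmetic for a 1-bit-per-interrupt register bank; the value 32 for VGIC_NR_PRIVATE_IRQS is an assumption standing in for the vgic's constant.

/* Standalone sketch of the offset-to-IRQ arithmetic for a 1-bit-per-IRQ bank. */
#include <stdbool.h>
#include <stdio.h>

#define NR_PRIVATE_IRQS 32   /* assumed value of VGIC_NR_PRIVATE_IRQS */

/* First interrupt number covered by a byte offset in a 1-bit-per-IRQ bank. */
static int first_irq_of_offset(unsigned int offset)
{
	return offset * 8;
}

/* Mirrors the "offset >= VGIC_NR_PRIVATE_IRQS / 8" check in the handlers. */
static bool offset_is_shared(unsigned int offset)
{
	return offset >= NR_PRIVATE_IRQS / 8;
}

int main(void)
{
	unsigned int offset;

	for (offset = 0; offset <= 8; offset += 4)
		printf("offset %2u -> IRQ %3d (%s)\n", offset,
		       first_irq_of_offset(offset),
		       offset_is_shared(offset) ? "shared" : "private");
	return 0;
}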
diff --git a/kernel/virt/kvm/arm/vgic-v3.c b/kernel/virt/kvm/arm/vgic-v3.c
index dff06021e..487d6357b 100644
--- a/kernel/virt/kvm/arm/vgic-v3.c
+++ b/kernel/virt/kvm/arm/vgic-v3.c
@@ -67,6 +67,10 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
lr_desc.state |= LR_STATE_ACTIVE;
if (val & ICH_LR_EOI)
lr_desc.state |= LR_EOI_INT;
+ if (val & ICH_LR_HW) {
+ lr_desc.state |= LR_HW;
+ lr_desc.hwirq = (val >> ICH_LR_PHYS_ID_SHIFT) & GENMASK(9, 0);
+ }
return lr_desc;
}
@@ -84,10 +88,17 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
* Eventually we want to make this configurable, so we may revisit
* this in the future.
*/
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ switch (vcpu->kvm->arch.vgic.vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
lr_val |= ICH_LR_GROUP;
- else
- lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ if (lr_desc.irq < VGIC_NR_SGIS)
+ lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+ break;
+ default:
+ BUG();
+ }
if (lr_desc.state & LR_STATE_PENDING)
lr_val |= ICH_LR_PENDING_BIT;
@@ -95,13 +106,13 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
lr_val |= ICH_LR_ACTIVE_BIT;
if (lr_desc.state & LR_EOI_INT)
lr_val |= ICH_LR_EOI;
+ if (lr_desc.state & LR_HW) {
+ lr_val |= ICH_LR_HW;
+ lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
+ }
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
-}
-static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
else
@@ -178,6 +189,7 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
* anyway.
*/
vgic_v3->vgic_vmcr = 0;
+ vgic_v3->vgic_elrsr = ~0;
/*
* If we are emulating a GICv3, we do it in an non-GICv2-compatible
@@ -196,7 +208,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v3_ops = {
.get_lr = vgic_v3_get_lr,
.set_lr = vgic_v3_set_lr,
- .sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
.get_elrsr = vgic_v3_get_elrsr,
.get_eisr = vgic_v3_get_eisr,
.clear_eisr = vgic_v3_clear_eisr,
@@ -273,7 +284,7 @@ int vgic_v3_probe(struct device_node *vgic_node,
vgic->vctrl_base = NULL;
vgic->type = VGIC_V3;
- vgic->max_gic_vcpus = KVM_MAX_VCPUS;
+ vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS;
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
vcpu_res.start, vgic->maint_irq);
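[Aside, not part of the patch] The vgic-v2.c and vgic-v3.c hunks above add the LR_HW path: when a virtual interrupt is backed by a physical one, the list register carries the HW bit plus the physical INTID so the guest's deactivation also deactivates the physical interrupt. The sketch below illustrates that packing for the GICv3 layout; the bit positions are assumptions standing in for the ICH_LR_* macros.

/* Standalone sketch of encoding a hardware-backed interrupt into an LR value. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LR_HW_BIT        (1ULL << 61)  /* assumed stand-in for ICH_LR_HW */
#define LR_PHYS_ID_SHIFT 32            /* assumed stand-in for ICH_LR_PHYS_ID_SHIFT */

/* Pack a virtual INTID and its backing physical INTID into one LR value. */
static uint64_t encode_hw_lr(uint32_t virt_irq, uint32_t hwirq)
{
	uint64_t lr = virt_irq;                              /* virtual INTID, low bits */

	lr |= LR_HW_BIT;                                     /* hardware-backed interrupt */
	lr |= ((uint64_t)hwirq & 0x3ff) << LR_PHYS_ID_SHIFT; /* 10-bit physical INTID */
	return lr;
}

int main(void)
{
	/* e.g. virtual timer PPI 27 backed by the same physical PPI */
	printf("LR = 0x%016" PRIx64 "\n", encode_hw_lr(27, 27));
	return 0;
}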
diff --git a/kernel/virt/kvm/arm/vgic.c b/kernel/virt/kvm/arm/vgic.c
index 950064a09..5d10f104f 100644
--- a/kernel/virt/kvm/arm/vgic.c
+++ b/kernel/virt/kvm/arm/vgic.c
@@ -24,10 +24,9 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/rculist.h>
#include <linux/uaccess.h>
-#include <linux/irqchip/arm-gic.h>
-
#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
@@ -35,6 +34,9 @@
#include <asm/kvm.h>
#include <kvm/iodev.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
/*
* How the whole thing works (courtesy of Christoffer Dall):
*
@@ -76,14 +78,40 @@
* cause the interrupt to become inactive in such a situation.
* Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
* inactive as long as the external input line is held high.
+ *
+ *
+ * Initialization rules: there are multiple stages to the vgic
+ * initialization, both for the distributor and the CPU interfaces.
+ *
+ * Distributor:
+ *
+ * - kvm_vgic_early_init(): initialization of static data that doesn't
+ * depend on any sizing information or emulation type. No allocation
+ * is allowed there.
+ *
+ * - vgic_init(): allocation and initialization of the generic data
+ * structures that depend on sizing information (number of CPUs,
+ * number of interrupts). Also initializes the vcpu specific data
+ * structures. Can be executed lazily for GICv2.
+ * [to be renamed to kvm_vgic_init??]
+ *
+ * CPU Interface:
+ *
+ * - kvm_vgic_cpu_early_init(): initialization of static data that
+ * doesn't depend on any sizing information or emulation type. No
+ * allocation is allowed there.
*/
#include "vgic.h"
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+ int virt_irq);
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
@@ -334,6 +362,11 @@ static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
+ if (!vgic_dist_irq_get_level(vcpu, irq)) {
+ vgic_dist_irq_clear_pending(vcpu, irq);
+ if (!compute_pending_for_cpu(vcpu))
+ clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
+ }
}
static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
@@ -377,7 +410,7 @@ void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
- return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
+ return !vgic_irq_is_queued(vcpu, irq);
}
/**
@@ -631,10 +664,9 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
vgic_reg_access(mmio, &val, offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
if (mmio->is_write) {
- if (offset < 8) {
- *reg = ~0U; /* Force PPIs/SGIs to 1 */
+ /* Ignore writes to read-only SGI and PPI bits */
+ if (offset < 8)
return false;
- }
val = vgic_cfg_compress(val);
if (offset & 4) {
@@ -660,9 +692,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int i;
- for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+ for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
struct vgic_lr lr = vgic_get_lr(vcpu, i);
/*
@@ -683,30 +717,14 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* interrupt then move the active state to the
* distributor tracking bit.
*/
- if (lr.state & LR_STATE_ACTIVE) {
+ if (lr.state & LR_STATE_ACTIVE)
vgic_irq_set_active(vcpu, lr.irq);
- lr.state &= ~LR_STATE_ACTIVE;
- }
/*
* Reestablish the pending state on the distributor and the
- * CPU interface. It may have already been pending, but that
- * is fine, then we are only setting a few bits that were
- * already set.
+ * CPU interface and mark the LR as free for other use.
*/
- if (lr.state & LR_STATE_PENDING) {
- vgic_dist_irq_set_pending(vcpu, lr.irq);
- lr.state &= ~LR_STATE_PENDING;
- }
-
- vgic_set_lr(vcpu, i, lr);
-
- /*
- * Mark the LR as free for other use.
- */
- BUG_ON(lr.state & LR_STATE_MASK);
- vgic_retire_lr(i, lr.irq, vcpu);
- vgic_irq_clear_queued(vcpu, lr.irq);
+ vgic_retire_lr(i, vcpu);
/* Finally update the VGIC state. */
vgic_update_state(vcpu->kvm);
@@ -959,6 +977,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
pend_shared = vcpu->arch.vgic_cpu.pending_shared;
+ if (!dist->enabled) {
+ bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+ bitmap_zero(pend_shared, nr_shared);
+ return 0;
+ }
+
pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@ -986,11 +1010,6 @@ void vgic_update_state(struct kvm *kvm)
struct kvm_vcpu *vcpu;
int c;
- if (!dist->enabled) {
- set_bit(0, dist->irq_pending_on_cpu);
- return;
- }
-
kvm_for_each_vcpu(c, vcpu, kvm) {
if (compute_pending_for_cpu(vcpu))
set_bit(c, dist->irq_pending_on_cpu);
@@ -1013,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
vgic_ops->set_lr(vcpu, lr, vlr);
}
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr vlr)
-{
- vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
return vgic_ops->get_elrsr(vcpu);
@@ -1064,16 +1077,44 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
vgic_ops->enable(vcpu);
}
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
+ vgic_irq_clear_queued(vcpu, vlr.irq);
+
+ /*
+ * We must transfer the pending state back to the distributor before
+	 * retiring the LR, otherwise we may lose edge-triggered interrupts.
+ */
+ if (vlr.state & LR_STATE_PENDING) {
+ vgic_dist_irq_set_pending(vcpu, vlr.irq);
+ vlr.hwirq = 0;
+ }
+
vlr.state = 0;
vgic_set_lr(vcpu, lr_nr, vlr);
- clear_bit(lr_nr, vgic_cpu->lr_used);
- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
+}
+
+static bool dist_active_irq(struct kvm_vcpu *vcpu)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+ int i;
+
+ for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+ struct vgic_lr vlr = vgic_get_lr(vcpu, i);
+
+ if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+ return true;
+ }
+
+ return vgic_irq_is_active(vcpu, map->virt_irq);
}
/*
@@ -1087,17 +1128,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
*/
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int lr;
- for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+ for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
- vgic_retire_lr(lr, vlr.irq, vcpu);
- if (vgic_irq_is_queued(vcpu, vlr.irq))
- vgic_irq_clear_queued(vcpu, vlr.irq);
- }
+ if (!vgic_irq_is_enabled(vcpu, vlr.irq))
+ vgic_retire_lr(lr, vcpu);
}
}
@@ -1109,7 +1148,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
vgic_irq_clear_active(vcpu, irq);
vgic_update_state(vcpu->kvm);
- } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+ } else {
+ WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
vlr.state |= LR_STATE_PENDING;
kvm_debug("Set pending: 0x%x\n", vlr.state);
}
@@ -1117,8 +1157,25 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
if (!vgic_irq_is_edge(vcpu, irq))
vlr.state |= LR_EOI_INT;
+ if (vlr.irq >= VGIC_NR_SGIS) {
+ struct irq_phys_map *map;
+ map = vgic_irq_map_search(vcpu, irq);
+
+ if (map) {
+ vlr.hwirq = map->phys_irq;
+ vlr.state |= LR_HW;
+ vlr.state &= ~LR_EOI_INT;
+
+ /*
+ * Make sure we're not going to sample this
+ * again, as a HW-backed interrupt cannot be
+ * in the PENDING_ACTIVE stage.
+ */
+ vgic_irq_set_queued(vcpu, irq);
+ }
+ }
+
vgic_set_lr(vcpu, lr_nr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
@@ -1128,8 +1185,9 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
*/
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
struct vgic_lr vlr;
int lr;
@@ -1140,28 +1198,22 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
kvm_debug("Queue IRQ%d\n", irq);
- lr = vgic_cpu->vgic_irq_lr_map[irq];
-
/* Do we have an active interrupt for the same CPUID? */
- if (lr != LR_EMPTY) {
+ for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
vlr = vgic_get_lr(vcpu, lr);
- if (vlr.source == sgi_source_id) {
+ if (vlr.irq == irq && vlr.source == sgi_source_id) {
kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
- BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
return true;
}
}
/* Try to use another LR for this interrupt */
- lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
- vgic->nr_lr);
+ lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
if (lr >= vgic->nr_lr)
return false;
kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
- vgic_cpu->vgic_irq_lr_map[irq] = lr;
- set_bit(lr, vgic_cpu->lr_used);
vlr.irq = irq;
vlr.source = sgi_source_id;
@@ -1217,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
* may have been serviced from another vcpu. In all cases,
* move along.
*/
- if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
+ if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
goto epilog;
/* SGIs */
@@ -1256,12 +1308,60 @@ epilog:
}
}
+static int process_queued_irq(struct kvm_vcpu *vcpu,
+ int lr, struct vgic_lr vlr)
+{
+ int pending = 0;
+
+ /*
+ * If the IRQ was EOIed (called from vgic_process_maintenance) or it
+ * went from active to non-active (called from vgic_sync_hwirq) it was
+ * also ACKed and we therefore assume we can clear the soft pending
+ * state (should it have been set) for this interrupt.
+ *
+ * Note: if the IRQ soft pending state was set after the IRQ was
+ * acked, it actually shouldn't be cleared, but we have no way of
+ * knowing that unless we start trapping ACKs when the soft-pending
+ * state is set.
+ */
+ vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
+
+ /*
+ * Tell the gic to start sampling this interrupt again.
+ */
+ vgic_irq_clear_queued(vcpu, vlr.irq);
+
+ /* Any additional pending interrupt? */
+ if (vgic_irq_is_edge(vcpu, vlr.irq)) {
+ BUG_ON(!(vlr.state & LR_HW));
+ pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
+ } else {
+ if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
+ vgic_cpu_irq_set(vcpu, vlr.irq);
+ pending = 1;
+ } else {
+ vgic_dist_irq_clear_pending(vcpu, vlr.irq);
+ vgic_cpu_irq_clear(vcpu, vlr.irq);
+ }
+ }
+
+ /*
+ * Despite being EOIed, the LR may not have
+ * been marked as empty.
+ */
+ vlr.state = 0;
+ vlr.hwirq = 0;
+ vgic_set_lr(vcpu, lr, vlr);
+
+ return pending;
+}
+
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
u32 status = vgic_get_interrupt_status(vcpu);
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- bool level_pending = false;
struct kvm *kvm = vcpu->kvm;
+ int level_pending = 0;
kvm_debug("STATUS = %08x\n", status);
@@ -1276,54 +1376,22 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
- spin_lock(&dist->lock);
- vgic_irq_clear_queued(vcpu, vlr.irq);
+ WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
WARN_ON(vlr.state & LR_STATE_MASK);
- vlr.state = 0;
- vgic_set_lr(vcpu, lr, vlr);
- /*
- * If the IRQ was EOIed it was also ACKed and we we
- * therefore assume we can clear the soft pending
- * state (should it had been set) for this interrupt.
- *
- * Note: if the IRQ soft pending state was set after
- * the IRQ was acked, it actually shouldn't be
- * cleared, but we have no way of knowing that unless
- * we start trapping ACKs when the soft-pending state
- * is set.
- */
- vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
/*
* kvm_notify_acked_irq calls kvm_set_irq()
- * to reset the IRQ level. Need to release the
- * lock for kvm_set_irq to grab it.
+ * to reset the IRQ level, which grabs the dist->lock
+ * so we call this before taking the dist->lock.
*/
- spin_unlock(&dist->lock);
-
kvm_notify_acked_irq(kvm, 0,
vlr.irq - VGIC_NR_PRIVATE_IRQS);
- spin_lock(&dist->lock);
-
- /* Any additional pending interrupt? */
- if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
- vgic_cpu_irq_set(vcpu, vlr.irq);
- level_pending = true;
- } else {
- vgic_dist_irq_clear_pending(vcpu, vlr.irq);
- vgic_cpu_irq_clear(vcpu, vlr.irq);
- }
+ spin_lock(&dist->lock);
+ level_pending |= process_queued_irq(vcpu, lr, vlr);
spin_unlock(&dist->lock);
-
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- vgic_sync_lr_elrsr(vcpu, lr, vlr);
}
}
@@ -1341,10 +1409,31 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
return level_pending;
}
+/*
+ * Save the physical active state, and reset it to inactive.
+ *
+ * Return true if there's a pending forwarded interrupt to queue.
+ */
+static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ bool level_pending;
+
+ if (!(vlr.state & LR_HW))
+ return false;
+
+ if (vlr.state & LR_STATE_ACTIVE)
+ return false;
+
+ spin_lock(&dist->lock);
+ level_pending = process_queued_irq(vcpu, lr, vlr);
+ spin_unlock(&dist->lock);
+ return level_pending;
+}
+
/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
u64 elrsr;
unsigned long *elrsr_ptr;
@@ -1352,23 +1441,18 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
bool level_pending;
level_pending = vgic_process_maintenance(vcpu);
- elrsr = vgic_get_elrsr(vcpu);
- elrsr_ptr = u64_to_bitmask(&elrsr);
-
- /* Clear mappings for empty LRs */
- for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
- struct vgic_lr vlr;
- if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
- continue;
-
- vlr = vgic_get_lr(vcpu, lr);
+ /* Deal with HW interrupts, and clear mappings for empty LRs */
+ for (lr = 0; lr < vgic->nr_lr; lr++) {
+ struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
+ level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
BUG_ON(vlr.irq >= dist->nr_irqs);
- vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
}
/* Check if we still have something up our sleeve... */
+ elrsr = vgic_get_elrsr(vcpu);
+ elrsr_ptr = u64_to_bitmask(&elrsr);
pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
if (level_pending || pending < vgic->nr_lr)
set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
@@ -1404,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- if (!irqchip_in_kernel(vcpu->kvm))
- return 0;
-
- return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
-}
-
-
void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
@@ -1449,7 +1522,8 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
}
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
- unsigned int irq_num, bool level)
+ struct irq_phys_map *map,
+ unsigned int irq_num, bool level)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
@@ -1457,6 +1531,11 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
int enabled;
bool ret = true, can_inject = true;
+ trace_vgic_update_irq_pending(cpuid, irq_num, level);
+
+ if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
+ return -EINVAL;
+
spin_lock(&dist->lock);
vcpu = kvm_get_vcpu(kvm, cpuid);
@@ -1487,8 +1566,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
} else {
if (level_triggered) {
vgic_dist_irq_clear_level(vcpu, irq_num);
- if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
+ if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
vgic_dist_irq_clear_pending(vcpu, irq_num);
+ vgic_cpu_irq_clear(vcpu, irq_num);
+ if (!compute_pending_for_cpu(vcpu))
+ clear_bit(cpuid, dist->irq_pending_on_cpu);
+ }
}
ret = false;
@@ -1519,18 +1602,46 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
out:
spin_unlock(&dist->lock);
- return ret ? cpuid : -EINVAL;
+ if (ret) {
+ /* kick the specified vcpu */
+ kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
+ }
+
+ return 0;
+}
+
+static int vgic_lazy_init(struct kvm *kvm)
+{
+ int ret = 0;
+
+ if (unlikely(!vgic_initialized(kvm))) {
+ /*
+ * We only provide the automatic initialization of the VGIC
+ * for the legacy case of a GICv2. Any other type must
+ * be explicitly initialized once setup with the respective
+ * KVM device call.
+ */
+ if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
+ return -EBUSY;
+
+ mutex_lock(&kvm->lock);
+ ret = vgic_init(kvm);
+ mutex_unlock(&kvm->lock);
+ }
+
+ return ret;
}
/**
* kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
* @kvm: The VM structure pointer
* @cpuid: The CPU for PPIs
- * @irq_num: The IRQ number that is assigned to the device
+ * @irq_num: The IRQ number that is assigned to the device. This IRQ
+ * must not be mapped to a HW interrupt.
* @level: Edge-triggered: true: to trigger the interrupt
* false: to ignore the call
- * Level-sensitive true: activates an interrupt
- * false: deactivates an interrupt
+ * Level-sensitive true: raise the input signal
+ * false: lower the input signal
*
* The GIC is not concerned with devices being active-LOW or active-HIGH for
* level-sensitive interrupts. You can think of the level parameter as 1
@@ -1539,39 +1650,44 @@ out:
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level)
{
- int ret = 0;
- int vcpu_id;
-
- if (unlikely(!vgic_initialized(kvm))) {
- /*
- * We only provide the automatic initialization of the VGIC
- * for the legacy case of a GICv2. Any other type must
- * be explicitly initialized once setup with the respective
- * KVM device call.
- */
- if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
- ret = -EBUSY;
- goto out;
- }
- mutex_lock(&kvm->lock);
- ret = vgic_init(kvm);
- mutex_unlock(&kvm->lock);
+ struct irq_phys_map *map;
+ int ret;
- if (ret)
- goto out;
- }
+ ret = vgic_lazy_init(kvm);
+ if (ret)
+ return ret;
- if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
+ map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
+ if (map)
return -EINVAL;
- vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
- if (vcpu_id >= 0) {
- /* kick the specified vcpu */
- kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
- }
+ return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
+}
-out:
- return ret;
+/**
+ * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
+ * @kvm: The VM structure pointer
+ * @cpuid: The CPU for PPIs
+ * @map: Pointer to an irq_phys_map structure describing the mapping
+ * @level: Edge-triggered: true: to trigger the interrupt
+ * false: to ignore the call
+ * Level-sensitive true: raise the input signal
+ * false: lower the input signal
+ *
+ * The GIC is not concerned with devices being active-LOW or active-HIGH for
+ * level-sensitive interrupts. You can think of the level parameter as 1
+ * being HIGH and 0 being LOW and all devices being active-HIGH.
+ */
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
+ struct irq_phys_map *map, bool level)
+{
+ int ret;
+
+ ret = vgic_lazy_init(kvm);
+ if (ret)
+ return ret;
+
+ return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1585,6 +1701,164 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
+ int virt_irq)
+{
+ if (virt_irq < VGIC_NR_PRIVATE_IRQS)
+ return &vcpu->arch.vgic_cpu.irq_phys_map_list;
+ else
+ return &vcpu->kvm->arch.vgic.irq_phys_map_list;
+}
+
+/**
+ * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
+ * @vcpu: The VCPU pointer
+ * @virt_irq: The virtual irq number
+ * @irq: The Linux IRQ number
+ *
+ * Establish a mapping between a guest visible irq (@virt_irq) and a
+ * Linux irq (@irq). On injection, @virt_irq will be associated with
+ * the physical interrupt represented by @irq. This mapping can be
+ * established multiple times as long as the parameters are the same.
+ *
+ * Returns a valid pointer on success, and an error pointer otherwise
+ */
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+ int virt_irq, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+ struct irq_phys_map *map;
+ struct irq_phys_map_entry *entry;
+ struct irq_desc *desc;
+ struct irq_data *data;
+ int phys_irq;
+
+ desc = irq_to_desc(irq);
+ if (!desc) {
+ kvm_err("%s: no interrupt descriptor\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ data = irq_desc_get_irq_data(desc);
+ while (data->parent_data)
+ data = data->parent_data;
+
+ phys_irq = data->hwirq;
+
+ /* Create a new mapping */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&dist->irq_phys_map_lock);
+
+ /* Try to match an existing mapping */
+ map = vgic_irq_map_search(vcpu, virt_irq);
+ if (map) {
+ /* Make sure this mapping matches */
+ if (map->phys_irq != phys_irq ||
+ map->irq != irq)
+ map = ERR_PTR(-EINVAL);
+
+ /* Found an existing, valid mapping */
+ goto out;
+ }
+
+ map = &entry->map;
+ map->virt_irq = virt_irq;
+ map->phys_irq = phys_irq;
+ map->irq = irq;
+
+ list_add_tail_rcu(&entry->entry, root);
+
+out:
+ spin_unlock(&dist->irq_phys_map_lock);
+ /* If we've found a hit in the existing list, free the useless
+ * entry */
+ if (IS_ERR(map) || map != &entry->map)
+ kfree(entry);
+ return map;
+}
+
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+ int virt_irq)
+{
+ struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+ struct irq_phys_map_entry *entry;
+ struct irq_phys_map *map;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(entry, root, entry) {
+ map = &entry->map;
+ if (map->virt_irq == virt_irq) {
+ rcu_read_unlock();
+ return map;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
+{
+ struct irq_phys_map_entry *entry;
+
+ entry = container_of(rcu, struct irq_phys_map_entry, rcu);
+ kfree(entry);
+}
+
+/**
+ * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
+ * @vcpu: The VCPU pointer
+ * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
+ *
+ * Remove an existing mapping between virtual and physical interrupts.
+ */
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ struct irq_phys_map_entry *entry;
+ struct list_head *root;
+
+ if (!map)
+ return -EINVAL;
+
+ root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
+
+ spin_lock(&dist->irq_phys_map_lock);
+
+ list_for_each_entry(entry, root, entry) {
+ if (&entry->map == map) {
+ list_del_rcu(&entry->entry);
+ call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+ break;
+ }
+ }
+
+ spin_unlock(&dist->irq_phys_map_lock);
+
+ return 0;
+}
+
+static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct irq_phys_map_entry *entry;
+
+ spin_lock(&dist->irq_phys_map_lock);
+
+ list_for_each_entry(entry, root, entry) {
+ list_del_rcu(&entry->entry);
+ call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+ }
+
+ spin_unlock(&dist->irq_phys_map_lock);
+}
+
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1592,33 +1866,28 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
kfree(vgic_cpu->pending_shared);
kfree(vgic_cpu->active_shared);
kfree(vgic_cpu->pend_act_shared);
- kfree(vgic_cpu->vgic_irq_lr_map);
+ vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
vgic_cpu->pending_shared = NULL;
vgic_cpu->active_shared = NULL;
vgic_cpu->pend_act_shared = NULL;
- vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-
- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+ int sz = nr_longs * sizeof(unsigned long);
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
if (!vgic_cpu->pending_shared
|| !vgic_cpu->active_shared
- || !vgic_cpu->pend_act_shared
- || !vgic_cpu->vgic_irq_lr_map) {
+ || !vgic_cpu->pend_act_shared) {
kvm_vgic_vcpu_destroy(vcpu);
return -ENOMEM;
}
- memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
-
/*
* Store the number of LRs per vcpu, so we don't have to go
* all the way to the distributor structure to find out. Only
@@ -1630,6 +1899,17 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
}
/**
+ * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
+}
+
+/**
* kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
*
* The host's GIC naturally limits the maximum amount of VCPUs a guest
@@ -1666,6 +1946,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
kfree(dist->irq_spi_target);
kfree(dist->irq_pending_on_cpu);
kfree(dist->irq_active_on_cpu);
+ vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
dist->irq_sgi_sources = NULL;
dist->irq_spi_cpu = NULL;
dist->irq_spi_target = NULL;
@@ -1748,14 +2029,24 @@ int vgic_init(struct kvm *kvm)
break;
}
- for (i = 0; i < dist->nr_irqs; i++) {
- if (i < VGIC_NR_PPIS)
+ /*
+ * Enable and configure all SGIs to be edge-triggered and
+ * configure all PPIs as level-triggered.
+ */
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ if (i < VGIC_NR_SGIS) {
+ /* SGIs */
vgic_bitmap_set_irq_val(&dist->irq_enabled,
vcpu->vcpu_id, i, 1);
- if (i < VGIC_NR_PRIVATE_IRQS)
vgic_bitmap_set_irq_val(&dist->irq_cfg,
vcpu->vcpu_id, i,
VGIC_CFG_EDGE);
+ } else if (i < VGIC_NR_PRIVATE_IRQS) {
+ /* PPIs */
+ vgic_bitmap_set_irq_val(&dist->irq_cfg,
+ vcpu->vcpu_id, i,
+ VGIC_CFG_LEVEL);
+ }
}
vgic_enable(vcpu);
@@ -1774,7 +2065,7 @@ static int init_vgic_model(struct kvm *kvm, int type)
case KVM_DEV_TYPE_ARM_VGIC_V2:
vgic_v2_init_emulation(kvm);
break;
-#ifdef CONFIG_ARM_GIC_V3
+#ifdef CONFIG_KVM_ARM_VGIC_V3
case KVM_DEV_TYPE_ARM_VGIC_V3:
vgic_v3_init_emulation(kvm);
break;
@@ -1789,6 +2080,18 @@ static int init_vgic_model(struct kvm *kvm, int type)
return 0;
}
+/**
+ * kvm_vgic_early_init - Earliest possible vgic initialization stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_early_init(struct kvm *kvm)
+{
+ spin_lock_init(&kvm->arch.vgic.lock);
+ spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
+ INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
+}
+
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
int i, vcpu_lock_idx = -1, ret;
@@ -1834,7 +2137,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
if (ret)
goto out_unlock;
- spin_lock_init(&kvm->arch.vgic.lock);
kvm->arch.vgic.in_kernel = true;
kvm->arch.vgic.vgic_model = type;
kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
@@ -1925,7 +2227,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
block_size = KVM_VGIC_V2_CPU_SIZE;
alignment = SZ_4K;
break;
-#ifdef CONFIG_ARM_GIC_V3
+#ifdef CONFIG_KVM_ARM_VGIC_V3
case KVM_VGIC_V3_ADDR_TYPE_DIST:
type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
addr_ptr = &vgic->vgic_dist_base;
@@ -2128,9 +2430,6 @@ int kvm_vgic_hyp_init(void)
goto out_free_irq;
}
- /* Callback into for arch code for setup */
- vgic_arch_setup(vgic);
-
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
return 0;
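[Aside, not part of the patch] kvm_vgic_map_phys_irq() and vgic_irq_map_search() above maintain a per-VCPU (for private interrupts) or per-VM (for shared interrupts) list of virtual-to-physical interrupt mappings, reusing an existing entry when the same parameters are passed again and rejecting a conflicting remap. The standalone sketch below models that behaviour with a plain singly linked list in place of the kernel's RCU-protected list and locking; names and error handling are simplified stand-ins.

/* Standalone sketch of the virt->phys IRQ mapping list; plain list, no RCU/locking. */
#include <stdio.h>
#include <stdlib.h>

struct irq_map {
	int virt_irq;          /* guest-visible interrupt number */
	int phys_irq;          /* host physical interrupt number */
	struct irq_map *next;
};

/* Add a mapping, reusing an existing entry when the parameters match;
 * returns NULL on a conflicting remap or allocation failure. */
static struct irq_map *map_phys_irq(struct irq_map **head,
				    int virt_irq, int phys_irq)
{
	struct irq_map *m;

	for (m = *head; m; m = m->next)
		if (m->virt_irq == virt_irq)
			return m->phys_irq == phys_irq ? m : NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;
	m->virt_irq = virt_irq;
	m->phys_irq = phys_irq;
	m->next = *head;
	*head = m;
	return m;
}

/* Look up the mapping for a virtual interrupt, if any. */
static struct irq_map *map_search(struct irq_map *head, int virt_irq)
{
	for (; head; head = head->next)
		if (head->virt_irq == virt_irq)
			return head;
	return NULL;
}

int main(void)
{
	struct irq_map *maps = NULL;
	struct irq_map *m;

	map_phys_irq(&maps, 27, 27);        /* e.g. the virtual timer PPI */
	m = map_search(maps, 27);
	printf("virt 27 -> phys %d\n", m ? m->phys_irq : -1);
	printf("virt 30 mapped: %s\n", map_search(maps, 30) ? "yes" : "no");
	return 0;
}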
diff --git a/kernel/virt/kvm/async_pf.c b/kernel/virt/kvm/async_pf.c
index cbcabb94c..9378d0919 100644
--- a/kernel/virt/kvm/async_pf.c
+++ b/kernel/virt/kvm/async_pf.c
@@ -94,8 +94,12 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, gva);
- if (swaitqueue_active(&vcpu->wq))
- swait_wake_interruptible(&vcpu->wq);
+ /*
+ * This memory barrier pairs with prepare_to_wait's set_current_state()
+ */
+ smp_mb();
+ if (swait_active(&vcpu->wq))
+ swake_up(&vcpu->wq);
mmput(mm);
kvm_put_kvm(vcpu->kvm);
@@ -169,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
* do alloc nowait since if we are going to sleep anyway we
* may as well sleep faulting in page
*/
- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
if (!work)
return 0;
diff --git a/kernel/virt/kvm/async_pf.h b/kernel/virt/kvm/async_pf.h
index e7ef6447c..ec4cfa278 100644
--- a/kernel/virt/kvm/async_pf.h
+++ b/kernel/virt/kvm/async_pf.h
@@ -29,8 +29,8 @@ void kvm_async_pf_deinit(void);
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
#else
#define kvm_async_pf_init() (0)
-#define kvm_async_pf_deinit() do{}while(0)
-#define kvm_async_pf_vcpu_init(C) do{}while(0)
+#define kvm_async_pf_deinit() do {} while (0)
+#define kvm_async_pf_vcpu_init(C) do {} while (0)
#endif
#endif
diff --git a/kernel/virt/kvm/coalesced_mmio.h b/kernel/virt/kvm/coalesced_mmio.h
index b280c2044..6bca74ca5 100644
--- a/kernel/virt/kvm/coalesced_mmio.h
+++ b/kernel/virt/kvm/coalesced_mmio.h
@@ -24,9 +24,9 @@ struct kvm_coalesced_mmio_dev {
int kvm_coalesced_mmio_init(struct kvm *kvm);
void kvm_coalesced_mmio_free(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone);
+ struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone);
+ struct kvm_coalesced_mmio_zone *zone);
#else
diff --git a/kernel/virt/kvm/eventfd.c b/kernel/virt/kvm/eventfd.c
index 9ff4193df..46dbc0a7d 100644
--- a/kernel/virt/kvm/eventfd.c
+++ b/kernel/virt/kvm/eventfd.c
@@ -23,6 +23,7 @@
#include <linux/kvm_host.h>
#include <linux/kvm.h>
+#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
@@ -34,73 +35,20 @@
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
+#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#include <kvm/iodev.h>
#ifdef CONFIG_HAVE_KVM_IRQFD
-/*
- * --------------------------------------------------------------------
- * irqfd: Allows an fd to be used to inject an interrupt to the guest
- *
- * Credit goes to Avi Kivity for the original idea.
- * --------------------------------------------------------------------
- */
-
-/*
- * Resampling irqfds are a special variety of irqfds used to emulate
- * level triggered interrupts. The interrupt is asserted on eventfd
- * trigger. On acknowledgement through the irq ack notifier, the
- * interrupt is de-asserted and userspace is notified through the
- * resamplefd. All resamplers on the same gsi are de-asserted
- * together, so we don't need to track the state of each individual
- * user. We can also therefore share the same irq source ID.
- */
-struct _irqfd_resampler {
- struct kvm *kvm;
- /*
- * List of resampling struct _irqfd objects sharing this gsi.
- * RCU list modified under kvm->irqfds.resampler_lock
- */
- struct list_head list;
- struct kvm_irq_ack_notifier notifier;
- /*
- * Entry in list of kvm->irqfd.resampler_list. Use for sharing
- * resamplers among irqfds on the same gsi.
- * Accessed and modified under kvm->irqfds.resampler_lock
- */
- struct list_head link;
-};
-
-struct _irqfd {
- /* Used for MSI fast-path */
- struct kvm *kvm;
- wait_queue_t wait;
- /* Update side is protected by irqfds.lock */
- struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
- /* Used for level IRQ fast-path */
- int gsi;
- struct work_struct inject;
- /* The resampler used by this irqfd (resampler-only) */
- struct _irqfd_resampler *resampler;
- /* Eventfd notified on resample (resampler-only) */
- struct eventfd_ctx *resamplefd;
- /* Entry in list of irqfds for a resampler (resampler-only) */
- struct list_head resampler_link;
- /* Used for setup/shutdown */
- struct eventfd_ctx *eventfd;
- struct list_head list;
- poll_table pt;
- struct work_struct shutdown;
-};
static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
{
- struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(work, struct kvm_kernel_irqfd, inject);
struct kvm *kvm = irqfd->kvm;
if (!irqfd->resampler) {
@@ -121,12 +69,13 @@ irqfd_inject(struct work_struct *work)
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
- struct _irqfd_resampler *resampler;
+ struct kvm_kernel_irqfd_resampler *resampler;
struct kvm *kvm;
- struct _irqfd *irqfd;
+ struct kvm_kernel_irqfd *irqfd;
int idx;
- resampler = container_of(kian, struct _irqfd_resampler, notifier);
+ resampler = container_of(kian,
+ struct kvm_kernel_irqfd_resampler, notifier);
kvm = resampler->kvm;
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
@@ -141,9 +90,9 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
}
static void
-irqfd_resampler_shutdown(struct _irqfd *irqfd)
+irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
- struct _irqfd_resampler *resampler = irqfd->resampler;
+ struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
struct kvm *kvm = resampler->kvm;
mutex_lock(&kvm->irqfds.resampler_lock);
@@ -168,7 +117,8 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
static void
irqfd_shutdown(struct work_struct *work)
{
- struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(work, struct kvm_kernel_irqfd, shutdown);
u64 cnt;
/*
@@ -191,6 +141,9 @@ irqfd_shutdown(struct work_struct *work)
/*
* It is now safe to release the object's resources
*/
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ irq_bypass_unregister_consumer(&irqfd->consumer);
+#endif
eventfd_ctx_put(irqfd->eventfd);
kfree(irqfd);
}
@@ -198,7 +151,7 @@ irqfd_shutdown(struct work_struct *work)
/* assumes kvm->irqfds.lock is held */
static bool
-irqfd_is_active(struct _irqfd *irqfd)
+irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
return list_empty(&irqfd->list) ? false : true;
}
@@ -209,7 +162,7 @@ irqfd_is_active(struct _irqfd *irqfd)
* assumes kvm->irqfds.lock is held
*/
static void
-irqfd_deactivate(struct _irqfd *irqfd)
+irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
BUG_ON(!irqfd_is_active(irqfd));
@@ -218,13 +171,23 @@ irqfd_deactivate(struct _irqfd *irqfd)
queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
+int __attribute__((weak)) kvm_arch_set_irq_inatomic(
+ struct kvm_kernel_irq_routing_entry *irq,
+ struct kvm *kvm, int irq_source_id,
+ int level,
+ bool line_status)
+{
+ return -EWOULDBLOCK;
+}
+
/*
* Called with wqh->lock held and interrupts disabled
*/
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
- struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(wait, struct kvm_kernel_irqfd, wait);
unsigned long flags = (unsigned long)key;
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
@@ -238,10 +201,9 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
irq = irqfd->irq_entry;
} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
/* An event has been signaled, inject an interrupt */
- if (irq.type == KVM_IRQ_ROUTING_MSI)
- kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
- false);
- else
+ if (kvm_arch_set_irq_inatomic(&irq, kvm,
+ KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+ false) == -EWOULDBLOCK)
schedule_work(&irqfd->inject);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
@@ -274,37 +236,54 @@ static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
- struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(pt, struct kvm_kernel_irqfd, pt);
add_wait_queue(wqh, &irqfd->wait);
}
/* Must be called under irqfds.lock */
-static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
+static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
- int i, n_entries;
+ int n_entries;
n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
write_seqcount_begin(&irqfd->irq_entry_sc);
- irqfd->irq_entry.type = 0;
-
e = entries;
- for (i = 0; i < n_entries; ++i, ++e) {
- /* Only fast-path MSI. */
- if (e->type == KVM_IRQ_ROUTING_MSI)
- irqfd->irq_entry = *e;
- }
+ if (n_entries == 1)
+ irqfd->irq_entry = *e;
+ else
+ irqfd->irq_entry.type = 0;
write_seqcount_end(&irqfd->irq_entry_sc);
}
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+void __attribute__((weak)) kvm_arch_irq_bypass_stop(
+ struct irq_bypass_consumer *cons)
+{
+}
+
+void __attribute__((weak)) kvm_arch_irq_bypass_start(
+ struct irq_bypass_consumer *cons)
+{
+}
+
+int __attribute__((weak)) kvm_arch_update_irqfd_routing(
+ struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set)
+{
+ return 0;
+}
+#endif
+
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
- struct _irqfd *irqfd, *tmp;
+ struct kvm_kernel_irqfd *irqfd, *tmp;
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
@@ -340,7 +319,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
irqfd->eventfd = eventfd;
if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
- struct _irqfd_resampler *resampler;
+ struct kvm_kernel_irqfd_resampler *resampler;
resamplefd = eventfd_ctx_fdget(args->resamplefd);
if (IS_ERR(resamplefd)) {
@@ -428,6 +407,17 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
* we might race against the POLLHUP
*/
fdput(f);
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ irqfd->consumer.token = (void *)irqfd->eventfd;
+ irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
+ irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
+ irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
+ irqfd->consumer.start = kvm_arch_irq_bypass_start;
+ ret = irq_bypass_register_consumer(&irqfd->consumer);
+ if (ret)
+ pr_info("irq bypass consumer (token %p) registration fails: %d\n",
+ irqfd->consumer.token, ret);
+#endif
return 0;
@@ -469,9 +459,18 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
-void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
struct kvm_irq_ack_notifier *kian;
+
+ hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+ link)
+ if (kian->gsi == gsi)
+ kian->irq_acked(kian);
+}
+
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
int gsi, idx;
trace_kvm_ack_irq(irqchip, pin);
@@ -479,10 +478,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
- hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
- link)
- if (kian->gsi == gsi)
- kian->irq_acked(kian);
+ kvm_notify_acked_gsi(kvm, gsi);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
@@ -525,7 +521,7 @@ kvm_eventfd_init(struct kvm *kvm)
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
- struct _irqfd *irqfd, *tmp;
+ struct kvm_kernel_irqfd *irqfd, *tmp;
struct eventfd_ctx *eventfd;
eventfd = eventfd_ctx_fdget(args->fd);
@@ -581,7 +577,7 @@ kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
void
kvm_irqfd_release(struct kvm *kvm)
{
- struct _irqfd *irqfd, *tmp;
+ struct kvm_kernel_irqfd *irqfd, *tmp;
spin_lock_irq(&kvm->irqfds.lock);
@@ -604,13 +600,23 @@ kvm_irqfd_release(struct kvm *kvm)
*/
void kvm_irq_routing_update(struct kvm *kvm)
{
- struct _irqfd *irqfd;
+ struct kvm_kernel_irqfd *irqfd;
spin_lock_irq(&kvm->irqfds.lock);
- list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+ list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
irqfd_update(kvm, irqfd);
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ if (irqfd->producer) {
+ int ret = kvm_arch_update_irqfd_routing(
+ irqfd->kvm, irqfd->producer->irq,
+ irqfd->gsi, 1);
+ WARN_ON(ret);
+ }
+#endif
+ }
+
spin_unlock_irq(&kvm->irqfds.lock);
}
@@ -771,40 +777,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
return KVM_MMIO_BUS;
}
-static int
-kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
+ enum kvm_bus bus_idx,
+ struct kvm_ioeventfd *args)
{
- enum kvm_bus bus_idx;
- struct _ioeventfd *p;
- struct eventfd_ctx *eventfd;
- int ret;
- bus_idx = ioeventfd_bus_from_flags(args->flags);
- /* must be natural-word sized, or 0 to ignore length */
- switch (args->len) {
- case 0:
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- return -EINVAL;
- }
-
- /* check for range overflow */
- if (args->addr + args->len < args->addr)
- return -EINVAL;
-
- /* check for extra flags that we don't understand */
- if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
- return -EINVAL;
-
- /* ioeventfd with no length can't be combined with DATAMATCH */
- if (!args->len &&
- args->flags & (KVM_IOEVENTFD_FLAG_PIO |
- KVM_IOEVENTFD_FLAG_DATAMATCH))
- return -EINVAL;
+ struct eventfd_ctx *eventfd;
+ struct _ioeventfd *p;
+ int ret;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
@@ -843,16 +823,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (ret < 0)
goto unlock_fail;
- /* When length is ignored, MMIO is also put on a separate bus, for
- * faster lookups.
- */
- if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
- ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
- p->addr, 0, &p->dev);
- if (ret < 0)
- goto register_fail;
- }
-
kvm->buses[bus_idx]->ioeventfd_count++;
list_add_tail(&p->list, &kvm->ioeventfds);
@@ -860,8 +830,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
return 0;
-register_fail:
- kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
mutex_unlock(&kvm->slots_lock);
@@ -873,14 +841,13 @@ fail:
}
static int
-kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_ioeventfd *args)
{
- enum kvm_bus bus_idx;
struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd;
int ret = -ENOENT;
- bus_idx = ioeventfd_bus_from_flags(args->flags);
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
@@ -901,10 +868,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
- if (!p->length) {
- kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
- &p->dev);
- }
kvm->buses[bus_idx]->ioeventfd_count--;
ioeventfd_release(p);
ret = 0;
@@ -918,6 +881,69 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
return ret;
}
+static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+ enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
+ int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+
+ if (!args->len && bus_idx == KVM_MMIO_BUS)
+ kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+
+ return ret;
+}
+
+static int
+kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+ enum kvm_bus bus_idx;
+ int ret;
+
+ bus_idx = ioeventfd_bus_from_flags(args->flags);
+ /* must be natural-word sized, or 0 to ignore length */
+ switch (args->len) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check for range overflow */
+ if (args->addr + args->len < args->addr)
+ return -EINVAL;
+
+ /* check for extra flags that we don't understand */
+ if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
+ return -EINVAL;
+
+ /* ioeventfd with no length can't be combined with DATAMATCH */
+ if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
+ return -EINVAL;
+
+ ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
+ if (ret)
+ goto fail;
+
+ /* When length is ignored, MMIO is also put on a separate bus, for
+ * faster lookups.
+ */
+ if (!args->len && bus_idx == KVM_MMIO_BUS) {
+ ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+ if (ret < 0)
+ goto fast_fail;
+ }
+
+ return 0;
+
+fast_fail:
+ kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+fail:
+ return ret;
+}
+
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
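The ioeventfd rework above splits assignment into a per-bus helper so that a zero-length MMIO ioeventfd is additionally registered on KVM_FAST_MMIO_BUS, with the first registration rolled back if the second one fails. The standalone sketch below shows only that register-twice-with-rollback shape; the bus ids and helpers are stand-ins, not the kernel API.

/* ioeventfd_buses.c - sketch of registering a zero-length MMIO
 * ioeventfd on both the normal and the fast MMIO bus, rolling back the
 * first registration if the second one fails.
 */
#include <stdio.h>

enum bus { MMIO_BUS, PIO_BUS, FAST_MMIO_BUS };

struct ioeventfd_args {
	unsigned long addr;
	unsigned int len;	/* 0 means "any length" */
	int pio;
};

static int assign_on_bus(enum bus b, const struct ioeventfd_args *a)
{
	printf("register addr=0x%lx len=%u on bus %d\n", a->addr, a->len, b);
	return 0;	/* pretend registration succeeded */
}

static void deassign_on_bus(enum bus b, const struct ioeventfd_args *a)
{
	printf("unregister addr=0x%lx from bus %d\n", a->addr, b);
}

static int assign_ioeventfd(const struct ioeventfd_args *a)
{
	enum bus bus = a->pio ? PIO_BUS : MMIO_BUS;
	int ret;

	ret = assign_on_bus(bus, a);
	if (ret)
		return ret;

	/* Zero-length MMIO also goes on the fast bus for quick lookup. */
	if (!a->len && bus == MMIO_BUS) {
		ret = assign_on_bus(FAST_MMIO_BUS, a);
		if (ret) {
			deassign_on_bus(bus, a);	/* roll back */
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct ioeventfd_args a = { .addr = 0xfe000000, .len = 0, .pio = 0 };

	return assign_ioeventfd(&a) ? 1 : 0;
}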
diff --git a/kernel/virt/kvm/irqchip.c b/kernel/virt/kvm/irqchip.c
index 1d56a901e..f0b08a2a4 100644
--- a/kernel/virt/kvm/irqchip.c
+++ b/kernel/virt/kvm/irqchip.c
@@ -31,17 +31,6 @@
#include <trace/events/kvm.h>
#include "irq.h"
-struct kvm_irq_routing_table {
- int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
- struct kvm_kernel_irq_routing_entry *rt_entries;
- u32 nr_rt_entries;
- /*
- * Array indexed by gsi. Each entry contains list of irq chips
- * the gsi is connected to.
- */
- struct hlist_head map[0];
-};
-
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
@@ -118,11 +107,32 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
return ret;
}
+static void free_irq_routing_table(struct kvm_irq_routing_table *rt)
+{
+ int i;
+
+ if (!rt)
+ return;
+
+ for (i = 0; i < rt->nr_rt_entries; ++i) {
+ struct kvm_kernel_irq_routing_entry *e;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
+ hlist_del(&e->link);
+ kfree(e);
+ }
+ }
+
+ kfree(rt);
+}
+
void kvm_free_irq_routing(struct kvm *kvm)
{
/* Called only during vm destruction. Nobody can use the pointer
at this stage */
- kfree(kvm->irq_routing);
+ struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);
+ free_irq_routing_table(rt);
}
static int setup_routing_entry(struct kvm_irq_routing_table *rt,
@@ -134,11 +144,11 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
/*
* Do not allow GSI to be mapped to the same irqchip more than once.
- * Allow only one to one mapping between GSI and MSI.
+ * Allow only one to one mapping between GSI and non-irqchip routing.
*/
hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
- if (ei->type == KVM_IRQ_ROUTING_MSI ||
- ue->type == KVM_IRQ_ROUTING_MSI ||
+ if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
+ ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->u.irqchip.irqchip == ei->irqchip.irqchip)
return r;
@@ -173,27 +183,35 @@ int kvm_set_irq_routing(struct kvm *kvm,
nr_rt_entries += 1;
- new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
- + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
+ new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)),
GFP_KERNEL);
if (!new)
return -ENOMEM;
- new->rt_entries = (void *)&new->map[nr_rt_entries];
-
new->nr_rt_entries = nr_rt_entries;
for (i = 0; i < KVM_NR_IRQCHIPS; i++)
for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++)
new->chip[i][j] = -1;
for (i = 0; i < nr; ++i) {
+ struct kvm_kernel_irq_routing_entry *e;
+
+ r = -ENOMEM;
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto out;
+
r = -EINVAL;
- if (ue->flags)
+ if (ue->flags) {
+ kfree(e);
goto out;
- r = setup_routing_entry(new, &new->rt_entries[i], ue);
- if (r)
+ }
+ r = setup_routing_entry(new, e, ue);
+ if (r) {
+ kfree(e);
goto out;
+ }
++ue;
}
@@ -203,12 +221,15 @@ int kvm_set_irq_routing(struct kvm *kvm,
kvm_irq_routing_update(kvm);
mutex_unlock(&kvm->irq_lock);
+ kvm_arch_irq_routing_update(kvm);
+
synchronize_srcu_expedited(&kvm->irq_srcu);
new = old;
r = 0;
out:
- kfree(new);
+ free_irq_routing_table(new);
+
return r;
}
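The irqchip.c changes above drop the single rt_entries array in favour of individually allocated routing entries chained into per-GSI lists, so freeing a table now means walking every bucket. A userspace sketch of that allocate-per-entry / free-by-walking pattern follows, using plain singly linked lists instead of the kernel hlist; all names are illustrative.

/* routing_free.c - per-GSI bucket table whose entries are allocated
 * one by one and freed by walking every bucket, mirroring the shape of
 * free_irq_routing_table(). Simplified lists, not the kernel API.
 */
#include <stdlib.h>
#include <stdio.h>

struct routing_entry {
	unsigned int gsi;
	struct routing_entry *next;
};

struct routing_table {
	unsigned int nr_buckets;
	struct routing_entry *map[];	/* one list head per GSI */
};

static struct routing_table *alloc_table(unsigned int nr)
{
	struct routing_table *rt;

	rt = calloc(1, sizeof(*rt) + nr * sizeof(rt->map[0]));
	if (rt)
		rt->nr_buckets = nr;
	return rt;
}

static int add_entry(struct routing_table *rt, unsigned int gsi)
{
	struct routing_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->gsi = gsi;
	e->next = rt->map[gsi];	/* push onto the GSI's bucket */
	rt->map[gsi] = e;
	return 0;
}

static void free_table(struct routing_table *rt)
{
	unsigned int i;

	if (!rt)
		return;
	for (i = 0; i < rt->nr_buckets; i++) {
		struct routing_entry *e = rt->map[i];

		while (e) {
			struct routing_entry *next = e->next;

			free(e);
			e = next;
		}
	}
	free(rt);
}

int main(void)
{
	struct routing_table *rt = alloc_table(8);

	if (!rt || add_entry(rt, 3) || add_entry(rt, 3) || add_entry(rt, 5)) {
		free_table(rt);
		return 1;
	}
	printf("bucket 3 head gsi=%u\n", rt->map[3]->gsi);
	free_table(rt);
	return 0;
}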
diff --git a/kernel/virt/kvm/kvm_main.c b/kernel/virt/kvm/kvm_main.c
index de930768a..eeed326be 100644
--- a/kernel/virt/kvm/kvm_main.c
+++ b/kernel/virt/kvm/kvm_main.c
@@ -66,9 +66,18 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static unsigned int halt_poll_ns;
+/* Architectures should define their poll value according to the halt latency */
+static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+/* Default doubles per-vcpu halt_poll_ns. */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, int, S_IRUGO);
+
+/* Default resets per-vcpu halt_poll_ns. */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, int, S_IRUGO);
+
/*
* Ordering of locks:
*
@@ -103,8 +112,7 @@ static void hardware_disable_all(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
static void kvm_release_pfn_dirty(pfn_t pfn);
-static void mark_page_dirty_in_slot(struct kvm *kvm,
- struct kvm_memory_slot *memslot, gfn_t gfn);
+static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -218,9 +226,12 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
vcpu->pid = NULL;
- init_swait_head(&vcpu->wq);
+ init_swait_queue_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
+ vcpu->pre_pcpu = -1;
+ INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
+
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
@@ -388,6 +399,36 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
return young;
}
+static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int young, idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+ /*
+ * Even though we do not flush TLB, this will still adversely
+ * affect performance on pre-Haswell Intel EPT, where there is
+ * no EPT Access Bit to clear so that we have to tear down EPT
+ * tables instead. If we find this unacceptable, we can always
+ * add a parameter to kvm_age_hva so that it effectively doesn't
+ * do anything on clear_young.
+ *
+ * Also note that currently we never issue secondary TLB flushes
+ * from clear_young, leaving this job up to the regular system
+ * cadence. If we find this inaccurate, we might come up with a
+ * more sophisticated heuristic later.
+ */
+ young = kvm_age_hva(kvm, start, end);
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return young;
+}
+
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
@@ -420,6 +461,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
+ .clear_young = kvm_mmu_notifier_clear_young,
.test_young = kvm_mmu_notifier_test_young,
.change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
@@ -440,13 +482,60 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
-static void kvm_init_memslots_id(struct kvm *kvm)
+static struct kvm_memslots *kvm_alloc_memslots(void)
{
int i;
- struct kvm_memslots *slots = kvm->memslots;
+ struct kvm_memslots *slots;
+ slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+ if (!slots)
+ return NULL;
+
+ /*
+ * Init kvm generation close to the maximum to easily test the
+ * code of handling generation number wrap-around.
+ */
+ slots->generation = -150;
for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
slots->id_to_index[i] = slots->memslots[i].id = i;
+
+ return slots;
+}
+
+static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+ if (!memslot->dirty_bitmap)
+ return;
+
+ kvfree(memslot->dirty_bitmap);
+ memslot->dirty_bitmap = NULL;
+}
+
+/*
+ * Free any memory in @free but not in @dont.
+ */
+static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+ if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
+ kvm_destroy_dirty_bitmap(free);
+
+ kvm_arch_free_memslot(kvm, free, dont);
+
+ free->npages = 0;
+}
+
+static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
+{
+ struct kvm_memory_slot *memslot;
+
+ if (!slots)
+ return;
+
+ kvm_for_each_memslot(memslot, slots)
+ kvm_free_memslot(kvm, memslot, NULL);
+
+ kvfree(slots);
}
static struct kvm *kvm_create_vm(unsigned long type)
@@ -472,17 +561,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
r = -ENOMEM;
- kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
- if (!kvm->memslots)
- goto out_err_no_srcu;
-
- /*
- * Init kvm generation close to the maximum to easily test the
- * code of handling generation number wrap-around.
- */
- kvm->memslots->generation = -150;
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ kvm->memslots[i] = kvm_alloc_memslots();
+ if (!kvm->memslots[i])
+ goto out_err_no_srcu;
+ }
- kvm_init_memslots_id(kvm);
if (init_srcu_struct(&kvm->srcu))
goto out_err_no_srcu;
if (init_srcu_struct(&kvm->irq_srcu))
@@ -512,6 +596,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
+ preempt_notifier_inc();
+
return kvm;
out_err:
@@ -523,7 +609,8 @@ out_err_no_srcu:
out_err_no_disable:
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm->buses[i]);
- kvfree(kvm->memslots);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ kvm_free_memslots(kvm, kvm->memslots[i]);
kvm_arch_free_vm(kvm);
return ERR_PTR(r);
}
@@ -540,40 +627,6 @@ void *kvm_kvzalloc(unsigned long size)
return kzalloc(size, GFP_KERNEL);
}
-static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
-{
- if (!memslot->dirty_bitmap)
- return;
-
- kvfree(memslot->dirty_bitmap);
- memslot->dirty_bitmap = NULL;
-}
-
-/*
- * Free any memory in @free but not in @dont.
- */
-static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
-{
- if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
- kvm_destroy_dirty_bitmap(free);
-
- kvm_arch_free_memslot(kvm, free, dont);
-
- free->npages = 0;
-}
-
-static void kvm_free_physmem(struct kvm *kvm)
-{
- struct kvm_memslots *slots = kvm->memslots;
- struct kvm_memory_slot *memslot;
-
- kvm_for_each_memslot(memslot, slots)
- kvm_free_physmem_slot(kvm, memslot, NULL);
-
- kvfree(kvm->memslots);
-}
-
static void kvm_destroy_devices(struct kvm *kvm)
{
struct list_head *node, *tmp;
@@ -607,10 +660,12 @@ static void kvm_destroy_vm(struct kvm *kvm)
#endif
kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm);
- kvm_free_physmem(kvm);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ kvm_free_memslots(kvm, kvm->memslots[i]);
cleanup_srcu_struct(&kvm->irq_srcu);
cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm);
+ preempt_notifier_dec();
hardware_disable_all();
mmdrop(mm);
}
@@ -670,8 +725,6 @@ static void update_memslots(struct kvm_memslots *slots,
WARN_ON(mslots[i].id != id);
if (!new->npages) {
WARN_ON(!mslots[i].npages);
- new->base_gfn = 0;
- new->flags = 0;
if (mslots[i].npages)
slots->used_slots--;
} else {
@@ -711,7 +764,7 @@ static void update_memslots(struct kvm_memslots *slots,
slots->id_to_index[mslots[i].id] = i;
}
-static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
+static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
@@ -726,9 +779,9 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
}
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
- struct kvm_memslots *slots)
+ int as_id, struct kvm_memslots *slots)
{
- struct kvm_memslots *old_memslots = kvm->memslots;
+ struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
/*
* Set the low bit in the generation, which disables SPTE caching
@@ -737,7 +790,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
WARN_ON(old_memslots->generation & 1);
slots->generation = old_memslots->generation + 1;
- rcu_assign_pointer(kvm->memslots, slots);
+ rcu_assign_pointer(kvm->memslots[as_id], slots);
synchronize_srcu_expedited(&kvm->srcu);
/*
@@ -747,7 +800,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
*/
slots->generation++;
- kvm_arch_memslots_updated(kvm);
+ kvm_arch_memslots_updated(kvm, slots);
return old_memslots;
}
@@ -761,7 +814,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
* Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region *mem)
{
int r;
gfn_t base_gfn;
@@ -769,6 +822,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_memory_slot *slot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots = NULL, *old_memslots;
+ int as_id, id;
enum kvm_mr_change change;
r = check_memory_region_flags(mem);
@@ -776,36 +830,36 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out;
r = -EINVAL;
+ as_id = mem->slot >> 16;
+ id = (u16)mem->slot;
+
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
- if ((mem->slot < KVM_USER_MEM_SLOTS) &&
+ if ((id < KVM_USER_MEM_SLOTS) &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
- if (mem->slot >= KVM_MEM_SLOTS_NUM)
+ if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
- slot = id_to_memslot(kvm->memslots, mem->slot);
+ slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
- if (!npages)
- mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
-
new = old = *slot;
- new.id = mem->slot;
+ new.id = id;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
@@ -828,17 +882,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out;
}
}
- } else if (old.npages) {
+ } else {
+ if (!old.npages)
+ goto out;
+
change = KVM_MR_DELETE;
- } else /* Modify a non-existent slot: disallowed. */
- goto out;
+ new.base_gfn = 0;
+ new.flags = 0;
+ }
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
/* Check for overlaps */
r = -EEXIST;
- kvm_for_each_memslot(slot, kvm->memslots) {
+ kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
if ((slot->id >= KVM_USER_MEM_SLOTS) ||
- (slot->id == mem->slot))
+ (slot->id == id))
continue;
if (!((base_gfn + npages <= slot->base_gfn) ||
(base_gfn >= slot->base_gfn + slot->npages)))
@@ -867,13 +925,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
if (!slots)
goto out_free;
- memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
- slot = id_to_memslot(slots, mem->slot);
+ slot = id_to_memslot(slots, id);
slot->flags |= KVM_MEMSLOT_INVALID;
- old_memslots = install_new_memslots(kvm, slots);
+ old_memslots = install_new_memslots(kvm, as_id, slots);
/* slot was deleted or moved, clear iommu mapping */
kvm_iommu_unmap_pages(kvm, &old);
@@ -898,18 +956,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (r)
goto out_slots;
- /* actual memory is freed via old in kvm_free_physmem_slot below */
+ /* actual memory is freed via old in kvm_free_memslot below */
if (change == KVM_MR_DELETE) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
update_memslots(slots, &new);
- old_memslots = install_new_memslots(kvm, slots);
+ old_memslots = install_new_memslots(kvm, as_id, slots);
- kvm_arch_commit_memory_region(kvm, mem, &old, change);
+ kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
- kvm_free_physmem_slot(kvm, &old, &new);
+ kvm_free_memslot(kvm, &old, &new);
kvfree(old_memslots);
/*
@@ -931,14 +989,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
out_slots:
kvfree(slots);
out_free:
- kvm_free_physmem_slot(kvm, &new, &old);
+ kvm_free_memslot(kvm, &new, &old);
out:
return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region *mem)
{
int r;
@@ -952,24 +1010,29 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
{
- if (mem->slot >= KVM_USER_MEM_SLOTS)
+ if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
return -EINVAL;
+
return kvm_set_memory_region(kvm, mem);
}
int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int r, i;
+ int r, i, as_id, id;
unsigned long n;
unsigned long any = 0;
r = -EINVAL;
- if (log->slot >= KVM_USER_MEM_SLOTS)
+ as_id = log->slot >> 16;
+ id = (u16)log->slot;
+ if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
goto out;
- memslot = id_to_memslot(kvm->memslots, log->slot);
+ slots = __kvm_memslots(kvm, as_id);
+ memslot = id_to_memslot(slots, id);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -1018,17 +1081,21 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
int kvm_get_dirty_log_protect(struct kvm *kvm,
struct kvm_dirty_log *log, bool *is_dirty)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int r, i;
+ int r, i, as_id, id;
unsigned long n;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
r = -EINVAL;
- if (log->slot >= KVM_USER_MEM_SLOTS)
+ as_id = log->slot >> 16;
+ id = (u16)log->slot;
+ if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
goto out;
- memslot = id_to_memslot(kvm->memslots, log->slot);
+ slots = __kvm_memslots(kvm, as_id);
+ memslot = id_to_memslot(slots, id);
dirty_bitmap = memslot->dirty_bitmap;
r = -ENOENT;
@@ -1091,6 +1158,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
+}
+
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
@@ -1166,6 +1238,12 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
+
/*
* If writable is set to false, the hva returned by this function is only
* allowed to be read.
@@ -1188,6 +1266,13 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
+{
+ struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+ return gfn_to_hva_memslot_prot(slot, gfn, writable);
+}
+
static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int write, struct page **page)
{
@@ -1355,9 +1440,8 @@ exit:
return pfn;
}
-static pfn_t
-__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
- bool *async, bool write_fault, bool *writable)
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+ bool *async, bool write_fault, bool *writable)
{
unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
@@ -1376,65 +1460,59 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
return hva_to_pfn(addr, atomic, async, write_fault,
writable);
}
+EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
- bool write_fault, bool *writable)
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+ bool *writable)
{
- struct kvm_memory_slot *slot;
-
- if (async)
- *async = false;
-
- slot = gfn_to_memslot(kvm, gfn);
-
- return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
- writable);
+ return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
+ write_fault, writable);
}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
-pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
+ return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
+EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
- bool write_fault, bool *writable)
+pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
+ return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
+EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
+ return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
}
-EXPORT_SYMBOL_GPL(gfn_to_pfn);
+EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
-pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
- bool *writable)
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+ return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
-pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
- return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
+ return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
+EXPORT_SYMBOL_GPL(gfn_to_pfn);
-pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
- return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
+ return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
- int nr_pages)
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages)
{
unsigned long addr;
gfn_t entry;
- addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
+ addr = gfn_to_hva_many(slot, gfn, &entry);
if (kvm_is_error_hva(addr))
return -1;
@@ -1468,6 +1546,16 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_page);
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ pfn_t pfn;
+
+ pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
+
+ return kvm_pfn_to_page(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
+
void kvm_release_page_clean(struct page *page)
{
WARN_ON(is_error_page(page));
@@ -1530,13 +1618,13 @@ static int next_segment(unsigned long len, int offset)
return len;
}
-int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
- int len)
+static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
+ void *data, int offset, int len)
{
int r;
unsigned long addr;
- addr = gfn_to_hva_prot(kvm, gfn, NULL);
+ addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = __copy_from_user(data, (void __user *)addr + offset, len);
@@ -1544,8 +1632,25 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
return -EFAULT;
return 0;
}
+
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+ int len)
+{
+ struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+ return __kvm_read_guest_page(slot, gfn, data, offset, len);
+}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
+ int offset, int len)
+{
+ struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+ return __kvm_read_guest_page(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
+
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
@@ -1566,15 +1671,33 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
- unsigned long len)
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
- int r;
- unsigned long addr;
gfn_t gfn = gpa >> PAGE_SHIFT;
+ int seg;
int offset = offset_in_page(gpa);
+ int ret;
+
+ while ((seg = next_segment(len, offset)) != 0) {
+ ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
+ if (ret < 0)
+ return ret;
+ offset = 0;
+ len -= seg;
+ data += seg;
+ ++gfn;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
+
+static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+ void *data, int offset, unsigned long len)
+{
+ int r;
+ unsigned long addr;
- addr = gfn_to_hva_prot(kvm, gfn, NULL);
+ addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
pagefault_disable();
@@ -1584,25 +1707,63 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
return -EFAULT;
return 0;
}
-EXPORT_SYMBOL(kvm_read_guest_atomic);
-int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
- int offset, int len)
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+ unsigned long len)
+{
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+ int offset = offset_in_page(gpa);
+
+ return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
+
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
+ void *data, unsigned long len)
+{
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ int offset = offset_in_page(gpa);
+
+ return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
+
+static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
+ const void *data, int offset, int len)
{
int r;
unsigned long addr;
- addr = gfn_to_hva(kvm, gfn);
+ addr = gfn_to_hva_memslot(memslot, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = __copy_to_user((void __user *)addr + offset, data, len);
if (r)
return -EFAULT;
- mark_page_dirty(kvm, gfn);
+ mark_page_dirty_in_slot(memslot, gfn);
return 0;
}
+
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
+ const void *data, int offset, int len)
+{
+ struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+ return __kvm_write_guest_page(slot, gfn, data, offset, len);
+}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+ const void *data, int offset, int len)
+{
+ struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+ return __kvm_write_guest_page(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
+
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len)
{
@@ -1624,6 +1785,27 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+ unsigned long len)
+{
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ int seg;
+ int offset = offset_in_page(gpa);
+ int ret;
+
+ while ((seg = next_segment(len, offset)) != 0) {
+ ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
+ if (ret < 0)
+ return ret;
+ offset = 0;
+ len -= seg;
+ data += seg;
+ ++gfn;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
+
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{
@@ -1681,7 +1863,7 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
r = __copy_to_user((void __user *)ghc->hva, data, len);
if (r)
return -EFAULT;
- mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+ mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
return 0;
}
@@ -1739,8 +1921,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
-static void mark_page_dirty_in_slot(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
gfn_t gfn)
{
if (memslot && memslot->dirty_bitmap) {
@@ -1755,10 +1936,51 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
struct kvm_memory_slot *memslot;
memslot = gfn_to_memslot(kvm, gfn);
- mark_page_dirty_in_slot(kvm, memslot, gfn);
+ mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ struct kvm_memory_slot *memslot;
+
+ memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ mark_page_dirty_in_slot(memslot, gfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+
+static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ int old, val;
+
+ old = val = vcpu->halt_poll_ns;
+ /* 10us base */
+ if (val == 0 && halt_poll_ns_grow)
+ val = 10000;
+ else
+ val *= halt_poll_ns_grow;
+
+ if (val > halt_poll_ns)
+ val = halt_poll_ns;
+
+ vcpu->halt_poll_ns = val;
+ trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
+}
+
+static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ int old, val;
+
+ old = val = vcpu->halt_poll_ns;
+ if (halt_poll_ns_shrink == 0)
+ val = 0;
+ else
+ val /= halt_poll_ns_shrink;
+
+ vcpu->halt_poll_ns = val;
+ trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
+}
+
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
if (kvm_arch_vcpu_runnable(vcpu)) {
@@ -1779,13 +2001,15 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
ktime_t start, cur;
- DEFINE_SWAITER(wait);
+ DECLARE_SWAITQUEUE(wait);
bool waited = false;
+ u64 block_ns;
start = cur = ktime_get();
- if (halt_poll_ns) {
- ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+ if (vcpu->halt_poll_ns) {
+ ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
+ ++vcpu->stat.halt_attempted_poll;
do {
/*
* This sets KVM_REQ_UNHALT if an interrupt
@@ -1799,8 +2023,10 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
} while (single_task_running() && ktime_before(cur, stop));
}
+ kvm_arch_vcpu_blocking(vcpu);
+
for (;;) {
- swait_prepare(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
if (kvm_vcpu_check_block(vcpu) < 0)
break;
@@ -1809,11 +2035,27 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
schedule();
}
- swait_finish(&vcpu->wq, &wait);
+ finish_swait(&vcpu->wq, &wait);
cur = ktime_get();
+ kvm_arch_vcpu_unblocking(vcpu);
out:
- trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
+ block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+ if (halt_poll_ns) {
+ if (block_ns <= vcpu->halt_poll_ns)
+ ;
+ /* we had a long block, shrink polling */
+ else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ shrink_halt_poll_ns(vcpu);
+ /* we had a short halt and our poll time is too small */
+ else if (vcpu->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
+ grow_halt_poll_ns(vcpu);
+ } else
+ vcpu->halt_poll_ns = 0;
+
+ trace_kvm_vcpu_wakeup(block_ns, waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
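The kvm_vcpu_block() hunk above makes halt polling adaptive per vCPU: a short halt grows the poll window (up to the module-wide cap), a long block shrinks it. The small simulation below exercises that policy under assumed defaults (grow factor 2, shrink 0, 500000 ns cap); the constants and names are illustrative, not authoritative kernel values.

/* halt_poll_sim.c - simulate the adaptive halt-poll policy: grow the
 * per-vcpu poll window after short halts, shrink (here: reset) it
 * after long blocks.
 */
#include <stdio.h>

static unsigned int halt_poll_ns = 500000;	/* module-wide cap */
static unsigned int halt_poll_ns_grow = 2;	/* doubling factor */
static unsigned int halt_poll_ns_shrink;	/* 0 => reset to 0 */

static unsigned int grow(unsigned int cur)
{
	unsigned int val = cur;

	if (val == 0 && halt_poll_ns_grow)
		val = 10000;			/* 10us base */
	else
		val *= halt_poll_ns_grow;
	if (val > halt_poll_ns)
		val = halt_poll_ns;
	return val;
}

static unsigned int shrink(unsigned int cur)
{
	return halt_poll_ns_shrink ? cur / halt_poll_ns_shrink : 0;
}

static unsigned int update(unsigned int cur, unsigned long long block_ns)
{
	if (block_ns <= cur)
		return cur;		/* poll window was large enough */
	if (cur && block_ns > halt_poll_ns)
		return shrink(cur);	/* long block: polling is wasted */
	if (cur < halt_poll_ns && block_ns < halt_poll_ns)
		return grow(cur);	/* short halt: poll a bit longer */
	return cur;
}

int main(void)
{
	unsigned long long samples[] = { 8000, 30000, 120000, 2000000, 9000 };
	unsigned int poll = 0;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		poll = update(poll, samples[i]);
		printf("block %llu ns -> halt_poll_ns %u\n", samples[i], poll);
	}
	return 0;
}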
@@ -1825,11 +2067,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int me;
int cpu = vcpu->cpu;
- struct swait_head *wqp;
+ struct swait_queue_head *wqp;
wqp = kvm_arch_vcpu_wq(vcpu);
- if (swaitqueue_active(wqp)) {
- swait_wake_interruptible(wqp);
+ if (swait_active(wqp)) {
+ swake_up(wqp);
++vcpu->stat.halt_wakeup;
}
@@ -1930,7 +2172,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
continue;
if (vcpu == me)
continue;
- if (swaitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+ if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
@@ -2059,6 +2301,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
}
kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+
+ /*
+ * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
+ * before kvm->online_vcpu's incremented value.
+ */
smp_wmb();
atomic_inc(&kvm->online_vcpus);
@@ -2471,9 +2718,6 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- case KVM_CAP_SET_BOOT_CPU_ID:
-#endif
case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_CAP_SIGNAL_MSI:
@@ -2482,12 +2726,17 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_IRQFD:
case KVM_CAP_IRQFD_RESAMPLE:
#endif
+ case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
return 1;
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
#endif
+#if KVM_ADDRESS_SPACE_NUM > 1
+ case KVM_CAP_MULTI_ADDRESS_SPACE:
+ return KVM_ADDRESS_SPACE_NUM;
+#endif
default:
break;
}
@@ -2565,17 +2814,6 @@ static long kvm_vm_ioctl(struct file *filp,
r = kvm_ioeventfd(kvm, &data);
break;
}
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- case KVM_SET_BOOT_CPU_ID:
- r = 0;
- mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus) != 0)
- r = -EBUSY;
- else
- kvm->bsp_vcpu_id = arg;
- mutex_unlock(&kvm->lock);
- break;
-#endif
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_SIGNAL_MSI: {
struct kvm_msi msi;
@@ -2882,18 +3120,12 @@ static int hardware_enable_all(void)
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
- int cpu = (long)v;
-
val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
- pr_info("kvm: disabling virtualization on CPU%d\n",
- cpu);
hardware_disable();
break;
case CPU_STARTING:
- pr_info("kvm: enabling virtualization on CPU%d\n",
- cpu);
hardware_enable();
break;
}
@@ -2935,10 +3167,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
const struct kvm_io_range *r2)
{
- if (r1->addr < r2->addr)
+ gpa_t addr1 = r1->addr;
+ gpa_t addr2 = r2->addr;
+
+ if (addr1 < addr2)
return -1;
- if (r1->addr + r1->len > r2->addr + r2->len)
+
+ /* If r2->len == 0, match the exact address. If r2->len != 0,
+ * accept any overlapping write. Any order is acceptable for
+ * overlapping ranges, because kvm_io_bus_get_first_dev ensures
+ * we process all of them.
+ */
+ if (r2->len) {
+ addr1 += r1->len;
+ addr2 += r2->len;
+ }
+
+ if (addr1 > addr2)
return 1;
+
return 0;
}
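The rewritten kvm_io_bus_cmp() above treats a zero-length search key as an exact-address match, while a non-zero key accepts any overlapping range; this is what lets zero-length ioeventfds share the sorted bus with ordinary devices. A standalone sketch of that comparison follows; the struct and names are stand-ins for the kernel's kvm_io_range.

/* range_cmp.c - "len == 0 means exact address, len != 0 means any
 * overlap" comparison, as used for bus lookups.
 */
#include <stdio.h>

struct io_range {
	unsigned long long addr;
	unsigned int len;
};

/* r1 is a registered range, r2 is the search key. */
static int range_cmp(const struct io_range *r1, const struct io_range *r2)
{
	unsigned long long a1 = r1->addr, a2 = r2->addr;

	if (a1 < a2)
		return -1;
	if (r2->len) {		/* non-zero key: compare range ends */
		a1 += r1->len;
		a2 += r2->len;
	}
	if (a1 > a2)
		return 1;
	return 0;		/* exact match, or overlapping write */
}

int main(void)
{
	struct io_range dev = { .addr = 0x1000, .len = 4 };
	struct io_range exact = { .addr = 0x1000, .len = 0 };
	struct io_range write = { .addr = 0x1000, .len = 8 };

	printf("exact key: %d\n", range_cmp(&dev, &exact));	/* 0: match */
	printf("wide write: %d\n", range_cmp(&dev, &write));	/* 0: overlap */
	return 0;
}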
@@ -3103,7 +3350,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
- new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+ new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
@@ -3135,7 +3382,7 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
if (r)
return r;
- new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
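Several kvm_main.c hunks above split the userspace slot number into an address-space id in the upper 16 bits and a slot id in the lower 16 bits (as_id = slot >> 16, id = (u16)slot), which is how KVM_CAP_MULTI_ADDRESS_SPACE selects among per-VM memslot sets. The tiny encode/decode sketch below mirrors that packing; the limits are demo assumptions, not kernel constants.

/* slot_encoding.c - pack an address-space id into the upper 16 bits of
 * the memslot number, as the reworked __kvm_set_memory_region() and
 * kvm_get_dirty_log() decode it.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_ADDRESS_SPACE_NUM	2	/* e.g. normal + SMM */
#define DEMO_USER_MEM_SLOTS	509	/* assumed slot limit */

static uint32_t encode_slot(uint16_t as_id, uint16_t id)
{
	return ((uint32_t)as_id << 16) | id;
}

static int decode_slot(uint32_t slot, uint16_t *as_id, uint16_t *id)
{
	*as_id = slot >> 16;
	*id = (uint16_t)slot;

	if (*as_id >= DEMO_ADDRESS_SPACE_NUM || *id >= DEMO_USER_MEM_SLOTS)
		return -1;	/* mirrors the -EINVAL checks in the diff */
	return 0;
}

int main(void)
{
	uint16_t as_id, id;
	uint32_t slot = encode_slot(1, 3);	/* slot 3 in address space 1 */

	if (decode_slot(slot, &as_id, &id))
		return 1;
	printf("slot 0x%x -> as_id %u, id %u\n", slot, as_id, id);
	return 0;
}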
diff --git a/kernel/virt/kvm/vfio.c b/kernel/virt/kvm/vfio.c
index 620e37f74..1dd087da6 100644
--- a/kernel/virt/kvm/vfio.c
+++ b/kernel/virt/kvm/vfio.c
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
list_add_tail(&kvg->node, &kv->group_list);
kvg->vfio_group = vfio_group;
+ kvm_arch_start_assignment(dev->kvm);
+
mutex_unlock(&kv->lock);
kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
break;
}
+ kvm_arch_end_assignment(dev->kvm);
+
mutex_unlock(&kv->lock);
kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
kvm_vfio_group_put_external_user(kvg->vfio_group);
list_del(&kvg->node);
kfree(kvg);
+ kvm_arch_end_assignment(dev->kvm);
}
kvm_vfio_update_coherency(dev);