From e09b41010ba33a20a87472ee821fa407a5b8da36 Mon Sep 17 00:00:00 2001
From: José Pekkarinen
Date: Mon, 11 Apr 2016 10:41:07 +0300
Subject: These changes are the raw update to linux-4.4.6-rt14.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Kernel sources are taken from kernel.org, and the rt patch from the
rt wiki download page.

During the rebasing, the following patch collided:

Force tick interrupt and get rid of softirq magic (I70131fb85).

The collisions have been removed because the patch's logic was
already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen
---
 kernel/arch/s390/kvm/interrupt.c | 320 +++++++++++++++++++--------------------
 1 file changed, 156 insertions(+), 164 deletions(-)

(limited to 'kernel/arch/s390/kvm/interrupt.c')

diff --git a/kernel/arch/s390/kvm/interrupt.c b/kernel/arch/s390/kvm/interrupt.c
index 140a1131a..cc862c486 100644
--- a/kernel/arch/s390/kvm/interrupt.c
+++ b/kernel/arch/s390/kvm/interrupt.c
@@ -30,7 +30,6 @@
 #define IOINT_SCHID_MASK 0x0000ffff
 #define IOINT_SSID_MASK 0x00030000
 #define IOINT_CSSID_MASK 0x03fc0000
-#define IOINT_AI_MASK 0x04000000
 #define PFAULT_INIT 0x0600
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
@@ -52,11 +51,9 @@ static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
 
 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 {
-	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
-	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
-	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
-		return 0;
-	return 1;
+	return psw_extint_disabled(vcpu) &&
+	       psw_ioint_disabled(vcpu) &&
+	       psw_mchk_disabled(vcpu);
 }
 
 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
@@ -72,8 +69,7 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-	if (!(vcpu->arch.sie_block->ckc <
-	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
 		return 0;
 	return ckc_interrupts_enabled(vcpu);
 }
@@ -106,14 +102,10 @@ static inline u8 int_word_to_isc(u32 int_word)
 	return (int_word & 0x38000000) >> 27;
 }
 
-static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
-{
-	return vcpu->kvm->arch.float_int.pending_irqs;
-}
-
-static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
+static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.local_int.pending_irqs;
+	return vcpu->kvm->arch.float_int.pending_irqs |
+	       vcpu->arch.local_int.pending_irqs;
 }
 
 static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
@@ -132,8 +124,9 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 {
 	unsigned long active_mask;
 
-	active_mask = pending_local_irqs(vcpu);
-	active_mask |= pending_floating_irqs(vcpu);
+	active_mask = pending_irqs(vcpu);
+	if (!active_mask)
+		return 0;
 
 	if (psw_extint_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_EXT_MASK;
@@ -168,20 +161,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id,
 		  vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		      &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
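[Editor's note — illustration, not part of the patch. The hunks above fold
pending_floating_irqs() and pending_local_irqs() into a single pending_irqs()
helper and give deliverable_irqs() an early exit when nothing is pending at
all. A minimal standalone C sketch of that shape; the types, field names and
mask values are simplified stand-ins, not the kernel's:]

	#include <stdio.h>

	#define IRQ_PEND_EXT_MASK 0x00f0UL		/* illustrative mask */

	struct vm_state   { unsigned long float_pending; };
	struct vcpu_state { struct vm_state *vm; unsigned long local_pending; };

	/* one OR instead of two separate accessors */
	static unsigned long pending_irqs(struct vcpu_state *vcpu)
	{
		return vcpu->vm->float_pending | vcpu->local_pending;
	}

	static unsigned long deliverable_irqs(struct vcpu_state *vcpu,
					      int extint_disabled)
	{
		unsigned long active = pending_irqs(vcpu);

		if (!active)		/* nothing pending: early out */
			return 0;
		if (extint_disabled)	/* drop classes the guest PSW blocks */
			active &= ~IRQ_PEND_EXT_MASK;
		return active;
	}

	int main(void)
	{
		struct vm_state vm = { .float_pending = 0x0010 };
		struct vcpu_state vcpu = { .vm = &vm, .local_pending = 0x0001 };

		printf("deliverable: %#lx\n", deliverable_irqs(&vcpu, 1));
		return 0;
	}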
@@ -194,12 +187,12 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
 		return;
 	else if (psw_ioint_disabled(vcpu))
 		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
@@ -209,7 +202,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
 		return;
 	if (psw_extint_disabled(vcpu))
 		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
@@ -219,7 +212,7 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
 		return;
 	if (psw_mchk_disabled(vcpu))
 		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -309,8 +302,8 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
 	li->irq.ext.ext_params2 = 0;
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
-		   0, ext.ext_params2);
+	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
+		   ext.ext_params2);
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
 					 0, ext.ext_params2);
@@ -366,7 +359,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 	spin_unlock(&fi->lock);
 
 	if (deliver) {
-		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
 			   mchk.mcic);
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 						 KVM_S390_MCHK,
@@ -401,7 +394,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc;
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
 	vcpu->stat.deliver_restart_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
@@ -425,7 +418,6 @@ static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
 	vcpu->stat.deliver_prefix_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_SIGP_SET_PREFIX,
@@ -448,7 +440,7 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
 	vcpu->stat.deliver_emergency_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
 					 cpu_addr, 0);
@@ -475,7 +467,7 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
 	vcpu->stat.deliver_external_call++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_INT_EXTERNAL_CALL,
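[Editor's note — illustration, not part of the patch. Throughout this file the
s390-specific atomic_set_mask()/atomic_clear_mask() calls become the
cross-architecture atomic_or()/atomic_andnot() helpers available in this 4.4
tree. A C11 userspace analogue of setting and clearing flag bits in an atomic
word; the CPUSTAT values here are illustrative, not the kernel's:]

	#include <stdatomic.h>
	#include <stdio.h>

	#define CPUSTAT_WAIT    0x10u	/* illustrative flag values */
	#define CPUSTAT_EXT_INT 0x02u

	int main(void)
	{
		atomic_uint cpuflags = 0;

		/* kernel: atomic_or(CPUSTAT_WAIT | CPUSTAT_EXT_INT, &flags) */
		atomic_fetch_or(&cpuflags, CPUSTAT_WAIT | CPUSTAT_EXT_INT);
		/* kernel: atomic_andnot(CPUSTAT_WAIT, &flags) */
		atomic_fetch_and(&cpuflags, ~CPUSTAT_WAIT);

		printf("cpuflags = %#x\n", atomic_load(&cpuflags)); /* 0x2 */
		return 0;
	}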
@@ -504,7 +496,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	memset(&li->irq.pgm, 0, sizeof(pgm_info));
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
 		   pgm_info.code, ilc);
 	vcpu->stat.deliver_program_int++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
@@ -620,7 +612,7 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
 	spin_unlock(&fi->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
 		   ext.ext_params);
 	vcpu->stat.deliver_service_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
@@ -649,9 +641,6 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
 					struct kvm_s390_interrupt_info,
 					list);
 	if (inti) {
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
-				KVM_S390_INT_PFAULT_DONE, 0,
-				inti->ext.ext_params2);
 		list_del(&inti->list);
 		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
 	}
@@ -660,6 +649,12 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
 	spin_unlock(&fi->lock);
 
 	if (inti) {
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+						 KVM_S390_INT_PFAULT_DONE, 0,
+						 inti->ext.ext_params2);
+		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
+			   inti->ext.ext_params2);
+
 		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
 				  (u16 *)__LC_EXT_INT_CODE);
 		rc |= put_guest_lc(vcpu, PFAULT_DONE,
@@ -689,7 +684,7 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
 				   list);
 	if (inti) {
 		VCPU_EVENT(vcpu, 4,
-			   "interrupt: virtio parm:%x,parm64:%llx",
+			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
 		vcpu->stat.deliver_virtio_interrupt++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
@@ -739,7 +734,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 					      struct kvm_s390_interrupt_info,
 					      list);
 	if (inti) {
-		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+		VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
 		vcpu->stat.deliver_io_int++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 						 inti->type,
@@ -799,7 +794,7 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
-	if (!sclp_has_sigpif())
+	if (!sclp.has_sigpif)
 		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 
 	return (sigp_ctrl & SIGP_CTRL_C) &&
@@ -808,23 +803,21 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 
 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
 {
-	int rc;
+	if (deliverable_irqs(vcpu))
+		return 1;
 
-	rc = !!deliverable_irqs(vcpu);
-
-	if (!rc && kvm_cpu_has_pending_timer(vcpu))
-		rc = 1;
+	if (kvm_cpu_has_pending_timer(vcpu))
+		return 1;
 
 	/* external call pending and deliverable */
-	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
+	if (kvm_s390_ext_call_pending(vcpu) &&
 	    !psw_extint_disabled(vcpu) &&
 	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
-		rc = 1;
-
-	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
-		rc = 1;
+		return 1;
 
-	return rc;
+	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
+		return 1;
+	return 0;
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
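[Editor's note — illustration, not part of the patch. The kvm_s390_vcpu_has_irq()
rewrite above trades an rc accumulator plus repeated "!rc &&" guards for plain
early returns, which keeps each condition independent. The shape of that
transformation, with hypothetical predicates:]

	static int cond_a(void) { return 0; }	/* hypothetical predicates */
	static int cond_b(void) { return 1; }

	int has_work_old(void)			/* before: accumulator style */
	{
		int rc = 0;

		if (cond_a())
			rc = 1;
		if (!rc && cond_b())
			rc = 1;
		return rc;
	}

	int has_work_new(void)			/* after: early returns */
	{
		if (cond_a())
			return 1;
		if (cond_b())
			return 1;
		return 0;
	}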
@@ -839,7 +832,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	vcpu->stat.exit_wait_state++;
 
 	/* fast path */
-	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+	if (kvm_arch_vcpu_runnable(vcpu))
 		return 0;
 
 	if (psw_interrupts_disabled(vcpu)) {
@@ -853,7 +846,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
-	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/* underflow */
@@ -862,7 +855,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 
 	__set_cpu_idle(vcpu);
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
-	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+	VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	kvm_vcpu_block(vcpu);
@@ -875,13 +868,13 @@ no_timer:
 
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-	if (swaitqueue_active(&vcpu->wq)) {
+	if (swait_active(&vcpu->wq)) {
 		/*
 		 * The vcpu gave up the cpu voluntarily, mark it as a good
 		 * yield-candidate.
 		 */
 		vcpu->preempted = true;
-		swait_wake_interruptible(&vcpu->wq);
+		swake_up(&vcpu->wq);
 		vcpu->stat.halt_wakeup++;
 	}
 }
@@ -892,7 +885,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/*
@@ -917,7 +910,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
@@ -941,12 +934,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (cpu_timer_irq_pending(vcpu))
 		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
 
-	do {
-		irqs = deliverable_irqs(vcpu);
+	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
 		/* bits are in the order of interrupt priority */
 		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
-		if (irq_type == IRQ_PEND_COUNT)
-			break;
 		if (is_ioirq(irq_type)) {
 			rc = __deliver_io(vcpu, irq_type);
 		} else {
@@ -958,9 +948,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			rc = func(vcpu);
 		}
-		if (rc)
-			break;
-	} while (!rc);
+	}
 
 	set_intercept_indicators(vcpu);
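[Editor's note — illustration, not part of the patch. kvm_s390_vcpu_wakeup()
above is the waker side of the simple waitqueues this -rt tree carries: check
swait_active() before issuing swake_up(), so waking an empty queue costs
almost nothing. A sketch of that pattern against the swait API as used here
(later mainline kernels rename swake_up() to swake_up_one()):]

	#include <linux/swait.h>

	static void wake_if_waiting(struct swait_queue_head *wq, bool *preempted)
	{
		if (swait_active(wq)) {
			/* the waiter gave up the cpu voluntarily */
			*preempted = true;
			swake_up(wq);
		}
	}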
@@ -971,59 +959,47 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	li->irq.pgm = irq->u.pgm;
+	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+				   irq->u.pgm.code, 0);
+
+	if (irq->u.pgm.code == PGM_PER) {
+		li->irq.pgm.code |= PGM_PER;
+		/* only modify PER related information */
+		li->irq.pgm.per_address = irq->u.pgm.per_address;
+		li->irq.pgm.per_code = irq->u.pgm.per_code;
+		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
+		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
+	} else if (!(irq->u.pgm.code & PGM_PER)) {
+		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
+				   irq->u.pgm.code;
+		/* only modify non-PER information */
+		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
+		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
+		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
+		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
+		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
+		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
+	} else {
+		li->irq.pgm = irq->u.pgm;
+	}
 	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
 	return 0;
 }
 
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
-{
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_irq irq;
-
-	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
-				   0, 1);
-	spin_lock(&li->lock);
-	irq.u.pgm.code = code;
-	__inject_prog(vcpu, &irq);
-	BUG_ON(swaitqueue_active(li->wq));
-	spin_unlock(&li->lock);
-	return 0;
-}
-
-int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
-			     struct kvm_s390_pgm_info *pgm_info)
-{
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_irq irq;
-	int rc;
-
-	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
-		   pgm_info->code);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
-				   pgm_info->code, 0, 1);
-	spin_lock(&li->lock);
-	irq.u.pgm = *pgm_info;
-	rc = __inject_prog(vcpu, &irq);
-	BUG_ON(swaitqueue_active(li->wq));
-	spin_unlock(&li->lock);
-	return rc;
-}
-
 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
-		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
+	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
+		   irq->u.ext.ext_params2);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
 				   irq->u.ext.ext_params,
-				   irq->u.ext.ext_params2, 2);
+				   irq->u.ext.ext_params2);
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1038,7 +1014,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
@@ -1048,23 +1024,22 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
 	uint16_t src_id = irq->u.extcall.code;
 
-	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
 		   src_id);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
-				   src_id, 0, 2);
+				   src_id, 0);
 
 	/* sending vcpu invalid */
-	if (src_id >= KVM_MAX_VCPUS ||
-	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
 		return -EINVAL;
 
-	if (sclp_has_sigpif())
+	if (sclp.has_sigpif)
 		return __inject_extcall_sigpif(vcpu, src_id);
 
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1073,10 +1048,10 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
 
-	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
 		   irq->u.prefix.address);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
-				   irq->u.prefix.address, 0, 2);
+				   irq->u.prefix.address, 0);
 
 	if (!is_vcpu_stopped(vcpu))
 		return -EBUSY;
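[Editor's note — illustration, not part of the patch. __inject_extcall() above
relies on test_and_set_bit() so that claiming the pending bit and detecting an
already-queued external call happen in one atomic step, returning -EBUSY
rather than overwriting the queued payload. A simplified kernel-style sketch;
the bit number and payload slot are illustrative:]

	#include <linux/bitops.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	#define IRQ_PEND_FOO 3	/* illustrative bit number */

	static int inject_foo(unsigned long *pending, u16 payload, u16 *slot)
	{
		if (test_and_set_bit(IRQ_PEND_FOO, pending))
			return -EBUSY;	/* already queued: keep old payload */
		*slot = payload;	/* safe: we own the bit now */
		return 0;
	}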
@@ -1093,7 +1068,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_stop_info *stop = &li->irq.stop;
 	int rc = 0;
 
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
 
 	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
 		return -EINVAL;
@@ -1117,8 +1092,8 @@ static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
 
 	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
 	return 0;
@@ -1129,14 +1104,18 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
 		   irq->u.emerg.code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
-				   irq->u.emerg.code, 0, 2);
+				   irq->u.emerg.code, 0);
+
+	/* sending vcpu invalid */
+	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
+		return -EINVAL;
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1145,10 +1124,10 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
 
-	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
 		   irq->u.mchk.mcic);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
-				   irq->u.mchk.mcic, 2);
+				   irq->u.mchk.mcic);
 
 	/*
@@ -1175,12 +1154,12 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
-				   0, 0, 2);
+				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1188,12 +1167,12 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
-				   0, 0, 2);
+				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
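[Editor's note — illustration, not part of the patch. The
__inject_sigp_emergency() hunk above adds the same source-CPU validation that
__inject_extcall() already gained: resolve the address with
kvm_get_vcpu_by_id() and reject the injection when no such VCPU exists. The
check in isolation, as a kernel-style sketch:]

	#include <linux/kvm_host.h>

	/* returns 0 when src_id names an existing VCPU, -EINVAL otherwise */
	static int validate_src_vcpu(struct kvm *kvm, u16 src_id)
	{
		if (kvm_get_vcpu_by_id(kvm, src_id) == NULL)
			return -EINVAL;	/* sending vcpu invalid */
		return 0;
	}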
@@ -1340,17 +1319,56 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	return 0;
 }
 
-static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+/*
+ * Find a destination VCPU for a floating irq and kick it.
+ */
+static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
+	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
-	struct kvm_s390_float_interrupt *fi;
-	struct kvm_vcpu *dst_vcpu = NULL;
-	int sigcpu;
+	struct kvm_vcpu *dst_vcpu;
+	int sigcpu, online_vcpus, nr_tries = 0;
+
+	online_vcpus = atomic_read(&kvm->online_vcpus);
+	if (!online_vcpus)
+		return;
+
+	/* find idle VCPUs first, then round robin */
+	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+	if (sigcpu == online_vcpus) {
+		do {
+			sigcpu = fi->next_rr_cpu;
+			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+			/* avoid endless loops if all vcpus are stopped */
+			if (nr_tries++ >= online_vcpus)
+				return;
+		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
+	}
+	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+
+	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
+	li = &dst_vcpu->arch.local_int;
+	spin_lock(&li->lock);
+	switch (type) {
+	case KVM_S390_MCHK:
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
+		break;
+	default:
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+		break;
+	}
+	spin_unlock(&li->lock);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
+}
+
+static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+{
 	u64 type = READ_ONCE(inti->type);
 	int rc;
 
-	fi = &kvm->arch.float_int;
-
 	switch (type) {
 	case KVM_S390_MCHK:
 		rc = __inject_float_mchk(kvm, inti);
@@ -1373,32 +1391,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	if (rc)
 		return rc;
 
-	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
-	if (sigcpu == KVM_MAX_VCPUS) {
-		do {
-			sigcpu = fi->next_rr_cpu++;
-			if (sigcpu == KVM_MAX_VCPUS)
-				sigcpu = fi->next_rr_cpu = 0;
-		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
-	}
-	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
-	li = &dst_vcpu->arch.local_int;
-	spin_lock(&li->lock);
-	switch (type) {
-	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-		break;
-	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
-		break;
-	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-		break;
-	}
-	spin_unlock(&li->lock);
-	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
+	__floating_irq_kick(kvm, type);
 	return 0;
-
 }
 
 int kvm_s390_inject_vm(struct kvm *kvm,
@@ -1420,20 +1414,20 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_INT_SERVICE:
-		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
+		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
 		inti->ext.ext_params = s390int->parm;
 		break;
 	case KVM_S390_INT_PFAULT_DONE:
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_MCHK:
-		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
 			 s390int->parm64);
 		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
 		inti->mchk.mcic = s390int->parm64;
 		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (inti->type & IOINT_AI_MASK)
+		if (inti->type & KVM_S390_INT_IO_AI_MASK)
 			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
 		else
 			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
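[Editor's note — illustration, not part of the patch. __floating_irq_kick()
above centralizes target selection: prefer an idle VCPU, otherwise round-robin
across the online ones, and give up after one full lap so a guest whose VCPUs
are all stopped cannot turn this into an endless loop. A standalone C sketch
of the same control flow; the arrays stand in for the idle mask and per-VCPU
stop state:]

	/* returns a target index, or -1 if every online VCPU is stopped */
	static int pick_target(const int *stopped, int online_vcpus,
			       int *next_rr_cpu, int first_idle)
	{
		int sigcpu, nr_tries = 0;

		if (first_idle < online_vcpus)	/* an idle VCPU wins outright */
			return first_idle;
		do {
			sigcpu = *next_rr_cpu;
			*next_rr_cpu = (*next_rr_cpu + 1) % online_vcpus;
			if (nr_tries++ >= online_vcpus)
				return -1;	/* full lap: all stopped */
		} while (stopped[sigcpu]);
		return sigcpu;
	}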
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", - irq->u.pgm.code); rc = __inject_prog(vcpu, irq); break; case KVM_S390_SIGP_SET_PREFIX: -- cgit 1.2.3-korg