Diffstat (limited to 'kernel/drivers/irqchip')
-rw-r--r--  kernel/drivers/irqchip/irq-atmel-aic.c   |  5
-rw-r--r--  kernel/drivers/irqchip/irq-atmel-aic5.c  |  5
-rw-r--r--  kernel/drivers/irqchip/irq-bcm7038-l1.c  | 26
-rw-r--r--  kernel/drivers/irqchip/irq-gic-v3-its.c  | 49
-rw-r--r--  kernel/drivers/irqchip/irq-gic-v3.c      | 28
-rw-r--r--  kernel/drivers/irqchip/irq-gic.c         |  8
-rw-r--r--  kernel/drivers/irqchip/irq-mxs.c         |  2
-rw-r--r--  kernel/drivers/irqchip/irq-sunxi-nmi.c   |  4
8 files changed, 114 insertions, 13 deletions
diff --git a/kernel/drivers/irqchip/irq-atmel-aic.c b/kernel/drivers/irqchip/irq-atmel-aic.c
index 8a0c7f288..981c3959d 100644
--- a/kernel/drivers/irqchip/irq-atmel-aic.c
+++ b/kernel/drivers/irqchip/irq-atmel-aic.c
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 {
         struct irq_domain_chip_generic *dgc = d->gc;
         struct irq_chip_generic *gc;
+        unsigned long flags;
         unsigned smr;
         int idx;
         int ret;
@@ -194,12 +195,12 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 
         gc = dgc->gc[idx];
 
-        irq_gc_lock(gc);
+        irq_gc_lock_irqsave(gc, flags);
         smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
         ret = aic_common_set_priority(intspec[2], &smr);
         if (!ret)
                 irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
-        irq_gc_unlock(gc);
+        irq_gc_unlock_irqrestore(gc, flags);
 
         return ret;
 }
diff --git a/kernel/drivers/irqchip/irq-atmel-aic5.c b/kernel/drivers/irqchip/irq-atmel-aic5.c
index 62bb840c6..7dee71bde 100644
--- a/kernel/drivers/irqchip/irq-atmel-aic5.c
+++ b/kernel/drivers/irqchip/irq-atmel-aic5.c
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
                                  unsigned int *out_type)
 {
         struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+        unsigned long flags;
         unsigned smr;
         int ret;
 
@@ -269,13 +270,13 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
         if (ret)
                 return ret;
 
-        irq_gc_lock(bgc);
+        irq_gc_lock_irqsave(bgc, flags);
         irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
         smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
         ret = aic_common_set_priority(intspec[2], &smr);
         if (!ret)
                 irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
-        irq_gc_unlock(bgc);
+        irq_gc_unlock_irqrestore(bgc, flags);
 
         return ret;
 }
diff --git a/kernel/drivers/irqchip/irq-bcm7038-l1.c b/kernel/drivers/irqchip/irq-bcm7038-l1.c
index 0fea985ef..d7af88534 100644
--- a/kernel/drivers/irqchip/irq-bcm7038-l1.c
+++ b/kernel/drivers/irqchip/irq-bcm7038-l1.c
@@ -216,6 +216,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
         return 0;
 }
 
+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+        struct cpumask *mask = irq_data_get_affinity_mask(d);
+        int cpu = smp_processor_id();
+        cpumask_t new_affinity;
+
+        /* This CPU was not on the affinity mask */
+        if (!cpumask_test_cpu(cpu, mask))
+                return;
+
+        if (cpumask_weight(mask) > 1) {
+                /*
+                 * Multiple CPU affinity, remove this CPU from the affinity
+                 * mask
+                 */
+                cpumask_copy(&new_affinity, mask);
+                cpumask_clear_cpu(cpu, &new_affinity);
+        } else {
+                /* Only CPU, put on the lowest online CPU */
+                cpumask_clear(&new_affinity);
+                cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+        }
+        irq_set_affinity_locked(d, &new_affinity, false);
+}
+
 static int __init bcm7038_l1_init_one(struct device_node *dn,
                                       unsigned int idx,
                                       struct bcm7038_l1_chip *intc)
@@ -267,6 +292,7 @@ static struct irq_chip bcm7038_l1_irq_chip = {
         .irq_mask               = bcm7038_l1_mask,
         .irq_unmask             = bcm7038_l1_unmask,
         .irq_set_affinity       = bcm7038_l1_set_affinity,
+        .irq_cpu_offline        = bcm7038_l1_cpu_offline,
 };
 
 static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
diff --git a/kernel/drivers/irqchip/irq-gic-v3-its.c b/kernel/drivers/irqchip/irq-gic-v3-its.c
index a159529f9..c5f1757ac 100644
--- a/kernel/drivers/irqchip/irq-gic-v3-its.c
+++ b/kernel/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
 
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING           (1ULL << 0)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375       (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144       (1ULL << 2)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING     (1 << 0)
 
@@ -71,6 +72,7 @@ struct its_node {
         struct list_head        its_device_list;
         u64                     flags;
         u32                     ite_size;
+        int                     numa_node;
 };
 
 #define ITS_ITT_ALIGN           SZ_256
@@ -600,11 +602,23 @@ static void its_unmask_irq(struct irq_data *d)
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                             bool force)
 {
-        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+        unsigned int cpu;
+        const struct cpumask *cpu_mask = cpu_online_mask;
         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
         struct its_collection *target_col;
         u32 id = its_get_event_id(d);
 
+        /* lpi cannot be routed to a redistributor that is on a foreign node */
+        if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+                if (its_dev->its->numa_node >= 0) {
+                        cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+                        if (!cpumask_intersects(mask_val, cpu_mask))
+                                return -EINVAL;
+                }
+        }
+
+        cpu = cpumask_any_and(mask_val, cpu_mask);
+
         if (cpu >= nr_cpu_ids)
                 return -EINVAL;
 
@@ -1081,6 +1095,16 @@ static void its_cpu_init_collection(void)
         list_for_each_entry(its, &its_nodes, entry) {
                 u64 target;
 
+                /* avoid cross node collections and its mapping */
+                if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+                        struct device_node *cpu_node;
+
+                        cpu_node = of_get_cpu_node(cpu, NULL);
+                        if (its->numa_node != NUMA_NO_NODE &&
+                                its->numa_node != of_node_to_nid(cpu_node))
+                                continue;
+                }
+
                 /*
                  * We now have to bind each collection to its target
                  * redistributor.
@@ -1308,9 +1332,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
 {
         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
         u32 event = its_get_event_id(d);
+        const struct cpumask *cpu_mask = cpu_online_mask;
+
+        /* get the cpu_mask of local node */
+        if (its_dev->its->numa_node >= 0)
+                cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
         /* Bind the LPI to the first possible CPU */
-        its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+        its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
 
         /* Map the GIC IRQ and event to the device */
         its_send_mapvi(its_dev, d->hwirq, event);
@@ -1400,6 +1429,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
         its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
 }
 
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+        struct its_node *its = data;
+
+        its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
         {
@@ -1409,6 +1445,14 @@ static const struct gic_quirk its_quirks[] = {
                 .init   = its_enable_quirk_cavium_22375,
         },
 #endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+        {
+                .desc   = "ITS: Cavium erratum 23144",
+                .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
+                .mask   = 0xffff0fff,
+                .init   = its_enable_quirk_cavium_23144,
+        },
+#endif
         {
         }
 };
@@ -1470,6 +1514,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
         its->base = its_base;
         its->phys_base = res.start;
         its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+        its->numa_node = of_node_to_nid(node);
 
         its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
         if (!its->cmd_base) {
diff --git a/kernel/drivers/irqchip/irq-gic-v3.c b/kernel/drivers/irqchip/irq-gic-v3.c
index d7be6ddc3..e33c729b9 100644
--- a/kernel/drivers/irqchip/irq-gic-v3.c
+++ b/kernel/drivers/irqchip/irq-gic-v3.c
@@ -142,7 +142,7 @@ static void gic_enable_redist(bool enable)
                         return; /* No PM support in this redistributor */
         }
 
-        while (count--) {
+        while (--count) {
                 val = readl_relaxed(rbase + GICR_WAKER);
                 if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
                         break;
@@ -361,6 +361,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
                         if (static_key_true(&supports_deactivate))
                                 gic_write_dir(irqnr);
 #ifdef CONFIG_SMP
+                        /*
+                         * Unlike GICv2, we don't need an smp_rmb() here.
+                         * The control dependency from gic_read_iar to
+                         * the ISB in gic_write_eoir is enough to ensure
+                         * that any shared data read by handle_IPI will
+                         * be read after the ACK.
+                         */
                         handle_IPI(irqnr, regs);
 #else
                         WARN_ONCE(true, "Unexpected SGI received!\n");
@@ -380,6 +387,15 @@ static void __init gic_dist_init(void)
         writel_relaxed(0, base + GICD_CTLR);
         gic_dist_wait_for_rwp();
 
+        /*
+         * Configure SPIs as non-secure Group-1. This will only matter
+         * if the GIC only has a single security state. This will not
+         * do the right thing if the kernel is running in secure mode,
+         * but that's not the intended use case anyway.
+         */
+        for (i = 32; i < gic_data.irq_nr; i += 32)
+                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
         gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
 
         /* Enable distributor with ARE, Group1 */
@@ -494,6 +510,9 @@ static void gic_cpu_init(void)
 
         rbase = gic_data_rdist_sgi_base();
 
+        /* Configure SGIs/PPIs as non-secure Group-1 */
+        writel_relaxed(~0, rbase + GICR_IGROUPR0);
+
         gic_cpu_config(rbase, gic_redist_wait_for_rwp);
 
         /* Give LPIs a spin */
@@ -525,7 +544,7 @@ static struct notifier_block gic_cpu_notifier = {
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                    unsigned long cluster_id)
 {
-        int cpu = *base_cpu;
+        int next_cpu, cpu = *base_cpu;
         unsigned long mpidr = cpu_logical_map(cpu);
         u16 tlist = 0;
 
@@ -539,9 +558,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 
                 tlist |= 1 << (mpidr & 0xf);
 
-                cpu = cpumask_next(cpu, mask);
-                if (cpu >= nr_cpu_ids)
+                next_cpu = cpumask_next(cpu, mask);
+                if (next_cpu >= nr_cpu_ids)
                         goto out;
+                cpu = next_cpu;
 
                 mpidr = cpu_logical_map(cpu);
 
diff --git a/kernel/drivers/irqchip/irq-gic.c b/kernel/drivers/irqchip/irq-gic.c
index abf2ffaed..cebd8efe6 100644
--- a/kernel/drivers/irqchip/irq-gic.c
+++ b/kernel/drivers/irqchip/irq-gic.c
@@ -347,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
                         if (static_key_true(&supports_deactivate))
                                 writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
 #ifdef CONFIG_SMP
+                        /*
+                         * Ensure any shared data written by the CPU sending
+                         * the IPI is read after we've read the ACK register
+                         * on the GIC.
+                         *
+                         * Pairs with the write barrier in gic_raise_softirq
+                         */
+                        smp_rmb();
                         handle_IPI(irqnr, regs);
 #endif
                         continue;
diff --git a/kernel/drivers/irqchip/irq-mxs.c b/kernel/drivers/irqchip/irq-mxs.c
index efe508459..17304705f 100644
--- a/kernel/drivers/irqchip/irq-mxs.c
+++ b/kernel/drivers/irqchip/irq-mxs.c
@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
         void __iomem *icoll_base;
 
         icoll_base = of_io_request_and_map(np, 0, np->name);
-        if (!icoll_base)
+        if (IS_ERR(icoll_base))
                 panic("%s: unable to map resource", np->full_name);
         return icoll_base;
 }
diff --git a/kernel/drivers/irqchip/irq-sunxi-nmi.c b/kernel/drivers/irqchip/irq-sunxi-nmi.c
index 4ef178078..1254e98f6 100644
--- a/kernel/drivers/irqchip/irq-sunxi-nmi.c
+++ b/kernel/drivers/irqchip/irq-sunxi-nmi.c
@@ -154,9 +154,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
 
         gc = irq_get_domain_generic_chip(domain, 0);
         gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
-        if (!gc->reg_base) {
+        if (IS_ERR(gc->reg_base)) {
                 pr_err("unable to map resource\n");
-                ret = -ENOMEM;
+                ret = PTR_ERR(gc->reg_base);
                 goto fail_irqd_remove;
         }
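Note on the irq-atmel-aic/aic5 hunks above: irq_gc_lock()/irq_gc_unlock() take the generic-chip lock without touching the local interrupt state, so using them from a path that can run with interrupts enabled (such as an irq_domain ->xlate() callback) can deadlock against the same lock taken from hard-irq context. The sketch below is illustrative only, not part of the patch: the helper name example_xlate_locked_update is made up, and it assumes the matching irq_gc_lock_irqsave()/irq_gc_unlock_irqrestore() helpers from the same series are present in include/linux/irq.h.

#include <linux/irq.h>

/*
 * Illustrative sketch: update a generic-chip register from a context
 * that may have interrupts enabled, using the irqsave variants.
 */
static int example_xlate_locked_update(struct irq_chip_generic *gc,
                                       u32 reg_offset, u32 set_bits)
{
        unsigned long flags;
        u32 val;

        irq_gc_lock_irqsave(gc, flags);         /* disable local IRQs, take gc->lock */
        val = irq_reg_readl(gc, reg_offset);
        irq_reg_writel(gc, val | set_bits, reg_offset);
        irq_gc_unlock_irqrestore(gc, flags);    /* drop the lock, restore IRQ state */

        return 0;
}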
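Note on the irq-mxs and irq-sunxi-nmi hunks: of_io_request_and_map() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so a NULL check never fires and the real error has to be extracted with PTR_ERR(). A minimal illustrative sketch of the corrected pattern follows; the function name example_map_node is made up and is not part of the patch.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>

/*
 * Illustrative sketch: map a node's first register window and propagate
 * the real error code (e.g. -EBUSY or -ENOMEM) instead of assuming -ENOMEM.
 */
static void __iomem *example_map_node(struct device_node *np, int *err)
{
        void __iomem *base;

        base = of_io_request_and_map(np, 0, np->name);
        if (IS_ERR(base)) {
                pr_err("%s: unable to map resource\n", np->full_name);
                *err = PTR_ERR(base);   /* not NULL on failure, so report PTR_ERR() */
                return NULL;
        }

        *err = 0;
        return base;
}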