| author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2015-10-19 08:35:30 +0300 |
|---|---|---|
| committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2015-10-19 08:35:30 +0300 |
| commit | ec0a2ed6d8a5e555edef907895c041e285fdb495 (patch) | |
| tree | a4c8d982f8ac820b1b60818df22ad3ccac2036d5 /kernel | |
| parent | 342fa5dfa053559f47caad657132522496dcf1b3 (diff) | |
These changes are a raw update to a vanilla kernel 4.1.10, with the
recently announced RT patch, patch-4.1.10-rt10.patch, applied on top. No
further changes are needed.
Change-Id: I9a0cf084498133b10771e744b6da4b29dff706ba
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel')
343 files changed, 3092 insertions, 1508 deletions
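For reference, a minimal sketch of how an update like this is typically reproduced against a vanilla tree (standard kernel.org file names; the local paths and the download layout are assumptions, not part of this commit):

```sh
# Hypothetical reproduction of this update; adjust paths to your own checkout.
xz -dc linux-4.1.tar.xz | tar -x && cd linux-4.1     # vanilla 4.1 base tree
xz -dc ../patch-4.1.10.xz | patch -p1                # vanilla 4.1 -> 4.1.10
patch -p1 < ../patch-4.1.10-rt10.patch               # the announced RT patch
```

The diff below is the wholesale result of replacing the previous 4.1.7-based kernel/ tree with the tree produced this way (note the SUBLEVEL 7 → 10 change in kernel/Makefile).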
diff --git a/kernel/Documentation/ABI/testing/configfs-usb-gadget-loopback b/kernel/Documentation/ABI/testing/configfs-usb-gadget-loopback index 9aae5bfb9..06beefbcf 100644 --- a/kernel/Documentation/ABI/testing/configfs-usb-gadget-loopback +++ b/kernel/Documentation/ABI/testing/configfs-usb-gadget-loopback @@ -5,4 +5,4 @@ Description: The attributes: qlen - depth of loopback queue - bulk_buflen - buffer length + buflen - buffer length diff --git a/kernel/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/kernel/Documentation/ABI/testing/configfs-usb-gadget-sourcesink index 29477c319..bc7ff731a 100644 --- a/kernel/Documentation/ABI/testing/configfs-usb-gadget-sourcesink +++ b/kernel/Documentation/ABI/testing/configfs-usb-gadget-sourcesink @@ -9,4 +9,4 @@ Description: isoc_maxpacket - 0 - 1023 (fs), 0 - 1024 (hs/ss) isoc_mult - 0..2 (hs/ss only) isoc_maxburst - 0..15 (ss only) - qlen - buffer length + buflen - buffer length diff --git a/kernel/Documentation/devicetree/bindings/net/ethernet.txt b/kernel/Documentation/devicetree/bindings/net/ethernet.txt index 41b3f3f86..5d88f3748 100644 --- a/kernel/Documentation/devicetree/bindings/net/ethernet.txt +++ b/kernel/Documentation/devicetree/bindings/net/ethernet.txt @@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers: flow control thresholds. - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This is used for components that can have configurable fifo sizes. +- managed: string, specifies the PHY management type. Supported values are: + "auto", "in-band-status". "auto" is the default, it usess MDIO for + management if fixed-link is not specified. Child nodes of the Ethernet controller are typically the individual PHY devices connected via the MDIO bus (sometimes the MDIO bus controller is separate). They are described in the phy.txt file in this same directory. +For non-MDIO PHY management see fixed-link.txt. diff --git a/kernel/Documentation/usb/gadget-testing.txt b/kernel/Documentation/usb/gadget-testing.txt index f45b2bf4b..820664af8 100644 --- a/kernel/Documentation/usb/gadget-testing.txt +++ b/kernel/Documentation/usb/gadget-testing.txt @@ -237,9 +237,7 @@ Testing the LOOPBACK function ----------------------------- device: run the gadget -host: test-usb - -http://www.linux-usb.org/usbtest/testusb.c +host: test-usb (tools/usb/testusb.c) 8. MASS STORAGE function ======================== @@ -588,9 +586,8 @@ Testing the SOURCESINK function ------------------------------- device: run the gadget -host: test-usb +host: test-usb (tools/usb/testusb.c) -http://www.linux-usb.org/usbtest/testusb.c 16. 
UAC1 function ================= diff --git a/kernel/Makefile b/kernel/Makefile index b8591e5f7..d02f16b51 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 1 -SUBLEVEL = 7 +SUBLEVEL = 10 EXTRAVERSION = NAME = Series 4800 diff --git a/kernel/arch/arm/Kconfig b/kernel/arch/arm/Kconfig index bfaeed7a4..e16a25917 100644 --- a/kernel/arch/arm/Kconfig +++ b/kernel/arch/arm/Kconfig @@ -539,6 +539,7 @@ config ARCH_ORION5X select MVEBU_MBUS select PCI select PLAT_ORION_LEGACY + select MULTI_IRQ_HANDLER help Support for the following Marvell Orion 5x series SoCs: Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182), diff --git a/kernel/arch/arm/boot/compressed/decompress.c b/kernel/arch/arm/boot/compressed/decompress.c index bd245d349..a0765e7ed 100644 --- a/kernel/arch/arm/boot/compressed/decompress.c +++ b/kernel/arch/arm/boot/compressed/decompress.c @@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2); int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) { - return decompress(input, len, NULL, NULL, output, NULL, error); + return __decompress(input, len, NULL, NULL, output, 0, NULL, error); } diff --git a/kernel/arch/arm/boot/dts/exynos3250-rinato.dts b/kernel/arch/arm/boot/dts/exynos3250-rinato.dts index 0b9906880..75aba40c6 100644 --- a/kernel/arch/arm/boot/dts/exynos3250-rinato.dts +++ b/kernel/arch/arm/boot/dts/exynos3250-rinato.dts @@ -181,7 +181,7 @@ display-timings { timing-0 { - clock-frequency = <0>; + clock-frequency = <4600000>; hactive = <320>; vactive = <320>; hfront-porch = <1>; diff --git a/kernel/arch/arm/boot/dts/rk3288.dtsi b/kernel/arch/arm/boot/dts/rk3288.dtsi index 165968d51..8eca5878a 100644 --- a/kernel/arch/arm/boot/dts/rk3288.dtsi +++ b/kernel/arch/arm/boot/dts/rk3288.dtsi @@ -584,7 +584,7 @@ compatible = "rockchip,rk3288-wdt", "snps,dw-wdt"; reg = <0xff800000 0x100>; clocks = <&cru PCLK_WDT>; - interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; diff --git a/kernel/arch/arm/kvm/arm.c b/kernel/arch/arm/kvm/arm.c index adb19885a..0c9014d7f 100644 --- a/kernel/arch/arm/kvm/arm.c +++ b/kernel/arch/arm/kvm/arm.c @@ -450,7 +450,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) * Map the VGIC hardware resources before running a vcpu the first * time on this VM. 
*/ - if (unlikely(!vgic_ready(kvm))) { + if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) { ret = kvm_vgic_map_resources(kvm); if (ret) return ret; diff --git a/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c b/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c index 57d5df0c1..7581e036b 100644 --- a/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c +++ b/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c @@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = { .dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT, .wkdep_srcs = l4per2_wkup_sleep_deps, .sleepdep_srcs = l4per2_wkup_sleep_deps, - .flags = CLKDM_CAN_HWSUP_SWSUP, + .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain mpu0_7xx_clkdm = { diff --git a/kernel/arch/arm/mach-orion5x/include/mach/irqs.h b/kernel/arch/arm/mach-orion5x/include/mach/irqs.h index a6fa9d8f1..2431d9923 100644 --- a/kernel/arch/arm/mach-orion5x/include/mach/irqs.h +++ b/kernel/arch/arm/mach-orion5x/include/mach/irqs.h @@ -16,42 +16,42 @@ /* * Orion Main Interrupt Controller */ -#define IRQ_ORION5X_BRIDGE 0 -#define IRQ_ORION5X_DOORBELL_H2C 1 -#define IRQ_ORION5X_DOORBELL_C2H 2 -#define IRQ_ORION5X_UART0 3 -#define IRQ_ORION5X_UART1 4 -#define IRQ_ORION5X_I2C 5 -#define IRQ_ORION5X_GPIO_0_7 6 -#define IRQ_ORION5X_GPIO_8_15 7 -#define IRQ_ORION5X_GPIO_16_23 8 -#define IRQ_ORION5X_GPIO_24_31 9 -#define IRQ_ORION5X_PCIE0_ERR 10 -#define IRQ_ORION5X_PCIE0_INT 11 -#define IRQ_ORION5X_USB1_CTRL 12 -#define IRQ_ORION5X_DEV_BUS_ERR 14 -#define IRQ_ORION5X_PCI_ERR 15 -#define IRQ_ORION5X_USB_BR_ERR 16 -#define IRQ_ORION5X_USB0_CTRL 17 -#define IRQ_ORION5X_ETH_RX 18 -#define IRQ_ORION5X_ETH_TX 19 -#define IRQ_ORION5X_ETH_MISC 20 -#define IRQ_ORION5X_ETH_SUM 21 -#define IRQ_ORION5X_ETH_ERR 22 -#define IRQ_ORION5X_IDMA_ERR 23 -#define IRQ_ORION5X_IDMA_0 24 -#define IRQ_ORION5X_IDMA_1 25 -#define IRQ_ORION5X_IDMA_2 26 -#define IRQ_ORION5X_IDMA_3 27 -#define IRQ_ORION5X_CESA 28 -#define IRQ_ORION5X_SATA 29 -#define IRQ_ORION5X_XOR0 30 -#define IRQ_ORION5X_XOR1 31 +#define IRQ_ORION5X_BRIDGE (1 + 0) +#define IRQ_ORION5X_DOORBELL_H2C (1 + 1) +#define IRQ_ORION5X_DOORBELL_C2H (1 + 2) +#define IRQ_ORION5X_UART0 (1 + 3) +#define IRQ_ORION5X_UART1 (1 + 4) +#define IRQ_ORION5X_I2C (1 + 5) +#define IRQ_ORION5X_GPIO_0_7 (1 + 6) +#define IRQ_ORION5X_GPIO_8_15 (1 + 7) +#define IRQ_ORION5X_GPIO_16_23 (1 + 8) +#define IRQ_ORION5X_GPIO_24_31 (1 + 9) +#define IRQ_ORION5X_PCIE0_ERR (1 + 10) +#define IRQ_ORION5X_PCIE0_INT (1 + 11) +#define IRQ_ORION5X_USB1_CTRL (1 + 12) +#define IRQ_ORION5X_DEV_BUS_ERR (1 + 14) +#define IRQ_ORION5X_PCI_ERR (1 + 15) +#define IRQ_ORION5X_USB_BR_ERR (1 + 16) +#define IRQ_ORION5X_USB0_CTRL (1 + 17) +#define IRQ_ORION5X_ETH_RX (1 + 18) +#define IRQ_ORION5X_ETH_TX (1 + 19) +#define IRQ_ORION5X_ETH_MISC (1 + 20) +#define IRQ_ORION5X_ETH_SUM (1 + 21) +#define IRQ_ORION5X_ETH_ERR (1 + 22) +#define IRQ_ORION5X_IDMA_ERR (1 + 23) +#define IRQ_ORION5X_IDMA_0 (1 + 24) +#define IRQ_ORION5X_IDMA_1 (1 + 25) +#define IRQ_ORION5X_IDMA_2 (1 + 26) +#define IRQ_ORION5X_IDMA_3 (1 + 27) +#define IRQ_ORION5X_CESA (1 + 28) +#define IRQ_ORION5X_SATA (1 + 29) +#define IRQ_ORION5X_XOR0 (1 + 30) +#define IRQ_ORION5X_XOR1 (1 + 31) /* * Orion General Purpose Pins */ -#define IRQ_ORION5X_GPIO_START 32 +#define IRQ_ORION5X_GPIO_START 33 #define NR_GPIO_IRQS 32 #define NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS) diff --git a/kernel/arch/arm/mach-orion5x/irq.c b/kernel/arch/arm/mach-orion5x/irq.c index cd4bac4d7..086ecb87d 100644 --- a/kernel/arch/arm/mach-orion5x/irq.c +++ 
b/kernel/arch/arm/mach-orion5x/irq.c @@ -42,7 +42,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs) stat = readl_relaxed(MAIN_IRQ_CAUSE); stat &= readl_relaxed(MAIN_IRQ_MASK); if (stat) { - unsigned int hwirq = __fls(stat); + unsigned int hwirq = 1 + __fls(stat); handle_IRQ(hwirq, regs); return; } @@ -51,7 +51,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs) void __init orion5x_init_irq(void) { - orion_irq_init(0, MAIN_IRQ_MASK); + orion_irq_init(1, MAIN_IRQ_MASK); #ifdef CONFIG_MULTI_IRQ_HANDLER set_handle_irq(orion5x_legacy_handle_irq); diff --git a/kernel/arch/arm/mach-rockchip/platsmp.c b/kernel/arch/arm/mach-rockchip/platsmp.c index 2e6ab67e2..611a5f96d 100644 --- a/kernel/arch/arm/mach-rockchip/platsmp.c +++ b/kernel/arch/arm/mach-rockchip/platsmp.c @@ -72,29 +72,22 @@ static struct reset_control *rockchip_get_core_reset(int cpu) static int pmu_set_power_domain(int pd, bool on) { u32 val = (on) ? 0 : BIT(pd); + struct reset_control *rstc = rockchip_get_core_reset(pd); int ret; + if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) { + pr_err("%s: could not get reset control for core %d\n", + __func__, pd); + return PTR_ERR(rstc); + } + /* * We need to soft reset the cpu when we turn off the cpu power domain, * or else the active processors might be stalled when the individual * processor is powered down. */ - if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) { - struct reset_control *rstc = rockchip_get_core_reset(pd); - - if (IS_ERR(rstc)) { - pr_err("%s: could not get reset control for core %d\n", - __func__, pd); - return PTR_ERR(rstc); - } - - if (on) - reset_control_deassert(rstc); - else - reset_control_assert(rstc); - - reset_control_put(rstc); - } + if (!IS_ERR(rstc) && !on) + reset_control_assert(rstc); ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val); if (ret < 0) { @@ -112,6 +105,12 @@ static int pmu_set_power_domain(int pd, bool on) } } + if (!IS_ERR(rstc)) { + if (on) + reset_control_deassert(rstc); + reset_control_put(rstc); + } + return 0; } @@ -147,8 +146,12 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu, * the mailbox: * sram_base_addr + 4: 0xdeadbeaf * sram_base_addr + 8: start address for pc + * The cpu0 need to wait the other cpus other than cpu0 entering + * the wfe state.The wait time is affected by many aspects. + * (e.g: cpu frequency, bootrom frequency, sram frequency, ...) * */ - udelay(10); + mdelay(1); /* ensure the cpus other than cpu0 to startup */ + writel(virt_to_phys(secondary_startup), sram_base_addr + 8); writel(0xDEADBEAF, sram_base_addr + 4); dsb_sev(); diff --git a/kernel/arch/arm64/Kconfig b/kernel/arch/arm64/Kconfig index d555ed3a0..09a41259b 100644 --- a/kernel/arch/arm64/Kconfig +++ b/kernel/arch/arm64/Kconfig @@ -103,6 +103,10 @@ config NO_IOPORT_MAP config STACKTRACE_SUPPORT def_bool y +config ILLEGAL_POINTER_VALUE + hex + default 0xdead000000000000 + config LOCKDEP_SUPPORT def_bool y @@ -411,6 +415,22 @@ config ARM64_ERRATUM_845719 If unsure, say Y. +config ARM64_ERRATUM_843419 + bool "Cortex-A53: 843419: A load or store might access an incorrect address" + depends on MODULES + default y + help + This option builds kernel modules using the large memory model in + order to avoid the use of the ADRP instruction, which can cause + a subsequent memory access to use an incorrect address on Cortex-A53 + parts up to r0p4. 
+ + Note that the kernel itself must be linked with a version of ld + which fixes potentially affected ADRP instructions through the + use of veneers. + + If unsure, say Y. + endmenu @@ -581,7 +601,7 @@ config XEN_DOM0 config XEN bool "Xen guest support on ARM64" - depends on ARM64 && OF + depends on ARM64 && OF && !PREEMPT_RT_FULL select SWIOTLB_XEN help Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64. diff --git a/kernel/arch/arm64/Makefile b/kernel/arch/arm64/Makefile index 4d2a92599..81151663e 100644 --- a/kernel/arch/arm64/Makefile +++ b/kernel/arch/arm64/Makefile @@ -30,6 +30,10 @@ endif CHECKFLAGS += -D__aarch64__ +ifeq ($(CONFIG_ARM64_ERRATUM_843419), y) +CFLAGS_MODULE += -mcmodel=large +endif + # Default value head-y := arch/arm64/kernel/head.o diff --git a/kernel/arch/arm64/include/asm/memory.h b/kernel/arch/arm64/include/asm/memory.h index f800d45ea..44a59c20e 100644 --- a/kernel/arch/arm64/include/asm/memory.h +++ b/kernel/arch/arm64/include/asm/memory.h @@ -114,6 +114,14 @@ extern phys_addr_t memstart_addr; #define PHYS_OFFSET ({ memstart_addr; }) /* + * The maximum physical address that the linear direct mapping + * of system RAM can cover. (PAGE_OFFSET can be interpreted as + * a 2's complement signed quantity and negated to derive the + * maximum size of the linear mapping.) + */ +#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; }) + +/* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. * diff --git a/kernel/arch/arm64/kernel/debug-monitors.c b/kernel/arch/arm64/kernel/debug-monitors.c index b056369fd..70654d843 100644 --- a/kernel/arch/arm64/kernel/debug-monitors.c +++ b/kernel/arch/arm64/kernel/debug-monitors.c @@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr, * Use reader/writer locks instead of plain spinlock. */ static LIST_HEAD(break_hook); -static DEFINE_RWLOCK(break_hook_lock); +static DEFINE_SPINLOCK(break_hook_lock); void register_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_add(&hook->node, &break_hook); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_add_rcu(&hook->node, &break_hook); + spin_unlock(&break_hook_lock); } void unregister_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_del(&hook->node); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_del_rcu(&hook->node); + spin_unlock(&break_hook_lock); + synchronize_rcu(); } static int call_break_hook(struct pt_regs *regs, unsigned int esr) @@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) struct break_hook *hook; int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; - read_lock(&break_hook_lock); - list_for_each_entry(hook, &break_hook, node) + rcu_read_lock(); + list_for_each_entry_rcu(hook, &break_hook, node) if ((esr & hook->esr_mask) == hook->esr_val) fn = hook->fn; - read_unlock(&break_hook_lock); + rcu_read_unlock(); return fn ? 
fn(regs, esr) : DBG_HOOK_ERROR; } diff --git a/kernel/arch/arm64/kernel/fpsimd.c b/kernel/arch/arm64/kernel/fpsimd.c index 3dca15634..c31e59fe2 100644 --- a/kernel/arch/arm64/kernel/fpsimd.c +++ b/kernel/arch/arm64/kernel/fpsimd.c @@ -157,6 +157,7 @@ void fpsimd_thread_switch(struct task_struct *next) void fpsimd_flush_thread(void) { memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); + fpsimd_flush_task_state(current); set_thread_flag(TIF_FOREIGN_FPSTATE); } diff --git a/kernel/arch/arm64/kernel/head.S b/kernel/arch/arm64/kernel/head.S index 19f915e8f..36aa31ff2 100644 --- a/kernel/arch/arm64/kernel/head.S +++ b/kernel/arch/arm64/kernel/head.S @@ -565,6 +565,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems msr hstr_el2, xzr // Disable CP15 traps to EL2 #endif + /* EL2 debug */ + mrs x0, pmcr_el0 // Disable debug access traps + ubfx x0, x0, #11, #5 // to EL2 and allow access to + msr mdcr_el2, x0 // all PMU counters from EL1 + /* Stage-2 translation */ msr vttbr_el2, xzr diff --git a/kernel/arch/arm64/kernel/insn.c b/kernel/arch/arm64/kernel/insn.c index 924902083..30eb88e5b 100644 --- a/kernel/arch/arm64/kernel/insn.c +++ b/kernel/arch/arm64/kernel/insn.c @@ -77,7 +77,7 @@ bool __kprobes aarch64_insn_is_nop(u32 insn) } } -static DEFINE_SPINLOCK(patch_lock); +static DEFINE_RAW_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap) { @@ -124,13 +124,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn) unsigned long flags = 0; int ret; - spin_lock_irqsave(&patch_lock, flags); + raw_spin_lock_irqsave(&patch_lock, flags); waddr = patch_map(addr, FIX_TEXT_POKE0); ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); patch_unmap(FIX_TEXT_POKE0); - spin_unlock_irqrestore(&patch_lock, flags); + raw_spin_unlock_irqrestore(&patch_lock, flags); return ret; } diff --git a/kernel/arch/arm64/kernel/module.c b/kernel/arch/arm64/kernel/module.c index 67bf4107f..876eb8df5 100644 --- a/kernel/arch/arm64/kernel/module.c +++ b/kernel/arch/arm64/kernel/module.c @@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, AARCH64_INSN_IMM_ADR); break; +#ifndef CONFIG_ARM64_ERRATUM_843419 case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, AARCH64_INSN_IMM_ADR); break; +#endif case R_AARCH64_ADD_ABS_LO12_NC: case R_AARCH64_LDST8_ABS_LO12_NC: overflow_check = false; diff --git a/kernel/arch/arm64/kernel/signal32.c b/kernel/arch/arm64/kernel/signal32.c index c0cff3410..c58aee062 100644 --- a/kernel/arch/arm64/kernel/signal32.c +++ b/kernel/arch/arm64/kernel/signal32.c @@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) /* * VFP save/restore code. + * + * We have to be careful with endianness, since the fpsimd context-switch + * code operates on 128-bit (Q) register values whereas the compat ABI + * uses an array of 64-bit (D) registers. Consequently, we need to swap + * the two halves of each Q register when running on a big-endian CPU. 
*/ +union __fpsimd_vreg { + __uint128_t raw; + struct { +#ifdef __AARCH64EB__ + u64 hi; + u64 lo; +#else + u64 lo; + u64 hi; +#endif + }; +}; + static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) { struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; compat_ulong_t magic = VFP_MAGIC; compat_ulong_t size = VFP_STORAGE_SIZE; compat_ulong_t fpscr, fpexc; - int err = 0; + int i, err = 0; /* * Save the hardware registers to the fpsimd_state structure. @@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) /* * Now copy the FP registers. Since the registers are packed, * we can copy the prefix we want (V0-V15) as it is. - * FIXME: Won't work if big endian. */ - err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, - sizeof(frame->ufp.fpregs)); + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { + union __fpsimd_vreg vreg = { + .raw = fpsimd->vregs[i >> 1], + }; + + __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err); + __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); + } /* Create an AArch32 fpscr from the fpsr and the fpcr. */ fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | @@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) compat_ulong_t magic = VFP_MAGIC; compat_ulong_t size = VFP_STORAGE_SIZE; compat_ulong_t fpscr; - int err = 0; + int i, err = 0; __get_user_error(magic, &frame->magic, err); __get_user_error(size, &frame->size, err); @@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; - /* - * Copy the FP registers into the start of the fpsimd_state. - * FIXME: Won't work if big endian. - */ - err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, - sizeof(frame->ufp.fpregs)); + /* Copy the FP registers into the start of the fpsimd_state. */ + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { + union __fpsimd_vreg vreg; + + __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err); + __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); + fpsimd.vregs[i >> 1] = vreg.raw; + } /* Extract the fpsr and the fpcr from the fpscr */ __get_user_error(fpscr, &frame->ufp.fpscr, err); diff --git a/kernel/arch/arm64/kvm/hyp.S b/kernel/arch/arm64/kvm/hyp.S index 5befd010e..64f9e60b3 100644 --- a/kernel/arch/arm64/kvm/hyp.S +++ b/kernel/arch/arm64/kvm/hyp.S @@ -844,8 +844,6 @@ mrs x3, cntv_ctl_el0 and x3, x3, #3 str w3, [x0, #VCPU_TIMER_CNTV_CTL] - bic x3, x3, #1 // Clear Enable - msr cntv_ctl_el0, x3 isb @@ -853,6 +851,9 @@ str x3, [x0, #VCPU_TIMER_CNTV_CVAL] 1: + // Disable the virtual timer + msr cntv_ctl_el0, xzr + // Allow physical timer/counter access for the host mrs x2, cnthctl_el2 orr x2, x2, #3 @@ -947,13 +948,15 @@ ENTRY(__kvm_vcpu_run) // Guest context add x2, x0, #VCPU_CONTEXT + // We must restore the 32-bit state before the sysregs, thanks + // to Cortex-A57 erratum #852523. + restore_guest_32bit_state bl __restore_sysregs bl __restore_fpsimd skip_debug_state x3, 1f bl __restore_debug 1: - restore_guest_32bit_state restore_guest_regs // That's it, no more messing around. 
diff --git a/kernel/arch/m32r/boot/compressed/misc.c b/kernel/arch/m32r/boot/compressed/misc.c index 28a09529f..3a7692745 100644 --- a/kernel/arch/m32r/boot/compressed/misc.c +++ b/kernel/arch/m32r/boot/compressed/misc.c @@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data, free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE; puts("\nDecompressing Linux... "); - decompress(input_data, input_len, NULL, NULL, output_data, NULL, error); + __decompress(input_data, input_len, NULL, NULL, output_data, 0, + NULL, error); puts("done.\nBooting the kernel.\n"); } diff --git a/kernel/arch/mips/boot/compressed/decompress.c b/kernel/arch/mips/boot/compressed/decompress.c index 54831069a..080cd53ba 100644 --- a/kernel/arch/mips/boot/compressed/decompress.c +++ b/kernel/arch/mips/boot/compressed/decompress.c @@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start) puts("\n"); /* Decompress the kernel with according algorithm */ - decompress((char *)zimage_start, zimage_size, 0, 0, - (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error); + __decompress((char *)zimage_start, zimage_size, 0, 0, + (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error); /* FIXME: should we flush cache here? */ puts("Now, booting the kernel...\n"); diff --git a/kernel/arch/mips/math-emu/cp1emu.c b/kernel/arch/mips/math-emu/cp1emu.c index 6983fcd48..2b95e34fa 100644 --- a/kernel/arch/mips/math-emu/cp1emu.c +++ b/kernel/arch/mips/math-emu/cp1emu.c @@ -1137,7 +1137,7 @@ emul: break; case mfhc_op: - if (!cpu_has_mips_r2) + if (!cpu_has_mips_r2_r6) goto sigill; /* copregister rd -> gpr[rt] */ @@ -1148,7 +1148,7 @@ emul: break; case mthc_op: - if (!cpu_has_mips_r2) + if (!cpu_has_mips_r2_r6) goto sigill; /* copregister rd <- gpr[rt] */ @@ -1181,6 +1181,24 @@ emul: } break; + case bc1eqz_op: + case bc1nez_op: + if (!cpu_has_mips_r6 || delay_slot(xcp)) + return SIGILL; + + cond = likely = 0; + switch (MIPSInst_RS(ir)) { + case bc1eqz_op: + if (get_fpr32(¤t->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1) + cond = 1; + break; + case bc1nez_op: + if (!(get_fpr32(¤t->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)) + cond = 1; + break; + } + goto branch_common; + case bc_op: if (delay_slot(xcp)) return SIGILL; @@ -1207,7 +1225,7 @@ emul: case bct_op: break; } - +branch_common: set_delay_slot(xcp); if (cond) { /* diff --git a/kernel/arch/parisc/kernel/irq.c b/kernel/arch/parisc/kernel/irq.c index f3191db6e..c0eab24f6 100644 --- a/kernel/arch/parisc/kernel/irq.c +++ b/kernel/arch/parisc/kernel/irq.c @@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs) struct pt_regs *old_regs; unsigned long eirr_val; int irq, cpu = smp_processor_id(); -#ifdef CONFIG_SMP struct irq_desc *desc; +#ifdef CONFIG_SMP cpumask_t dest; #endif @@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs) goto set_out; irq = eirr_to_irq(eirr_val); -#ifdef CONFIG_SMP + /* Filter out spurious interrupts, mostly from serial port at bootup */ desc = irq_to_desc(irq); + if (unlikely(!desc->action)) + goto set_out; + +#ifdef CONFIG_SMP cpumask_copy(&dest, desc->irq_data.affinity); if (irqd_is_per_cpu(&desc->irq_data) && !cpumask_test_cpu(smp_processor_id(), &dest)) { diff --git a/kernel/arch/parisc/kernel/syscall.S b/kernel/arch/parisc/kernel/syscall.S index 7ef22e338..0b8d26d3b 100644 --- a/kernel/arch/parisc/kernel/syscall.S +++ b/kernel/arch/parisc/kernel/syscall.S @@ -821,7 +821,7 @@ cas2_action: /* 64bit CAS */ #ifdef CONFIG_64BIT 19: ldd,ma 0(%sr3,%r26), %r29 - sub,= %r29, %r25, %r0 + sub,*= %r29, %r25, %r0 b,n cas2_end 20: std,ma %r24, 
0(%sr3,%r26) copy %r0, %r28 diff --git a/kernel/arch/powerpc/boot/Makefile b/kernel/arch/powerpc/boot/Makefile index 73eddda53..4eec430d8 100644 --- a/kernel/arch/powerpc/boot/Makefile +++ b/kernel/arch/powerpc/boot/Makefile @@ -28,6 +28,9 @@ BOOTCFLAGS += -m64 endif ifdef CONFIG_CPU_BIG_ENDIAN BOOTCFLAGS += -mbig-endian +else +BOOTCFLAGS += -mlittle-endian +BOOTCFLAGS += $(call cc-option,-mabi=elfv2) endif BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc diff --git a/kernel/arch/powerpc/include/asm/pgtable-ppc64.h b/kernel/arch/powerpc/include/asm/pgtable-ppc64.h index 43e6ad424..88d27e325 100644 --- a/kernel/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/kernel/arch/powerpc/include/asm/pgtable-ppc64.h @@ -135,7 +135,19 @@ #define pte_iterate_hashed_end() } while(0) #ifdef CONFIG_PPC_HAS_HASH_64K -#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr) +/* + * We expect this to be called only for user addresses or kernel virtual + * addresses other than the linear mapping. + */ +#define pte_pagesize_index(mm, addr, pte) \ + ({ \ + unsigned int psize; \ + if (is_kernel_addr(addr)) \ + psize = MMU_PAGE_4K; \ + else \ + psize = get_slice_psize(mm, addr); \ + psize; \ + }) #else #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K #endif diff --git a/kernel/arch/powerpc/include/asm/rtas.h b/kernel/arch/powerpc/include/asm/rtas.h index 7a4ede16b..b77ef369c 100644 --- a/kernel/arch/powerpc/include/asm/rtas.h +++ b/kernel/arch/powerpc/include/asm/rtas.h @@ -343,6 +343,7 @@ extern void rtas_power_off(void); extern void rtas_halt(void); extern void rtas_os_term(char *str); extern int rtas_get_sensor(int sensor, int index, int *state); +extern int rtas_get_sensor_fast(int sensor, int index, int *state); extern int rtas_get_power_level(int powerdomain, int *level); extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); extern bool rtas_indicator_present(int token, int *maxindex); diff --git a/kernel/arch/powerpc/include/asm/switch_to.h b/kernel/arch/powerpc/include/asm/switch_to.h index 58abeda64..15cca17cb 100644 --- a/kernel/arch/powerpc/include/asm/switch_to.h +++ b/kernel/arch/powerpc/include/asm/switch_to.h @@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {} extern void enable_kernel_fp(void); extern void enable_kernel_altivec(void); +extern void enable_kernel_vsx(void); extern int emulate_altivec(struct pt_regs *); extern void __giveup_vsx(struct task_struct *); extern void giveup_vsx(struct task_struct *); diff --git a/kernel/arch/powerpc/kernel/eeh.c b/kernel/arch/powerpc/kernel/eeh.c index 9ee61d156..cb565ad0a 100644 --- a/kernel/arch/powerpc/kernel/eeh.c +++ b/kernel/arch/powerpc/kernel/eeh.c @@ -310,11 +310,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) if (!(pe->type & EEH_PE_PHB)) { if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); + + /* + * The config space of some PCI devices can't be accessed + * when their PEs are in frozen state. Otherwise, fenced + * PHB might be seen. Those PEs are identified with flag + * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED + * is set automatically when the PE is put to EEH_PE_ISOLATED. + * + * Restoring BARs possibly triggers PCI config access in + * (OPAL) firmware and then causes fenced PHB. If the + * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's + * pointless to restore BARs and dump config space. 
+ */ eeh_ops->configure_bridge(pe); - eeh_pe_restore_bars(pe); + if (!(pe->state & EEH_PE_CFG_BLOCKED)) { + eeh_pe_restore_bars(pe); - pci_regs_buf[0] = 0; - eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen); + pci_regs_buf[0] = 0; + eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen); + } } eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); @@ -1118,9 +1133,6 @@ void eeh_add_device_late(struct pci_dev *dev) return; } - if (eeh_has_flag(EEH_PROBE_MODE_DEV)) - eeh_ops->probe(pdn, NULL); - /* * The EEH cache might not be removed correctly because of * unbalanced kref to the device during unplug time, which @@ -1144,6 +1156,9 @@ void eeh_add_device_late(struct pci_dev *dev) dev->dev.archdata.edev = NULL; } + if (eeh_has_flag(EEH_PROBE_MODE_DEV)) + eeh_ops->probe(pdn, NULL); + edev->pdev = dev; dev->dev.archdata.edev = edev; diff --git a/kernel/arch/powerpc/kernel/process.c b/kernel/arch/powerpc/kernel/process.c index febb50dd5..0596373cd 100644 --- a/kernel/arch/powerpc/kernel/process.c +++ b/kernel/arch/powerpc/kernel/process.c @@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX -#if 0 -/* not currently used, but some crazy RAID module might want to later */ void enable_kernel_vsx(void) { WARN_ON(preemptible()); @@ -220,7 +218,6 @@ void enable_kernel_vsx(void) #endif /* CONFIG_SMP */ } EXPORT_SYMBOL(enable_kernel_vsx); -#endif void giveup_vsx(struct task_struct *tsk) { diff --git a/kernel/arch/powerpc/kernel/rtas.c b/kernel/arch/powerpc/kernel/rtas.c index 7a488c108..caffb10e7 100644 --- a/kernel/arch/powerpc/kernel/rtas.c +++ b/kernel/arch/powerpc/kernel/rtas.c @@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state) } EXPORT_SYMBOL(rtas_get_sensor); +int rtas_get_sensor_fast(int sensor, int index, int *state) +{ + int token = rtas_token("get-sensor-state"); + int rc; + + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + rc = rtas_call(token, 2, 2, state, sensor, index); + WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN && + rc <= RTAS_EXTENDED_DELAY_MAX)); + + if (rc < 0) + return rtas_error_rc(rc); + return rc; +} + bool rtas_indicator_present(int token, int *maxindex) { int proplen, count, i; diff --git a/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c index b027a8973..c6d601cc9 100644 --- a/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -421,14 +421,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); v = pte & ~HPTE_V_HVLOCK; if (v & HPTE_V_VALID) { - u64 pte1; - - pte1 = be64_to_cpu(hpte[1]); hpte[0] &= ~cpu_to_be64(HPTE_V_VALID); - rb = compute_tlbie_rb(v, pte1, pte_index); + rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index); do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); - /* Read PTE low word after tlbie to get final R/C values */ - remove_revmap_chain(kvm, pte_index, rev, v, pte1); + /* + * The reference (R) and change (C) bits in a HPT + * entry can be set by hardware at any time up until + * the HPTE is invalidated and the TLB invalidation + * sequence has completed. This means that when + * removing a HPTE, we need to re-read the HPTE after + * the invalidation sequence has completed in order to + * obtain reliable values of R and C. 
+ */ + remove_revmap_chain(kvm, pte_index, rev, v, + be64_to_cpu(hpte[1])); } r = rev->guest_rpte & ~HPTE_GR_RESERVED; note_hpte_modification(kvm, rev); diff --git a/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 4d70df26c..3b2d2c5b6 100644 --- a/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1127,6 +1127,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL bne 3f lbz r0, HSTATE_HOST_IPI(r13) + cmpwi r0, 0 beq 4f b guest_exit_cont 3: diff --git a/kernel/arch/powerpc/mm/hugepage-hash64.c b/kernel/arch/powerpc/mm/hugepage-hash64.c index 43dafb9d6..4d87122cf 100644 --- a/kernel/arch/powerpc/mm/hugepage-hash64.c +++ b/kernel/arch/powerpc/mm/hugepage-hash64.c @@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, BUG_ON(index >= 4096); vpn = hpt_vpn(ea, vsid, ssize); - hash = hpt_hash(vpn, shift, ssize); hpte_slot_array = get_hpte_slot_array(pmdp); if (psize == MMU_PAGE_4K) { /* @@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, valid = hpte_valid(hpte_slot_array, index); if (valid) { /* update the hpte bits */ + hash = hpt_hash(vpn, shift, ssize); hidx = hpte_hash_index(hpte_slot_array, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; @@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, if (!valid) { unsigned long hpte_group; + hash = hpt_hash(vpn, shift, ssize); /* insert new entry */ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; new_pmd |= _PAGE_HASHPTE; diff --git a/kernel/arch/powerpc/platforms/pseries/ras.c b/kernel/arch/powerpc/platforms/pseries/ras.c index 02e4a1745..3b6647e57 100644 --- a/kernel/arch/powerpc/platforms/pseries/ras.c +++ b/kernel/arch/powerpc/platforms/pseries/ras.c @@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) int state; int critical; - status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state); + status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, + &state); if (state > 3) critical = 1; /* Time Critical */ diff --git a/kernel/arch/powerpc/platforms/pseries/setup.c b/kernel/arch/powerpc/platforms/pseries/setup.c index df6a70419..e6e8b241d 100644 --- a/kernel/arch/powerpc/platforms/pseries/setup.c +++ b/kernel/arch/powerpc/platforms/pseries/setup.c @@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act eeh_dev_init(PCI_DN(np), pci->phb); } break; + case OF_RECONFIG_DETACH_NODE: + pci = PCI_DN(np); + if (pci) + list_del(&pci->list); + break; default: err = NOTIFY_DONE; break; diff --git a/kernel/arch/s390/boot/compressed/misc.c b/kernel/arch/s390/boot/compressed/misc.c index 42506b371..4da604ebf 100644 --- a/kernel/arch/s390/boot/compressed/misc.c +++ b/kernel/arch/s390/boot/compressed/misc.c @@ -167,7 +167,7 @@ unsigned long decompress_kernel(void) #endif puts("Uncompressing Linux... "); - decompress(input_data, input_len, NULL, NULL, output, NULL, error); + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); puts("Ok, booting the kernel.\n"); return (unsigned long) output; } diff --git a/kernel/arch/s390/kernel/setup.c b/kernel/arch/s390/kernel/setup.c index 7262fe438..1942f22e6 100644 --- a/kernel/arch/s390/kernel/setup.c +++ b/kernel/arch/s390/kernel/setup.c @@ -683,7 +683,7 @@ static void __init setup_memory(void) /* * Setup hardware capabilities. 
*/ -static void __init setup_hwcaps(void) +static int __init setup_hwcaps(void) { static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; struct cpuid cpu_id; @@ -749,9 +749,11 @@ static void __init setup_hwcaps(void) elf_hwcap |= HWCAP_S390_TE; /* - * Vector extension HWCAP_S390_VXRS is bit 11. + * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension + * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX + * instead of facility bit 129. */ - if (test_facility(129)) + if (MACHINE_HAS_VX) elf_hwcap |= HWCAP_S390_VXRS; get_cpu_id(&cpu_id); add_device_randomness(&cpu_id, sizeof(cpu_id)); @@ -788,7 +790,9 @@ static void __init setup_hwcaps(void) strcpy(elf_platform, "z13"); break; } + return 0; } +arch_initcall(setup_hwcaps); /* * Add system information as device randomness @@ -871,11 +875,6 @@ void __init setup_arch(char **cmdline_p) cpu_init(); /* - * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). - */ - setup_hwcaps(); - - /* * Create kernel page tables and switch to virtual addressing. */ paging_init(); diff --git a/kernel/arch/sh/boot/compressed/misc.c b/kernel/arch/sh/boot/compressed/misc.c index 95470a472..208a9753a 100644 --- a/kernel/arch/sh/boot/compressed/misc.c +++ b/kernel/arch/sh/boot/compressed/misc.c @@ -132,7 +132,7 @@ void decompress_kernel(void) puts("Uncompressing Linux... "); cache_control(CACHE_ENABLE); - decompress(input_data, input_len, NULL, NULL, output, NULL, error); + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); cache_control(CACHE_DISABLE); puts("Ok, booting the kernel.\n"); } diff --git a/kernel/arch/unicore32/boot/compressed/misc.c b/kernel/arch/unicore32/boot/compressed/misc.c index 176d5bda3..5c65dfee2 100644 --- a/kernel/arch/unicore32/boot/compressed/misc.c +++ b/kernel/arch/unicore32/boot/compressed/misc.c @@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start, output_ptr = get_unaligned_le32(tmp); arch_decomp_puts("Uncompressing Linux..."); - decompress(input_data, input_data_end - input_data, NULL, NULL, - output_data, NULL, error); + __decompress(input_data, input_data_end - input_data, NULL, NULL, + output_data, 0, NULL, error); arch_decomp_puts(" done, booting the kernel.\n"); return output_ptr; } diff --git a/kernel/arch/x86/boot/compressed/misc.c b/kernel/arch/x86/boot/compressed/misc.c index a107b935e..e28437e0f 100644 --- a/kernel/arch/x86/boot/compressed/misc.c +++ b/kernel/arch/x86/boot/compressed/misc.c @@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, #endif debug_putstr("\nDecompressing Linux... "); - decompress(input_data, input_len, NULL, NULL, output, NULL, error); + __decompress(input_data, input_len, NULL, NULL, output, output_len, + NULL, error); parse_elf(output); /* * 32-bit always performs relocations. 
64-bit relocations are only diff --git a/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c b/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c index 2079baf06..daf8d2b9a 100644 --- a/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -294,6 +294,7 @@ static struct ahash_alg ghash_async_alg = { .cra_name = "ghash", .cra_driver_name = "ghash-clmulni", .cra_priority = 400, + .cra_ctxsize = sizeof(struct ghash_async_ctx), .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_type = &crypto_ahash_type, diff --git a/kernel/arch/x86/include/asm/desc.h b/kernel/arch/x86/include/asm/desc.h index a0bf89fd2..4e10d73cf 100644 --- a/kernel/arch/x86/include/asm/desc.h +++ b/kernel/arch/x86/include/asm/desc.h @@ -280,21 +280,6 @@ static inline void clear_LDT(void) set_ldt(NULL, 0); } -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc) -{ - set_ldt(pc->ldt, pc->size); -} - -static inline void load_LDT(mm_context_t *pc) -{ - preempt_disable(); - load_LDT_nolock(pc); - preempt_enable(); -} - static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); diff --git a/kernel/arch/x86/include/asm/mmu.h b/kernel/arch/x86/include/asm/mmu.h index 09b9620a7..364d27481 100644 --- a/kernel/arch/x86/include/asm/mmu.h +++ b/kernel/arch/x86/include/asm/mmu.h @@ -9,8 +9,7 @@ * we put the segment information here. */ typedef struct { - void *ldt; - int size; + struct ldt_struct *ldt; #ifdef CONFIG_X86_64 /* True if mm supports a task running in 32 bit compatibility mode. */ diff --git a/kernel/arch/x86/include/asm/mmu_context.h b/kernel/arch/x86/include/asm/mmu_context.h index e997f70f8..80d67dd80 100644 --- a/kernel/arch/x86/include/asm/mmu_context.h +++ b/kernel/arch/x86/include/asm/mmu_context.h @@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {} #endif /* + * ldt_structs can be allocated, used, and freed, but they are never + * modified while live. + */ +struct ldt_struct { + /* + * Xen requires page-aligned LDTs with special permissions. This is + * needed to prevent us from installing evil descriptors such as + * call gates. On native, we could merge the ldt_struct and LDT + * allocations, but it's not worth trying to optimize. + */ + struct desc_struct *entries; + int size; +}; + +static inline void load_mm_ldt(struct mm_struct *mm) +{ + struct ldt_struct *ldt; + + /* lockless_dereference synchronizes with smp_store_release */ + ldt = lockless_dereference(mm->context.ldt); + + /* + * Any change to mm->context.ldt is followed by an IPI to all + * CPUs with the mm active. The LDT will not be freed until + * after the IPI is handled by all such CPUs. This means that, + * if the ldt_struct changes before we return, the values we see + * will be safe, and the new values will be loaded before we run + * any user code. + * + * NB: don't try to convert this to use RCU without extreme care. + * We would still need IRQs off, because we don't want to change + * the local LDT after an IPI loaded a newer value than the one + * that we can see. + */ + + if (unlikely(ldt)) + set_ldt(ldt->entries, ldt->size); + else + clear_LDT(); + + DEBUG_LOCKS_WARN_ON(preemptible()); +} + +/* * Used for LDT copy/destruction. 
*/ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); @@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * was called and then modify_ldt changed * prev->context.ldt but suppressed an IPI to this CPU. * In this case, prev->context.ldt != NULL, because we - * never free an LDT while the mm still exists. That - * means that next->context.ldt != prev->context.ldt, - * because mms never share an LDT. + * never set context.ldt to NULL while the mm still + * exists. That means that next->context.ldt != + * prev->context.ldt, because mms never share an LDT. */ if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); + load_mm_ldt(next); } #ifdef CONFIG_SMP else { @@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, load_cr3(next->pgd); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); load_mm_cr4(next); - load_LDT_nolock(&next->context); + load_mm_ldt(next); } } #endif diff --git a/kernel/arch/x86/kernel/acpi/boot.c b/kernel/arch/x86/kernel/acpi/boot.c index dbe76a14c..07bea8022 100644 --- a/kernel/arch/x86/kernel/acpi/boot.c +++ b/kernel/arch/x86/kernel/acpi/boot.c @@ -489,6 +489,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + acpi_penalize_sci_irq(bus_irq, trigger, polarity); /* * stash over-ride to indicate we've been here diff --git a/kernel/arch/x86/kernel/cpu/common.c b/kernel/arch/x86/kernel/cpu/common.c index a62cf04da..205e0f3df 100644 --- a/kernel/arch/x86/kernel/cpu/common.c +++ b/kernel/arch/x86/kernel/cpu/common.c @@ -1434,7 +1434,7 @@ void cpu_init(void) load_sp0(t, ¤t->thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); clear_all_debug_regs(); dbg_restore_debug_regs(); @@ -1483,7 +1483,7 @@ void cpu_init(void) load_sp0(t, thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); diff --git a/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c b/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c index b4a41cf03..e166d833c 100644 --- a/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -116,6 +116,27 @@ void mce_intel_hcpu_update(unsigned long cpu) per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; } +static void cmci_toggle_interrupt_mode(bool on) +{ + unsigned long flags, *owned; + int bank; + u64 val; + + raw_spin_lock_irqsave(&cmci_discover_lock, flags); + owned = this_cpu_ptr(mce_banks_owned); + for_each_set_bit(bank, owned, MAX_NR_BANKS) { + rdmsrl(MSR_IA32_MCx_CTL2(bank), val); + + if (on) + val |= MCI_CTL2_CMCI_EN; + else + val &= ~MCI_CTL2_CMCI_EN; + + wrmsrl(MSR_IA32_MCx_CTL2(bank), val); + } + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); +} + unsigned long cmci_intel_adjust_timer(unsigned long interval) { if ((this_cpu_read(cmci_backoff_cnt) > 0) && @@ -145,7 +166,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) */ if (!atomic_read(&cmci_storm_on_cpus)) { __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); - cmci_reenable(); + cmci_toggle_interrupt_mode(true); cmci_recheck(); } return CMCI_POLL_INTERVAL; @@ -156,22 +177,6 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) } } -static void cmci_storm_disable_banks(void) -{ - unsigned long 
flags, *owned; - int bank; - u64 val; - - raw_spin_lock_irqsave(&cmci_discover_lock, flags); - owned = this_cpu_ptr(mce_banks_owned); - for_each_set_bit(bank, owned, MAX_NR_BANKS) { - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); - val &= ~MCI_CTL2_CMCI_EN; - wrmsrl(MSR_IA32_MCx_CTL2(bank), val); - } - raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); -} - static bool cmci_storm_detect(void) { unsigned int cnt = __this_cpu_read(cmci_storm_cnt); @@ -193,7 +198,7 @@ static bool cmci_storm_detect(void) if (cnt <= CMCI_STORM_THRESHOLD) return false; - cmci_storm_disable_banks(); + cmci_toggle_interrupt_mode(false); __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); r = atomic_add_return(1, &cmci_storm_on_cpus); mce_timer_kick(CMCI_STORM_INTERVAL); diff --git a/kernel/arch/x86/kernel/cpu/perf_event.c b/kernel/arch/x86/kernel/cpu/perf_event.c index aa4e3a74e..4cc98a4e8 100644 --- a/kernel/arch/x86/kernel/cpu/perf_event.c +++ b/kernel/arch/x86/kernel/cpu/perf_event.c @@ -2170,21 +2170,25 @@ static unsigned long get_segment_base(unsigned int segment) int idx = segment >> 3; if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { + struct ldt_struct *ldt; + if (idx > LDT_ENTRIES) return 0; - if (idx > current->active_mm->context.size) + /* IRQs are off, so this synchronizes with smp_store_release */ + ldt = lockless_dereference(current->active_mm->context.ldt); + if (!ldt || idx > ldt->size) return 0; - desc = current->active_mm->context.ldt; + desc = &ldt->entries[idx]; } else { if (idx > GDT_ENTRIES) return 0; - desc = raw_cpu_ptr(gdt_page.gdt); + desc = raw_cpu_ptr(gdt_page.gdt) + idx; } - return get_desc_base(desc + idx); + return get_desc_base(desc); } #ifdef CONFIG_COMPAT diff --git a/kernel/arch/x86/kernel/ldt.c b/kernel/arch/x86/kernel/ldt.c index c37886d75..2bcc0525f 100644 --- a/kernel/arch/x86/kernel/ldt.c +++ b/kernel/arch/x86/kernel/ldt.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> @@ -20,82 +21,82 @@ #include <asm/mmu_context.h> #include <asm/syscalls.h> -#ifdef CONFIG_SMP +/* context.lock is held for us, so we don't need any locking. */ static void flush_ldt(void *current_mm) { - if (current->active_mm == current_mm) - load_LDT(¤t->active_mm->context); + mm_context_t *pc; + + if (current->active_mm != current_mm) + return; + + pc = ¤t->active_mm->context; + set_ldt(pc->ldt->entries, pc->ldt->size); } -#endif -static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ +static struct ldt_struct *alloc_ldt_struct(int size) { - void *oldldt, *newldt; - int oldsize; - - if (mincount <= pc->size) - return 0; - oldsize = pc->size; - mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & - (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); - if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) - newldt = vmalloc(mincount * LDT_ENTRY_SIZE); + struct ldt_struct *new_ldt; + int alloc_size; + + if (size > LDT_ENTRIES) + return NULL; + + new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); + if (!new_ldt) + return NULL; + + BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); + alloc_size = size * LDT_ENTRY_SIZE; + + /* + * Xen is very picky: it requires a page-aligned LDT that has no + * trailing nonzero bytes in any page that contains LDT descriptors. + * Keep it simple: zero the whole allocation and never allocate less + * than PAGE_SIZE. 
+ */ + if (alloc_size > PAGE_SIZE) + new_ldt->entries = vzalloc(alloc_size); else - newldt = (void *)__get_free_page(GFP_KERNEL); - - if (!newldt) - return -ENOMEM; + new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (oldsize) - memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); - oldldt = pc->ldt; - memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, - (mincount - oldsize) * LDT_ENTRY_SIZE); + if (!new_ldt->entries) { + kfree(new_ldt); + return NULL; + } - paravirt_alloc_ldt(newldt, mincount); + new_ldt->size = size; + return new_ldt; +} -#ifdef CONFIG_X86_64 - /* CHECKME: Do we really need this ? */ - wmb(); -#endif - pc->ldt = newldt; - wmb(); - pc->size = mincount; - wmb(); - - if (reload) { -#ifdef CONFIG_SMP - preempt_disable(); - load_LDT(pc); - if (!cpumask_equal(mm_cpumask(current->mm), - cpumask_of(smp_processor_id()))) - smp_call_function(flush_ldt, current->mm, 1); - preempt_enable(); -#else - load_LDT(pc); -#endif - } - if (oldsize) { - paravirt_free_ldt(oldldt, oldsize); - if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(oldldt); - else - put_page(virt_to_page(oldldt)); - } - return 0; +/* After calling this, the LDT is immutable. */ +static void finalize_ldt_struct(struct ldt_struct *ldt) +{ + paravirt_alloc_ldt(ldt->entries, ldt->size); } -static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +/* context.lock is held */ +static void install_ldt(struct mm_struct *current_mm, + struct ldt_struct *ldt) { - int err = alloc_ldt(new, old->size, 0); - int i; + /* Synchronizes with lockless_dereference in load_mm_ldt. */ + smp_store_release(¤t_mm->context.ldt, ldt); + + /* Activate the LDT for all CPUs using current_mm. */ + on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); +} - if (err < 0) - return err; +static void free_ldt_struct(struct ldt_struct *ldt) +{ + if (likely(!ldt)) + return; - for (i = 0; i < old->size; i++) - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); - return 0; + paravirt_free_ldt(ldt->entries, ldt->size); + if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(ldt->entries); + else + kfree(ldt->entries); + kfree(ldt); } /* @@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + struct ldt_struct *new_ldt; struct mm_struct *old_mm; int retval = 0; mutex_init(&mm->context.lock); - mm->context.size = 0; old_mm = current->mm; - if (old_mm && old_mm->context.size > 0) { - mutex_lock(&old_mm->context.lock); - retval = copy_ldt(&mm->context, &old_mm->context); - mutex_unlock(&old_mm->context.lock); + if (!old_mm) { + mm->context.ldt = NULL; + return 0; } + + mutex_lock(&old_mm->context.lock); + if (!old_mm->context.ldt) { + mm->context.ldt = NULL; + goto out_unlock; + } + + new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); + if (!new_ldt) { + retval = -ENOMEM; + goto out_unlock; + } + + memcpy(new_ldt->entries, old_mm->context.ldt->entries, + new_ldt->size * LDT_ENTRY_SIZE); + finalize_ldt_struct(new_ldt); + + mm->context.ldt = new_ldt; + +out_unlock: + mutex_unlock(&old_mm->context.lock); return retval; } @@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) */ void destroy_context(struct mm_struct *mm) { - if (mm->context.size) { -#ifdef CONFIG_X86_32 - /* CHECKME: Can this ever happen ? 
*/ - if (mm == current->active_mm) - clear_LDT(); -#endif - paravirt_free_ldt(mm->context.ldt, mm->context.size); - if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(mm->context.ldt); - else - put_page(virt_to_page(mm->context.ldt)); - mm->context.size = 0; - } + free_ldt_struct(mm->context.ldt); + mm->context.ldt = NULL; } static int read_ldt(void __user *ptr, unsigned long bytecount) { - int err; + int retval; unsigned long size; struct mm_struct *mm = current->mm; - if (!mm->context.size) - return 0; + mutex_lock(&mm->context.lock); + + if (!mm->context.ldt) { + retval = 0; + goto out_unlock; + } + if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; - mutex_lock(&mm->context.lock); - size = mm->context.size * LDT_ENTRY_SIZE; + size = mm->context.ldt->size * LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = 0; - if (copy_to_user(ptr, mm->context.ldt, size)) - err = -EFAULT; - mutex_unlock(&mm->context.lock); - if (err < 0) - goto error_return; + if (copy_to_user(ptr, mm->context.ldt->entries, size)) { + retval = -EFAULT; + goto out_unlock; + } + if (size != bytecount) { - /* zero-fill the rest */ - if (clear_user(ptr + size, bytecount - size) != 0) { - err = -EFAULT; - goto error_return; + /* Zero-fill the rest and pretend we read bytecount bytes. */ + if (clear_user(ptr + size, bytecount - size)) { + retval = -EFAULT; + goto out_unlock; } } - return bytecount; -error_return: - return err; + retval = bytecount; + +out_unlock: + mutex_unlock(&mm->context.lock); + return retval; } static int read_default_ldt(void __user *ptr, unsigned long bytecount) @@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) struct desc_struct ldt; int error; struct user_desc ldt_info; + int oldsize, newsize; + struct ldt_struct *new_ldt, *old_ldt; error = -EINVAL; if (bytecount != sizeof(ldt_info)) @@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) goto out; } - mutex_lock(&mm->context.lock); - if (ldt_info.entry_number >= mm->context.size) { - error = alloc_ldt(¤t->mm->context, - ldt_info.entry_number + 1, 1); - if (error < 0) - goto out_unlock; - } - - /* Allow LDTs to be cleared by the user. */ - if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { - if (oldmode || LDT_empty(&ldt_info)) { - memset(&ldt, 0, sizeof(ldt)); - goto install; + if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || + LDT_empty(&ldt_info)) { + /* The user wants to clear the entry. */ + memset(&ldt, 0, sizeof(ldt)); + } else { + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { + error = -EINVAL; + goto out; } + + fill_ldt(&ldt, &ldt_info); + if (oldmode) + ldt.avl = 0; } - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { - error = -EINVAL; + mutex_lock(&mm->context.lock); + + old_ldt = mm->context.ldt; + oldsize = old_ldt ? old_ldt->size : 0; + newsize = max((int)(ldt_info.entry_number + 1), oldsize); + + error = -ENOMEM; + new_ldt = alloc_ldt_struct(newsize); + if (!new_ldt) goto out_unlock; - } - fill_ldt(&ldt, &ldt_info); - if (oldmode) - ldt.avl = 0; + if (old_ldt) + memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); + new_ldt->entries[ldt_info.entry_number] = ldt; + finalize_ldt_struct(new_ldt); - /* Install the new entry ... 
*/ -install: - write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt); + install_ldt(mm, new_ldt); + free_ldt_struct(old_ldt); error = 0; out_unlock: diff --git a/kernel/arch/x86/kernel/process_64.c b/kernel/arch/x86/kernel/process_64.c index ddfdbf74f..5e0bf57d9 100644 --- a/kernel/arch/x86/kernel/process_64.c +++ b/kernel/arch/x86/kernel/process_64.c @@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all) void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - if (dead_task->mm->context.size) { + if (dead_task->mm->context.ldt) { pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", dead_task->comm, dead_task->mm->context.ldt, - dead_task->mm->context.size); + dead_task->mm->context.ldt->size); BUG(); } } diff --git a/kernel/arch/x86/kernel/step.c b/kernel/arch/x86/kernel/step.c index 9b4d51d0c..0ccb53a9f 100644 --- a/kernel/arch/x86/kernel/step.c +++ b/kernel/arch/x86/kernel/step.c @@ -5,6 +5,7 @@ #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> +#include <asm/mmu_context.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { @@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re struct desc_struct *desc; unsigned long base; - seg &= ~7UL; + seg >>= 3; mutex_lock(&child->mm->context.lock); - if (unlikely((seg >> 3) >= child->mm->context.size)) + if (unlikely(!child->mm->context.ldt || + seg >= child->mm->context.ldt->size)) addr = -1L; /* bogus selector, access would fault */ else { - desc = child->mm->context.ldt + seg; + desc = &child->mm->context.ldt->entries[seg]; base = get_desc_base(desc); /* 16-bit code segment? */ diff --git a/kernel/arch/x86/kvm/mmu.c b/kernel/arch/x86/kvm/mmu.c index b73337634..554e877e0 100644 --- a/kernel/arch/x86/kvm/mmu.c +++ b/kernel/arch/x86/kvm/mmu.c @@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep) { return ACCESS_ONCE(*sptep); } - -static bool __check_direct_spte_mmio_pf(u64 spte) -{ - /* It is valid if the spte is zapped. */ - return spte == 0ull; -} #else union split_spte { struct { @@ -478,23 +472,6 @@ retry: return spte.spte; } - -static bool __check_direct_spte_mmio_pf(u64 spte) -{ - union split_spte sspte = (union split_spte)spte; - u32 high_mmio_mask = shadow_mmio_mask >> 32; - - /* It is valid if the spte is zapped. */ - if (spte == 0ull) - return true; - - /* It is valid if the spte is being zapped. */ - if (sspte.spte_low == 0ull && - (sspte.spte_high & high_mmio_mask) == high_mmio_mask) - return true; - - return false; -} #endif static bool spte_is_locklessly_modifiable(u64 spte) @@ -3343,21 +3320,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) return vcpu_match_mmio_gva(vcpu, addr); } - -/* - * On direct hosts, the last spte is only allows two states - * for mmio page fault: - * - It is the mmio spte - * - It is zapped or it is being zapped. - * - * This function completely checks the spte when the last spte - * is not the mmio spte. - */ -static bool check_direct_spte_mmio_pf(u64 spte) -{ - return __check_direct_spte_mmio_pf(spte); -} - static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) { struct kvm_shadow_walk_iterator iterator; @@ -3400,13 +3362,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) } /* - * It's ok if the gva is remapped by other cpus on shadow guest, - * it's a BUG if the gfn is not a mmio page. 
- */ - if (direct && !check_direct_spte_mmio_pf(spte)) - return RET_MMIO_PF_BUG; - - /* * If the page table is zapped by other cpus, let CPU fault again on * the address. */ diff --git a/kernel/arch/x86/kvm/x86.c b/kernel/arch/x86/kvm/x86.c index 6cceb2cb2..37d79a026 100644 --- a/kernel/arch/x86/kvm/x86.c +++ b/kernel/arch/x86/kvm/x86.c @@ -2192,7 +2192,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (guest_cpuid_has_tsc_adjust(vcpu)) { if (!msr_info->host_initiated) { s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; - kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); + adjust_tsc_offset_guest(vcpu, adj); } vcpu->arch.ia32_tsc_adjust_msr = data; } diff --git a/kernel/arch/x86/math-emu/fpu_entry.c b/kernel/arch/x86/math-emu/fpu_entry.c index 9b8681241..274a52b11 100644 --- a/kernel/arch/x86/math-emu/fpu_entry.c +++ b/kernel/arch/x86/math-emu/fpu_entry.c @@ -29,7 +29,6 @@ #include <asm/uaccess.h> #include <asm/traps.h> -#include <asm/desc.h> #include <asm/user.h> #include <asm/i387.h> @@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info) math_abort(FPU_info, SIGILL); } - code_descriptor = LDT_DESCRIPTOR(FPU_CS); + code_descriptor = FPU_get_ldt_descriptor(FPU_CS); if (SEG_D_SIZE(code_descriptor)) { /* The above test may be wrong, the book is not clear */ /* Segmented 32 bit protected mode */ diff --git a/kernel/arch/x86/math-emu/fpu_system.h b/kernel/arch/x86/math-emu/fpu_system.h index 2c614410a..d342fce49 100644 --- a/kernel/arch/x86/math-emu/fpu_system.h +++ b/kernel/arch/x86/math-emu/fpu_system.h @@ -16,9 +16,24 @@ #include <linux/kernel.h> #include <linux/mm.h> -/* s is always from a cpu register, and the cpu does bounds checking - * during register load --> no further bounds checks needed */ -#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) +#include <asm/desc.h> +#include <asm/mmu_context.h> + +static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg) +{ + static struct desc_struct zero_desc; + struct desc_struct ret = zero_desc; + +#ifdef CONFIG_MODIFY_LDT_SYSCALL + seg >>= 3; + mutex_lock(¤t->mm->context.lock); + if (current->mm->context.ldt && seg < current->mm->context.ldt->size) + ret = current->mm->context.ldt->entries[seg]; + mutex_unlock(¤t->mm->context.lock); +#endif + return ret; +} + #define SEG_D_SIZE(x) ((x).b & (3 << 21)) #define SEG_G_BIT(x) ((x).b & (1 << 23)) #define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 
4096 : 1) diff --git a/kernel/arch/x86/math-emu/get_address.c b/kernel/arch/x86/math-emu/get_address.c index 6ef5e9938..8300db71c 100644 --- a/kernel/arch/x86/math-emu/get_address.c +++ b/kernel/arch/x86/math-emu/get_address.c @@ -20,7 +20,6 @@ #include <linux/stddef.h> #include <asm/uaccess.h> -#include <asm/desc.h> #include "fpu_system.h" #include "exception.h" @@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment, addr->selector = PM_REG_(segment); } - descriptor = LDT_DESCRIPTOR(PM_REG_(segment)); + descriptor = FPU_get_ldt_descriptor(addr->selector); base_address = SEG_BASE_ADDR(descriptor); address = base_address + offset; limit = base_address diff --git a/kernel/arch/x86/mm/init_32.c b/kernel/arch/x86/mm/init_32.c index c8140e128..c23ab1ee3 100644 --- a/kernel/arch/x86/mm/init_32.c +++ b/kernel/arch/x86/mm/init_32.c @@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end) vaddr = start; pgd_idx = pgd_index(vaddr); + pmd_idx = pmd_index(vaddr); for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) { for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); diff --git a/kernel/arch/x86/power/cpu.c b/kernel/arch/x86/power/cpu.c index 757678fb2..bf9384488 100644 --- a/kernel/arch/x86/power/cpu.c +++ b/kernel/arch/x86/power/cpu.c @@ -23,6 +23,7 @@ #include <asm/debugreg.h> #include <asm/fpu-internal.h> /* pcntxt_mask */ #include <asm/cpu.h> +#include <asm/mmu_context.h> #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -154,7 +155,7 @@ static void fix_processor_context(void) syscall_init(); /* This sets MSR_*STAR and related */ #endif load_TR_desc(); /* This does ltr */ - load_LDT(¤t->active_mm->context); /* This does lldt */ + load_mm_ldt(current->active_mm); /* This does lldt */ } /** diff --git a/kernel/arch/xtensa/include/asm/traps.h b/kernel/arch/xtensa/include/asm/traps.h index 677bfcf4e..28f33a8b7 100644 --- a/kernel/arch/xtensa/include/asm/traps.h +++ b/kernel/arch/xtensa/include/asm/traps.h @@ -25,30 +25,39 @@ static inline void spill_registers(void) { #if XCHAL_NUM_AREGS > 16 __asm__ __volatile__ ( - " call12 1f\n" + " call8 1f\n" " _j 2f\n" " retw\n" " .align 4\n" "1:\n" +#if XCHAL_NUM_AREGS == 32 + " _entry a1, 32\n" + " addi a8, a0, 3\n" + " _entry a1, 16\n" + " mov a12, a12\n" + " retw\n" +#else " _entry a1, 48\n" - " addi a12, a0, 3\n" -#if XCHAL_NUM_AREGS > 32 - " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" + " call12 1f\n" + " retw\n" + " .align 4\n" + "1:\n" + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n" " _entry a1, 48\n" " mov a12, a0\n" " .endr\n" -#endif - " _entry a1, 48\n" + " _entry a1, 16\n" #if XCHAL_NUM_AREGS % 12 == 0 - " mov a8, a8\n" -#elif XCHAL_NUM_AREGS % 12 == 4 " mov a12, a12\n" -#elif XCHAL_NUM_AREGS % 12 == 8 +#elif XCHAL_NUM_AREGS % 12 == 4 " mov a4, a4\n" +#elif XCHAL_NUM_AREGS % 12 == 8 + " mov a8, a8\n" #endif " retw\n" +#endif "2:\n" - : : : "a12", "a13", "memory"); + : : : "a8", "a9", "memory"); #else __asm__ __volatile__ ( " mov a12, a12\n" diff --git a/kernel/arch/xtensa/kernel/entry.S b/kernel/arch/xtensa/kernel/entry.S index 82bbfa5a0..a2a902140 100644 --- a/kernel/arch/xtensa/kernel/entry.S +++ b/kernel/arch/xtensa/kernel/entry.S @@ -568,12 +568,13 @@ user_exception_exit: * (if we have restored WSBITS-1 frames). */ +2: #if XCHAL_HAVE_THREADPTR l32i a3, a1, PT_THREADPTR wur a3, threadptr #endif -2: j common_exception_exit + j common_exception_exit /* This is the kernel exception exit. 
* We avoided to do a MOVSP when we entered the exception, but we @@ -1820,7 +1821,7 @@ ENDPROC(system_call) mov a12, a0 .endr #endif - _entry a1, 48 + _entry a1, 16 #if XCHAL_NUM_AREGS % 12 == 0 mov a8, a8 #elif XCHAL_NUM_AREGS % 12 == 4 @@ -1844,7 +1845,7 @@ ENDPROC(system_call) ENTRY(_switch_to) - entry a1, 16 + entry a1, 48 mov a11, a3 # and 'next' (a3) diff --git a/kernel/block/blk-mq-sysfs.c b/kernel/block/blk-mq-sysfs.c index b79685e06..279c5d674 100644 --- a/kernel/block/blk-mq-sysfs.c +++ b/kernel/block/blk-mq-sysfs.c @@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page) static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg) { - char *start_page = page; struct request *rq; + int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg); + + list_for_each_entry(rq, list, queuelist) { + const int rq_len = 2 * sizeof(rq) + 2; + + /* if the output will be truncated */ + if (PAGE_SIZE - 1 < len + rq_len) { + /* backspacing if it can't hold '\t...\n' */ + if (PAGE_SIZE - 1 < len + 5) + len -= rq_len; + len += snprintf(page + len, PAGE_SIZE - 1 - len, + "\t...\n"); + break; + } + len += snprintf(page + len, PAGE_SIZE - 1 - len, + "\t%p\n", rq); + } - page += sprintf(page, "%s:\n", msg); - - list_for_each_entry(rq, list, queuelist) - page += sprintf(page, "\t%p\n", rq); - - return page - start_page; + return len; } static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) diff --git a/kernel/drivers/acpi/acpi_pnp.c b/kernel/drivers/acpi/acpi_pnp.c index ff6d8adc9..fb765524c 100644 --- a/kernel/drivers/acpi/acpi_pnp.c +++ b/kernel/drivers/acpi/acpi_pnp.c @@ -153,6 +153,7 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"AEI0250"}, /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */ {"AEI1240"}, /* Actiontec ISA PNP 56K X2 Fax Modem */ {"AKY1021"}, /* Rockwell 56K ACF II Fax+Data+Voice Modem */ + {"ALI5123"}, /* ALi Fast Infrared Controller */ {"AZT4001"}, /* AZT3005 PnP SOUND DEVICE */ {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */ {"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */ diff --git a/kernel/drivers/acpi/pci_link.c b/kernel/drivers/acpi/pci_link.c index cfd7581cc..b09ad5544 100644 --- a/kernel/drivers/acpi/pci_link.c +++ b/kernel/drivers/acpi/pci_link.c @@ -826,6 +826,22 @@ void acpi_penalize_isa_irq(int irq, int active) } /* + * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with + * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for + * PCI IRQs. + */ +void acpi_penalize_sci_irq(int irq, int trigger, int polarity) +{ + if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) { + if (trigger != ACPI_MADT_TRIGGER_LEVEL || + polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) + acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS; + else + acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; + } +} + +/* * Over-ride default table to reserve additional IRQs for use by ISA * e.g. acpi_irq_isa=5 * Useful for telling ACPI how not to interfere with your ISA sound card. 
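The blk-mq-sysfs hunk above replaces unbounded sprintf() calls with length-checked snprintf() accumulation, so a long request list can no longer overrun the single sysfs page; when output is cut short it emits a "\t...\n" marker instead of silently dropping data. The stand-alone C sketch below shows the same bounded-append pattern. It is illustrative only: the names are made up, a tiny buffer stands in for PAGE_SIZE - 1, and room for the marker is reserved up front instead of the backspacing heuristic used in the hunk.

#include <stdio.h>

#define PAGE_BUF 64	/* stand-in for the kernel's PAGE_SIZE - 1 bound */

/*
 * Append one "\t%p\n" line per item without ever writing past the buffer.
 * Before each entry, check that it and the "\t...\n" marker would still fit;
 * if not, print the marker once and stop, so a reader can tell the list was
 * truncated. The header line is assumed to fit in the buffer.
 */
static size_t bounded_list_show(char *page, const void *const *items,
				size_t nr, const char *msg)
{
	size_t len = snprintf(page, PAGE_BUF, "%s:\n", msg);
	size_t i;

	for (i = 0; i < nr; i++) {
		/* worst-case width of "\t0x<16 hex digits>\n" on a 64-bit host */
		const size_t entry_len = 2 * sizeof(void *) + 4;

		if (len + entry_len + sizeof("\t...\n") > PAGE_BUF) {
			len += snprintf(page + len, PAGE_BUF - len, "\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_BUF - len, "\t%p\n", items[i]);
	}
	return len;
}

int main(void)
{
	int storage[16];
	const void *items[16];
	char page[PAGE_BUF];
	size_t len;
	int i;

	for (i = 0; i < 16; i++)
		items[i] = &storage[i];

	len = bounded_list_show(page, items, 16, "pending requests");
	fwrite(page, 1, len, stdout);
	return 0;
}

The 64-byte buffer is deliberately small so the truncation path is exercised; in the kernel hunk the bound is PAGE_SIZE - 1 and the items are struct request pointers from the per-context dispatch list.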
diff --git a/kernel/drivers/ata/ahci.c b/kernel/drivers/ata/ahci.c index 65ee94454..e6ea912ae 100644 --- a/kernel/drivers/ata/ahci.c +++ b/kernel/drivers/ata/ahci.c @@ -349,6 +349,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* JMicron 362B and 362C have an AHCI function with IDE class code */ { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr }, { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr }, + /* May need to update quirk_jmicron_async_suspend() for additions */ /* ATI */ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ @@ -1377,18 +1378,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; - /* - * The JMicron chip 361/363 contains one SATA controller and one - * PATA controller,for powering on these both controllers, we must - * follow the sequence one by one, otherwise one of them can not be - * powered on successfully, so here we disable the async suspend - * method for these chips. - */ - if (pdev->vendor == PCI_VENDOR_ID_JMICRON && - (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 || - pdev->device == PCI_DEVICE_ID_JMICRON_JMB361)) - device_disable_async_suspend(&pdev->dev); - /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) diff --git a/kernel/drivers/ata/pata_jmicron.c b/kernel/drivers/ata/pata_jmicron.c index 47e418b8c..4d1a5d2c4 100644 --- a/kernel/drivers/ata/pata_jmicron.c +++ b/kernel/drivers/ata/pata_jmicron.c @@ -143,18 +143,6 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i }; const struct ata_port_info *ppi[] = { &info, NULL }; - /* - * The JMicron chip 361/363 contains one SATA controller and one - * PATA controller,for powering on these both controllers, we must - * follow the sequence one by one, otherwise one of them can not be - * powered on successfully, so here we disable the async suspend - * method for these chips. 
- */ - if (pdev->vendor == PCI_VENDOR_ID_JMICRON && - (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 || - pdev->device == PCI_DEVICE_ID_JMICRON_JMB361)) - device_disable_async_suspend(&pdev->dev); - return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); } diff --git a/kernel/drivers/auxdisplay/ks0108.c b/kernel/drivers/auxdisplay/ks0108.c index 5b9385239..0d752851a 100644 --- a/kernel/drivers/auxdisplay/ks0108.c +++ b/kernel/drivers/auxdisplay/ks0108.c @@ -139,6 +139,7 @@ static int __init ks0108_init(void) ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME, NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); + parport_put_port(ks0108_parport); if (ks0108_pardevice == NULL) { printk(KERN_ERR KS0108_NAME ": ERROR: " "parport didn't register new device\n"); diff --git a/kernel/drivers/base/devres.c b/kernel/drivers/base/devres.c index c8a53d1e0..875464690 100644 --- a/kernel/drivers/base/devres.c +++ b/kernel/drivers/base/devres.c @@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res, if (!dr) { add_dr(dev, &new_dr->node); dr = new_dr; - new_dr = NULL; + new_res = NULL; } spin_unlock_irqrestore(&dev->devres_lock, flags); - devres_free(new_dr); + devres_free(new_res); return dr->data; } diff --git a/kernel/drivers/base/node.c b/kernel/drivers/base/node.c index a2aa65b42..b10479c87 100644 --- a/kernel/drivers/base/node.c +++ b/kernel/drivers/base/node.c @@ -388,6 +388,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { int page_nid; + /* + * memory block could have several absent sections from start. + * skip pfn range from absent section + */ + if (!pfn_present(pfn)) { + pfn = round_down(pfn + PAGES_PER_SECTION, + PAGES_PER_SECTION) - 1; + continue; + } + page_nid = get_nid_for_pfn(pfn); if (page_nid < 0) continue; diff --git a/kernel/drivers/base/platform.c b/kernel/drivers/base/platform.c index ebf034b97..7403de948 100644 --- a/kernel/drivers/base/platform.c +++ b/kernel/drivers/base/platform.c @@ -375,9 +375,7 @@ int platform_device_add(struct platform_device *pdev) while (--i >= 0) { struct resource *r = &pdev->resource[i]; - unsigned long type = resource_type(r); - - if (type == IORESOURCE_MEM || type == IORESOURCE_IO) + if (r->parent) release_resource(r); } @@ -408,9 +406,7 @@ void platform_device_del(struct platform_device *pdev) for (i = 0; i < pdev->num_resources; i++) { struct resource *r = &pdev->resource[i]; - unsigned long type = resource_type(r); - - if (type == IORESOURCE_MEM || type == IORESOURCE_IO) + if (r->parent) release_resource(r); } } diff --git a/kernel/drivers/base/power/clock_ops.c b/kernel/drivers/base/power/clock_ops.c index c7b0fcebf..ac3c07db9 100644 --- a/kernel/drivers/base/power/clock_ops.c +++ b/kernel/drivers/base/power/clock_ops.c @@ -37,7 +37,7 @@ struct pm_clock_entry { * @dev: The device for the given clock * @ce: PM clock entry corresponding to the clock. 
*/ -static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) +static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) { int ret; @@ -49,8 +49,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) dev_err(dev, "%s: failed to enable clk %p, error %d\n", __func__, ce->clk, ret); } - - return ret; } /** diff --git a/kernel/drivers/block/zram/zcomp.c b/kernel/drivers/block/zram/zcomp.c index f1ff39a3d..54d946a9e 100644 --- a/kernel/drivers/block/zram/zcomp.c +++ b/kernel/drivers/block/zram/zcomp.c @@ -325,12 +325,14 @@ void zcomp_destroy(struct zcomp *comp) * allocate new zcomp and initialize it. return compressing * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL) * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in - * case of allocation error. + * case of allocation error, or any other error potentially + * returned by functions zcomp_strm_{multi,single}_create. */ struct zcomp *zcomp_create(const char *compress, int max_strm) { struct zcomp *comp; struct zcomp_backend *backend; + int error; backend = find_backend(compress); if (!backend) @@ -342,12 +344,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm) comp->backend = backend; if (max_strm > 1) - zcomp_strm_multi_create(comp, max_strm); + error = zcomp_strm_multi_create(comp, max_strm); else - zcomp_strm_single_create(comp); - if (!comp->stream) { + error = zcomp_strm_single_create(comp); + if (error) { kfree(comp); - return ERR_PTR(-ENOMEM); + return ERR_PTR(error); } return comp; } diff --git a/kernel/drivers/clk/pistachio/clk-pistachio.c b/kernel/drivers/clk/pistachio/clk-pistachio.c index 8c0fe8828..c4ceb5eaf 100644 --- a/kernel/drivers/clk/pistachio/clk-pistachio.c +++ b/kernel/drivers/clk/pistachio/clk-pistachio.c @@ -159,9 +159,15 @@ PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux", "wifi_pll_mux", "bt_pll_mux" }; static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 }; -static unsigned int pistachio_critical_clks[] __initdata = { - CLK_MIPS, - CLK_PERIPH_SYS, +static unsigned int pistachio_critical_clks_core[] __initdata = { + CLK_MIPS +}; + +static unsigned int pistachio_critical_clks_sys[] __initdata = { + PERIPH_CLK_SYS, + PERIPH_CLK_SYS_BUS, + PERIPH_CLK_DDR, + PERIPH_CLK_ROM, }; static void __init pistachio_clk_init(struct device_node *np) @@ -193,8 +199,8 @@ static void __init pistachio_clk_init(struct device_node *np) pistachio_clk_register_provider(p); - pistachio_clk_force_enable(p, pistachio_critical_clks, - ARRAY_SIZE(pistachio_critical_clks)); + pistachio_clk_force_enable(p, pistachio_critical_clks_core, + ARRAY_SIZE(pistachio_critical_clks_core)); } CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init); @@ -261,6 +267,9 @@ static void __init pistachio_clk_periph_init(struct device_node *np) ARRAY_SIZE(pistachio_periph_gates)); pistachio_clk_register_provider(p); + + pistachio_clk_force_enable(p, pistachio_critical_clks_sys, + ARRAY_SIZE(pistachio_critical_clks_sys)); } CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph", pistachio_clk_periph_init); diff --git a/kernel/drivers/clk/pistachio/clk-pll.c b/kernel/drivers/clk/pistachio/clk-pll.c index de537560b..ebd0d2a3b 100644 --- a/kernel/drivers/clk/pistachio/clk-pll.c +++ b/kernel/drivers/clk/pistachio/clk-pll.c @@ -115,8 +115,7 @@ static int pll_gf40lp_frac_enable(struct clk_hw *hw) u32 val; val = pll_readl(pll, PLL_CTRL3); - val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD | - PLL_FRAC_CTRL3_DSMPD | 
PLL_FRAC_CTRL3_FOUTPOSTDIVPD | + val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD | PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD); pll_writel(pll, val, PLL_CTRL3); @@ -233,7 +232,7 @@ static int pll_gf40lp_laint_enable(struct clk_hw *hw) u32 val; val = pll_readl(pll, PLL_CTRL1); - val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD | + val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD); pll_writel(pll, val, PLL_CTRL1); diff --git a/kernel/drivers/clk/pxa/clk-pxa25x.c b/kernel/drivers/clk/pxa/clk-pxa25x.c index 6cd88d963..542e45ef5 100644 --- a/kernel/drivers/clk/pxa/clk-pxa25x.c +++ b/kernel/drivers/clk/pxa/clk-pxa25x.c @@ -79,7 +79,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info) clks[3] / 1000000, (clks[3] % 1000000) / 10000); } - return (unsigned int)clks[0]; + return (unsigned int)clks[0] / KHz; } static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw, diff --git a/kernel/drivers/clk/pxa/clk-pxa27x.c b/kernel/drivers/clk/pxa/clk-pxa27x.c index 5f9b54b02..267511df1 100644 --- a/kernel/drivers/clk/pxa/clk-pxa27x.c +++ b/kernel/drivers/clk/pxa/clk-pxa27x.c @@ -80,7 +80,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info) pr_info("System bus clock: %ld.%02ldMHz\n", clks[4] / 1000000, (clks[4] % 1000000) / 10000); } - return (unsigned int)clks[0]; + return (unsigned int)clks[0] / KHz; } bool pxa27x_is_ppll_disabled(void) diff --git a/kernel/drivers/clk/pxa/clk-pxa3xx.c b/kernel/drivers/clk/pxa/clk-pxa3xx.c index ac03ba49e..4af4eed5f 100644 --- a/kernel/drivers/clk/pxa/clk-pxa3xx.c +++ b/kernel/drivers/clk/pxa/clk-pxa3xx.c @@ -78,7 +78,7 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info) pr_info("System bus clock: %ld.%02ldMHz\n", clks[4] / 1000000, (clks[4] % 1000000) / 10000); } - return (unsigned int)clks[0]; + return (unsigned int)clks[0] / KHz; } static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw, diff --git a/kernel/drivers/clk/qcom/gcc-apq8084.c b/kernel/drivers/clk/qcom/gcc-apq8084.c index 54a756b90..457c54058 100644 --- a/kernel/drivers/clk/qcom/gcc-apq8084.c +++ b/kernel/drivers/clk/qcom/gcc-apq8084.c @@ -2105,6 +2105,7 @@ static struct clk_branch gcc_ce1_clk = { "ce1_clk_src", }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, diff --git a/kernel/drivers/clk/qcom/gcc-msm8916.c b/kernel/drivers/clk/qcom/gcc-msm8916.c index c66f7bc2a..5d75bffab 100644 --- a/kernel/drivers/clk/qcom/gcc-msm8916.c +++ b/kernel/drivers/clk/qcom/gcc-msm8916.c @@ -2278,7 +2278,7 @@ static struct clk_branch gcc_prng_ahb_clk = { .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x45004, - .enable_mask = BIT(0), + .enable_mask = BIT(8), .hw.init = &(struct clk_init_data){ .name = "gcc_prng_ahb_clk", .parent_names = (const char *[]){ diff --git a/kernel/drivers/clk/qcom/gcc-msm8974.c b/kernel/drivers/clk/qcom/gcc-msm8974.c index c39d09874..f06a082e3 100644 --- a/kernel/drivers/clk/qcom/gcc-msm8974.c +++ b/kernel/drivers/clk/qcom/gcc-msm8974.c @@ -1783,6 +1783,7 @@ static struct clk_branch gcc_ce1_clk = { "ce1_clk_src", }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, diff --git a/kernel/drivers/clk/rockchip/clk-rk3288.c b/kernel/drivers/clk/rockchip/clk-rk3288.c index d17eb4528..37f96117f 100644 --- a/kernel/drivers/clk/rockchip/clk-rk3288.c +++ b/kernel/drivers/clk/rockchip/clk-rk3288.c @@ -578,7 +578,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0, 
RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS, RK3288_CLKGATE_CON(2), 5, GFLAGS), - MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0, + MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(21), 4, 1, MFLAGS), GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0, RK3288_CLKGATE_CON(5), 3, GFLAGS), diff --git a/kernel/drivers/clk/samsung/clk-exynos4.c b/kernel/drivers/clk/samsung/clk-exynos4.c index 714d6ba78..f7890bf65 100644 --- a/kernel/drivers/clk/samsung/clk-exynos4.c +++ b/kernel/drivers/clk/samsung/clk-exynos4.c @@ -85,6 +85,7 @@ #define DIV_PERIL4 0xc560 #define DIV_PERIL5 0xc564 #define E4X12_DIV_CAM1 0xc568 +#define E4X12_GATE_BUS_FSYS1 0xc744 #define GATE_SCLK_CAM 0xc820 #define GATE_IP_CAM 0xc920 #define GATE_IP_TV 0xc924 @@ -1095,6 +1096,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { 0), GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0, 0), + GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0), GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, diff --git a/kernel/drivers/clk/samsung/clk-s5pv210.c b/kernel/drivers/clk/samsung/clk-s5pv210.c index e668e479a..bdd284249 100644 --- a/kernel/drivers/clk/samsung/clk-s5pv210.c +++ b/kernel/drivers/clk/samsung/clk-s5pv210.c @@ -828,6 +828,8 @@ static void __init __s5pv210_clk_init(struct device_node *np, s5pv210_clk_sleep_init(); + samsung_clk_of_add_provider(np, ctx); + pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n" "\tmout_epll = %ld, mout_vpll = %ld\n", is_s5p6442 ? "S5P6442" : "S5PV210", diff --git a/kernel/drivers/clk/versatile/clk-sp810.c b/kernel/drivers/clk/versatile/clk-sp810.c index c6e86a9a2..5122ef25f 100644 --- a/kernel/drivers/clk/versatile/clk-sp810.c +++ b/kernel/drivers/clk/versatile/clk-sp810.c @@ -128,8 +128,8 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec, { struct clk_sp810 *sp810 = data; - if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] > - ARRAY_SIZE(sp810->timerclken))) + if (WARN_ON(clkspec->args_count != 1 || + clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken))) return NULL; return sp810->timerclken[clkspec->args[0]].clk; diff --git a/kernel/drivers/crypto/vmx/aes.c b/kernel/drivers/crypto/vmx/aes.c index ab300ea19..41f93334c 100644 --- a/kernel/drivers/crypto/vmx/aes.c +++ b/kernel/drivers/crypto/vmx/aes.c @@ -80,6 +80,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); pagefault_enable(); @@ -97,6 +98,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) } else { pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); aes_p8_encrypt(src, dst, &ctx->enc_key); pagefault_enable(); } @@ -111,6 +113,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) } else { pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); aes_p8_decrypt(src, dst, &ctx->dec_key); pagefault_enable(); } diff --git a/kernel/drivers/crypto/vmx/aes_cbc.c b/kernel/drivers/crypto/vmx/aes_cbc.c index 1a559b7dd..c8e7f653e 100644 --- a/kernel/drivers/crypto/vmx/aes_cbc.c +++ b/kernel/drivers/crypto/vmx/aes_cbc.c @@ -81,6 +81,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const 
u8 *key, pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); pagefault_enable(); @@ -108,6 +109,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, } else { pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); @@ -143,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, } else { pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); diff --git a/kernel/drivers/crypto/vmx/aes_ctr.c b/kernel/drivers/crypto/vmx/aes_ctr.c index 96dbee4bf..266e708d6 100644 --- a/kernel/drivers/crypto/vmx/aes_ctr.c +++ b/kernel/drivers/crypto/vmx/aes_ctr.c @@ -79,6 +79,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); pagefault_enable(); @@ -97,6 +98,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); pagefault_enable(); @@ -127,6 +129,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv); pagefault_enable(); diff --git a/kernel/drivers/crypto/vmx/ghash.c b/kernel/drivers/crypto/vmx/ghash.c index d0ffe277a..917b3f09e 100644 --- a/kernel/drivers/crypto/vmx/ghash.c +++ b/kernel/drivers/crypto/vmx/ghash.c @@ -116,6 +116,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); enable_kernel_fp(); gcm_init_p8(ctx->htable, (const u64 *) key); pagefault_enable(); @@ -142,6 +143,7 @@ static int p8_ghash_update(struct shash_desc *desc, GHASH_DIGEST_SIZE - dctx->bytes); pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); enable_kernel_fp(); gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, GHASH_DIGEST_SIZE); @@ -154,6 +156,7 @@ static int p8_ghash_update(struct shash_desc *desc, if (len) { pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); enable_kernel_fp(); gcm_ghash_p8(dctx->shash, ctx->htable, src, len); pagefault_enable(); @@ -182,6 +185,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) dctx->buffer[i] = 0; pagefault_disable(); enable_kernel_altivec(); + enable_kernel_vsx(); enable_kernel_fp(); gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, GHASH_DIGEST_SIZE); diff --git a/kernel/drivers/crypto/vmx/ghashp8-ppc.pl b/kernel/drivers/crypto/vmx/ghashp8-ppc.pl index 0a6f89983..d8429cb71 100644 --- a/kernel/drivers/crypto/vmx/ghashp8-ppc.pl +++ b/kernel/drivers/crypto/vmx/ghashp8-ppc.pl @@ -61,6 +61,12 @@ $code=<<___; mtspr 256,r0 li r10,0x30 lvx_u $H,0,r4 # load H + le?xor r7,r7,r7 + le?addi r7,r7,0x8 # need a vperm start with 08 + le?lvsr 5,0,r7 + le?vspltisb 6,0x0f + le?vxor 5,5,6 # set a b-endian mask + le?vperm $H,$H,$H,5 vspltisb $xC2,-16 # 0xf0 vspltisb $t0,1 # one diff --git a/kernel/drivers/gpu/drm/i915/i915_drv.c b/kernel/drivers/gpu/drm/i915/i915_drv.c index 
a19d2c71e..fb91df163 100644 --- a/kernel/drivers/gpu/drm/i915/i915_drv.c +++ b/kernel/drivers/gpu/drm/i915/i915_drv.c @@ -647,15 +647,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) pci_disable_device(drm_dev->pdev); /* - * During hibernation on some GEN4 platforms the BIOS may try to access + * During hibernation on some platforms the BIOS may try to access * the device even though it's already in D3 and hang the machine. So * leave the device in D0 on those platforms and hope the BIOS will - * power down the device properly. Platforms where this was seen: - * Lenovo Thinkpad X301, X61s + * power down the device properly. The issue was seen on multiple old + * GENs with different BIOS vendors, so having an explicit blacklist + * is inpractical; apply the workaround on everything pre GEN6. The + * platforms where the issue was seen: + * Lenovo Thinkpad X301, X61s, X60, T60, X41 + * Fujitsu FSC S7110 + * Acer Aspire 1830T */ - if (!(hibernation && - drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO && - INTEL_INFO(dev_priv)->gen == 4)) + if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) pci_set_power_state(drm_dev->pdev, PCI_D3hot); return 0; diff --git a/kernel/drivers/gpu/drm/i915/i915_drv.h b/kernel/drivers/gpu/drm/i915/i915_drv.h index 683a9b004..7d53d7e15 100644 --- a/kernel/drivers/gpu/drm/i915/i915_drv.h +++ b/kernel/drivers/gpu/drm/i915/i915_drv.h @@ -3190,13 +3190,13 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ - u32 upper, lower, tmp; \ - tmp = I915_READ(upper_reg); \ + u32 upper, lower, old_upper, loop = 0; \ + upper = I915_READ(upper_reg); \ do { \ - upper = tmp; \ + old_upper = upper; \ lower = I915_READ(lower_reg); \ - tmp = I915_READ(upper_reg); \ - } while (upper != tmp); \ + upper = I915_READ(upper_reg); \ + } while (upper != old_upper && loop++ < 2); \ (u64)upper << 32 | lower; }) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) diff --git a/kernel/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/kernel/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a3eadb970..a67a351e8 100644 --- a/kernel/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/kernel/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1026,6 +1026,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, u32 old_read = obj->base.read_domains; u32 old_write = obj->base.write_domain; + obj->dirty = 1; /* be paranoid */ obj->base.write_domain = obj->base.pending_write_domain; if (obj->base.write_domain == 0) obj->base.pending_read_domains |= obj->base.read_domains; @@ -1033,7 +1034,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, i915_vma_move_to_active(vma, ring); if (obj->base.write_domain) { - obj->dirty = 1; i915_gem_request_assign(&obj->last_write_req, req); intel_fb_obj_invalidate(obj, ring, ORIGIN_CS); diff --git a/kernel/drivers/gpu/drm/i915/intel_display.c b/kernel/drivers/gpu/drm/i915/intel_display.c index 4021633ca..338d1de57 100644 --- a/kernel/drivers/gpu/drm/i915/intel_display.c +++ b/kernel/drivers/gpu/drm/i915/intel_display.c @@ -13781,6 +13781,24 @@ void intel_modeset_init(struct drm_device *dev) if (INTEL_INFO(dev)->num_pipes == 0) return; + /* + * There may be no VBT; and if the BIOS enabled SSC we can + * just keep using it to avoid unnecessary flicker. Whereas if the + * BIOS isn't using it, don't assume it will work even if the VBT + * indicates as much. 
+ */ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { + bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & + DREF_SSC1_ENABLE); + + if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { + DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", + bios_lvds_use_ssc ? "en" : "dis", + dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); + dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; + } + } + intel_init_display(dev); intel_init_audio(dev); @@ -14266,7 +14284,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, void intel_modeset_gem_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *c; struct drm_i915_gem_object *obj; int ret; @@ -14275,16 +14292,6 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev); mutex_unlock(&dev->struct_mutex); - /* - * There may be no VBT; and if the BIOS enabled SSC we can - * just keep using it to avoid unnecessary flicker. Whereas if the - * BIOS isn't using it, don't assume it will work even if the VBT - * indicates as much. - */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & - DREF_SSC1_ENABLE); - intel_modeset_init_hw(dev); intel_setup_overlay(dev); diff --git a/kernel/drivers/gpu/drm/i915/intel_dp.c b/kernel/drivers/gpu/drm/i915/intel_dp.c index b1fe32b11..fb2983f77 100644 --- a/kernel/drivers/gpu/drm/i915/intel_dp.c +++ b/kernel/drivers/gpu/drm/i915/intel_dp.c @@ -4691,9 +4691,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) intel_dp_probe_oui(intel_dp); - if (!intel_dp_probe_mst(intel_dp)) + if (!intel_dp_probe_mst(intel_dp)) { + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + intel_dp_check_link_status(intel_dp); + drm_modeset_unlock(&dev->mode_config.connection_mutex); goto mst_fail; - + } } else { if (intel_dp->is_mst) { if (intel_dp_check_mst_status(intel_dp) == -EINVAL) @@ -4701,10 +4704,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) } if (!intel_dp->is_mst) { - /* - * we'll check the link status via the normal hot plug path later - - * but for short hpds we should check it now - */ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); intel_dp_check_link_status(intel_dp); drm_modeset_unlock(&dev->mode_config.connection_mutex); diff --git a/kernel/drivers/gpu/drm/i915/intel_dsi.c b/kernel/drivers/gpu/drm/i915/intel_dsi.c index 51966426a..c7a0b8d8f 100644 --- a/kernel/drivers/gpu/drm/i915/intel_dsi.c +++ b/kernel/drivers/gpu/drm/i915/intel_dsi.c @@ -1036,11 +1036,7 @@ void intel_dsi_init(struct drm_device *dev) intel_connector->unregister = intel_connector_unregister; /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */ - if (dev_priv->vbt.dsi.config->dual_link) { - /* XXX: does dual link work on either pipe? */ - intel_encoder->crtc_mask = (1 << PIPE_A); - intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C)); - } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) { + if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) { intel_encoder->crtc_mask = (1 << PIPE_A); intel_dsi->ports = (1 << PORT_A); } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) { @@ -1048,6 +1044,9 @@ void intel_dsi_init(struct drm_device *dev) intel_dsi->ports = (1 << PORT_C); } + if (dev_priv->vbt.dsi.config->dual_link) + intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C)); + /* Create a DSI host (and a device) for each port. 
*/ for_each_dsi_port(port, intel_dsi->ports) { struct intel_dsi_host *host; diff --git a/kernel/drivers/gpu/drm/qxl/qxl_display.c b/kernel/drivers/gpu/drm/qxl/qxl_display.c index 4a0a8b29b..32248791b 100644 --- a/kernel/drivers/gpu/drm/qxl/qxl_display.c +++ b/kernel/drivers/gpu/drm/qxl/qxl_display.c @@ -160,9 +160,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector, *pwidth = head->width; *pheight = head->height; drm_mode_probed_add(connector, mode); + /* remember the last custom size for mode validation */ + qdev->monitors_config_width = mode->hdisplay; + qdev->monitors_config_height = mode->vdisplay; return 1; } +static struct mode_size { + int w; + int h; +} common_modes[] = { + { 640, 480}, + { 720, 480}, + { 800, 600}, + { 848, 480}, + {1024, 768}, + {1152, 768}, + {1280, 720}, + {1280, 800}, + {1280, 854}, + {1280, 960}, + {1280, 1024}, + {1440, 900}, + {1400, 1050}, + {1680, 1050}, + {1600, 1200}, + {1920, 1080}, + {1920, 1200} +}; + static int qxl_add_common_modes(struct drm_connector *connector, unsigned pwidth, unsigned pheight) @@ -170,29 +196,6 @@ static int qxl_add_common_modes(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct drm_display_mode *mode = NULL; int i; - struct mode_size { - int w; - int h; - } common_modes[] = { - { 640, 480}, - { 720, 480}, - { 800, 600}, - { 848, 480}, - {1024, 768}, - {1152, 768}, - {1280, 720}, - {1280, 800}, - {1280, 854}, - {1280, 960}, - {1280, 1024}, - {1440, 900}, - {1400, 1050}, - {1680, 1050}, - {1600, 1200}, - {1920, 1080}, - {1920, 1200} - }; - for (i = 0; i < ARRAY_SIZE(common_modes); i++) { mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); @@ -823,11 +826,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector) static int qxl_conn_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_device *ddev = connector->dev; + struct qxl_device *qdev = ddev->dev_private; + int i; + /* TODO: is this called for user defined modes? 
(xrandr --add-mode) * TODO: check that the mode fits in the framebuffer */ - DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay, - mode->vdisplay, mode->status); - return MODE_OK; + + if(qdev->monitors_config_width == mode->hdisplay && + qdev->monitors_config_height == mode->vdisplay) + return MODE_OK; + + for (i = 0; i < ARRAY_SIZE(common_modes); i++) { + if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay) + return MODE_OK; + } + return MODE_BAD; } static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) diff --git a/kernel/drivers/gpu/drm/qxl/qxl_drv.h b/kernel/drivers/gpu/drm/qxl/qxl_drv.h index 7c6cafe21..e66143cc1 100644 --- a/kernel/drivers/gpu/drm/qxl/qxl_drv.h +++ b/kernel/drivers/gpu/drm/qxl/qxl_drv.h @@ -325,6 +325,8 @@ struct qxl_device { struct work_struct fb_work; struct drm_property *hotplug_mode_update_property; + int monitors_config_width; + int monitors_config_height; }; /* forward declaration for QXL_INFO_IO */ diff --git a/kernel/drivers/gpu/drm/radeon/atombios_dp.c b/kernel/drivers/gpu/drm/radeon/atombios_dp.c index b435c859d..447dbfa6c 100644 --- a/kernel/drivers/gpu/drm/radeon/atombios_dp.c +++ b/kernel/drivers/gpu/drm/radeon/atombios_dp.c @@ -171,8 +171,9 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) return -E2BIG; tx_buf[0] = msg->address & 0xff; - tx_buf[1] = msg->address >> 8; - tx_buf[2] = msg->request << 4; + tx_buf[1] = (msg->address >> 8) & 0xff; + tx_buf[2] = (msg->request << 4) | + ((msg->address >> 16) & 0xf); tx_buf[3] = msg->size ? (msg->size - 1) : 0; switch (msg->request & ~DP_AUX_I2C_MOT) { diff --git a/kernel/drivers/gpu/drm/radeon/radeon_audio.c b/kernel/drivers/gpu/drm/radeon/radeon_audio.c index 59b3d3221..d77dd1430 100644 --- a/kernel/drivers/gpu/drm/radeon/radeon_audio.c +++ b/kernel/drivers/gpu/drm/radeon/radeon_audio.c @@ -522,13 +522,15 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, return err; } - if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) { - if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB) - frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; - else - frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL; - } else { - frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) { + if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) { + if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB) + frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; + else + frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL; + } else { + frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + } } err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); diff --git a/kernel/drivers/gpu/drm/radeon/radeon_combios.c b/kernel/drivers/gpu/drm/radeon/radeon_combios.c index c097d3a82..a9b01bcf7 100644 --- a/kernel/drivers/gpu/drm/radeon/radeon_combios.c +++ b/kernel/drivers/gpu/drm/radeon/radeon_combios.c @@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_device == 0x30ae) return; + /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume + * - it hangs on resume inside the dynclk 1 table. 
+ */ + if (rdev->family == CHIP_RS480 && + rdev->pdev->subsystem_vendor == 0x103c && + rdev->pdev->subsystem_device == 0x280a) + return; + /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); if (table) diff --git a/kernel/drivers/gpu/drm/radeon/radeon_connectors.c b/kernel/drivers/gpu/drm/radeon/radeon_connectors.c index 94b21ae70..5a2cafb4f 100644 --- a/kernel/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/kernel/drivers/gpu/drm/radeon/radeon_connectors.c @@ -95,6 +95,11 @@ void radeon_connector_hotplug(struct drm_connector *connector) if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); } else if (radeon_dp_needs_link_train(radeon_connector)) { + /* Don't try to start link training before we + * have the dpcd */ + if (!radeon_dp_getdpcd(radeon_connector)) + return; + /* set it to OFF so that drm_helper_connector_dpms() * won't return immediately since the current state * is ON at this point. diff --git a/kernel/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/kernel/drivers/gpu/drm/radeon/radeon_dp_auxch.c index fcbd60bb0..3b0c229d7 100644 --- a/kernel/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/kernel/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -116,8 +116,8 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg AUX_SW_WR_BYTES(bytes)); /* write the data header into the registers */ - /* request, addres, msg size */ - byte = (msg->request << 4); + /* request, address, msg size */ + byte = (msg->request << 4) | ((msg->address >> 16) & 0xf); WREG32(AUX_SW_DATA + aux_offset[instance], AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE); diff --git a/kernel/drivers/hid/hid-cp2112.c b/kernel/drivers/hid/hid-cp2112.c index a2dbbbe0d..39bf74793 100644 --- a/kernel/drivers/hid/hid-cp2112.c +++ b/kernel/drivers/hid/hid-cp2112.c @@ -537,7 +537,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; - __be16 word; + __le16 word; ssize_t count; size_t read_length = 0; unsigned int retries; @@ -554,7 +554,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, if (I2C_SMBUS_READ == read_write) count = cp2112_read_req(buf, addr, read_length); else - count = cp2112_write_req(buf, addr, data->byte, NULL, + count = cp2112_write_req(buf, addr, command, NULL, 0); break; case I2C_SMBUS_BYTE_DATA: @@ -569,7 +569,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, break; case I2C_SMBUS_WORD_DATA: read_length = 2; - word = cpu_to_be16(data->word); + word = cpu_to_le16(data->word); if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, @@ -582,7 +582,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, size = I2C_SMBUS_WORD_DATA; read_write = I2C_SMBUS_READ; read_length = 2; - word = cpu_to_be16(data->word); + word = cpu_to_le16(data->word); count = cp2112_write_read_req(buf, addr, read_length, command, (u8 *)&word, 2); @@ -675,7 +675,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, data->byte = buf[0]; break; case I2C_SMBUS_WORD_DATA: - data->word = be16_to_cpup((__be16 *)buf); + data->word = le16_to_cpup((__le16 *)buf); break; case I2C_SMBUS_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { diff --git a/kernel/drivers/hid/usbhid/hid-core.c b/kernel/drivers/hid/usbhid/hid-core.c index bfbe1bedd..eab5bd6a2 100644 --- a/kernel/drivers/hid/usbhid/hid-core.c +++ 
b/kernel/drivers/hid/usbhid/hid-core.c @@ -164,7 +164,7 @@ static void hid_io_error(struct hid_device *hid) if (time_after(jiffies, usbhid->stop_retry)) { /* Retries failed, so do a port reset unless we lack bandwidth*/ - if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl) + if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl) && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) { schedule_work(&usbhid->reset_work); diff --git a/kernel/drivers/iio/gyro/Kconfig b/kernel/drivers/iio/gyro/Kconfig index b3d0e94f7..8d2439345 100644 --- a/kernel/drivers/iio/gyro/Kconfig +++ b/kernel/drivers/iio/gyro/Kconfig @@ -53,7 +53,8 @@ config ADXRS450 config BMG160 tristate "BOSCH BMG160 Gyro Sensor" depends on I2C - select IIO_TRIGGERED_BUFFER if IIO_BUFFER + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Bosch BMG160 Tri-axis Gyro Sensor driver. This driver also supports BMI055 gyroscope. diff --git a/kernel/drivers/iio/imu/adis16400_core.c b/kernel/drivers/iio/imu/adis16400_core.c index 2fd68f221..d42e4fe2c 100644 --- a/kernel/drivers/iio/imu/adis16400_core.c +++ b/kernel/drivers/iio/imu/adis16400_core.c @@ -780,7 +780,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SERIAL_NUMBER | ADIS16400_BURST_DIAG_STAT, - .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */ + .gyro_scale_micro = IIO_DEGREE_TO_RAD(40000), /* 0.04 deg/s */ .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */ .temp_scale_nano = 73860000, /* 0.07386 C */ .temp_offset = 31000000 / 73860, /* 31 C = 0x00 */ diff --git a/kernel/drivers/iio/imu/adis16480.c b/kernel/drivers/iio/imu/adis16480.c index 989605dd6..b94bfd3f5 100644 --- a/kernel/drivers/iio/imu/adis16480.c +++ b/kernel/drivers/iio/imu/adis16480.c @@ -110,6 +110,10 @@ struct adis16480_chip_info { unsigned int num_channels; const struct iio_chan_spec *channels; + unsigned int gyro_max_val; + unsigned int gyro_max_scale; + unsigned int accel_max_val; + unsigned int accel_max_scale; }; struct adis16480 { @@ -497,19 +501,21 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev, static int adis16480_read_raw(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, int *val, int *val2, long info) { + struct adis16480 *st = iio_priv(indio_dev); + switch (info) { case IIO_CHAN_INFO_RAW: return adis_single_conversion(indio_dev, chan, 0, val); case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_ANGL_VEL: - *val = 0; - *val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */ - return IIO_VAL_INT_PLUS_MICRO; + *val = st->chip_info->gyro_max_scale; + *val2 = st->chip_info->gyro_max_val; + return IIO_VAL_FRACTIONAL; case IIO_ACCEL: - *val = 0; - *val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */ - return IIO_VAL_INT_PLUS_MICRO; + *val = st->chip_info->accel_max_scale; + *val2 = st->chip_info->accel_max_val; + return IIO_VAL_FRACTIONAL; case IIO_MAGN: *val = 0; *val2 = 100; /* 0.0001 gauss */ @@ -674,18 +680,39 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16375] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), + /* + * storing the value in rad/degree and the scale in degree + * gives us the result in rad and better precession than + * storing the scale directly in rad. 
+ */ + .gyro_max_val = IIO_RAD_TO_DEGREE(22887), + .gyro_max_scale = 300, + .accel_max_val = IIO_M_S_2_TO_G(21973), + .accel_max_scale = 18, }, [ADIS16480] = { .channels = adis16480_channels, .num_channels = ARRAY_SIZE(adis16480_channels), + .gyro_max_val = IIO_RAD_TO_DEGREE(22500), + .gyro_max_scale = 450, + .accel_max_val = IIO_M_S_2_TO_G(12500), + .accel_max_scale = 5, }, [ADIS16485] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), + .gyro_max_val = IIO_RAD_TO_DEGREE(22500), + .gyro_max_scale = 450, + .accel_max_val = IIO_M_S_2_TO_G(20000), + .accel_max_scale = 5, }, [ADIS16488] = { .channels = adis16480_channels, .num_channels = ARRAY_SIZE(adis16480_channels), + .gyro_max_val = IIO_RAD_TO_DEGREE(22500), + .gyro_max_scale = 450, + .accel_max_val = IIO_M_S_2_TO_G(22500), + .accel_max_scale = 18, }, }; diff --git a/kernel/drivers/iio/industrialio-buffer.c b/kernel/drivers/iio/industrialio-buffer.c index df919f44d..7fa280b28 100644 --- a/kernel/drivers/iio/industrialio-buffer.c +++ b/kernel/drivers/iio/industrialio-buffer.c @@ -151,7 +151,7 @@ unsigned int iio_buffer_poll(struct file *filp, struct iio_buffer *rb = indio_dev->buffer; if (!indio_dev->info) - return -ENODEV; + return 0; poll_wait(filp, &rb->pollq, wait); if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) diff --git a/kernel/drivers/iio/industrialio-event.c b/kernel/drivers/iio/industrialio-event.c index a99692ba9..69b8c338f 100644 --- a/kernel/drivers/iio/industrialio-event.c +++ b/kernel/drivers/iio/industrialio-event.c @@ -84,7 +84,7 @@ static unsigned int iio_event_poll(struct file *filep, unsigned int events = 0; if (!indio_dev->info) - return -ENODEV; + return events; poll_wait(filep, &ev_int->wait, wait); diff --git a/kernel/drivers/infiniband/core/uverbs.h b/kernel/drivers/infiniband/core/uverbs.h index b716b0815..bebf11a66 100644 --- a/kernel/drivers/infiniband/core/uverbs.h +++ b/kernel/drivers/infiniband/core/uverbs.h @@ -85,7 +85,7 @@ */ struct ib_uverbs_device { - struct kref ref; + atomic_t refcount; int num_comp_vectors; struct completion comp; struct device *dev; @@ -94,6 +94,7 @@ struct ib_uverbs_device { struct cdev cdev; struct rb_root xrcd_tree; struct mutex xrcd_tree_mutex; + struct kobject kobj; }; struct ib_uverbs_event_file { diff --git a/kernel/drivers/infiniband/core/uverbs_cmd.c b/kernel/drivers/infiniband/core/uverbs_cmd.c index a9f048990..ccc2494b4 100644 --- a/kernel/drivers/infiniband/core/uverbs_cmd.c +++ b/kernel/drivers/infiniband/core/uverbs_cmd.c @@ -2244,6 +2244,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, next->send_flags = user_wr->send_flags; if (is_ud) { + if (next->opcode != IB_WR_SEND && + next->opcode != IB_WR_SEND_WITH_IMM) { + ret = -EINVAL; + goto out_put; + } + next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext); if (!next->wr.ud.ah) { @@ -2283,9 +2289,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, user_wr->wr.atomic.compare_add; next->wr.atomic.swap = user_wr->wr.atomic.swap; next->wr.atomic.rkey = user_wr->wr.atomic.rkey; + case IB_WR_SEND: break; default: - break; + ret = -EINVAL; + goto out_put; } } diff --git a/kernel/drivers/infiniband/core/uverbs_main.c b/kernel/drivers/infiniband/core/uverbs_main.c index 88cce9bb7..09686d49d 100644 --- a/kernel/drivers/infiniband/core/uverbs_main.c +++ b/kernel/drivers/infiniband/core/uverbs_main.c @@ -129,14 +129,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, static void ib_uverbs_add_one(struct ib_device *device); static void 
ib_uverbs_remove_one(struct ib_device *device); -static void ib_uverbs_release_dev(struct kref *ref) +static void ib_uverbs_release_dev(struct kobject *kobj) { struct ib_uverbs_device *dev = - container_of(ref, struct ib_uverbs_device, ref); + container_of(kobj, struct ib_uverbs_device, kobj); - complete(&dev->comp); + kfree(dev); } +static struct kobj_type ib_uverbs_dev_ktype = { + .release = ib_uverbs_release_dev, +}; + static void ib_uverbs_release_event_file(struct kref *ref) { struct ib_uverbs_event_file *file = @@ -302,13 +306,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, return context->device->dealloc_ucontext(context); } +static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) +{ + complete(&dev->comp); +} + static void ib_uverbs_release_file(struct kref *ref) { struct ib_uverbs_file *file = container_of(ref, struct ib_uverbs_file, ref); module_put(file->device->ib_dev->owner); - kref_put(&file->device->ref, ib_uverbs_release_dev); + if (atomic_dec_and_test(&file->device->refcount)) + ib_uverbs_comp_dev(file->device); kfree(file); } @@ -742,9 +752,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) int ret; dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); - if (dev) - kref_get(&dev->ref); - else + if (!atomic_inc_not_zero(&dev->refcount)) return -ENXIO; if (!try_module_get(dev->ib_dev->owner)) { @@ -765,6 +773,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) mutex_init(&file->mutex); filp->private_data = file; + kobject_get(&dev->kobj); return nonseekable_open(inode, filp); @@ -772,13 +781,16 @@ err_module: module_put(dev->ib_dev->owner); err: - kref_put(&dev->ref, ib_uverbs_release_dev); + if (atomic_dec_and_test(&dev->refcount)) + ib_uverbs_comp_dev(dev); + return ret; } static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; + struct ib_uverbs_device *dev = file->device; ib_uverbs_cleanup_ucontext(file, file->ucontext); @@ -786,6 +798,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) kref_put(&file->async_file->ref, ib_uverbs_release_event_file); kref_put(&file->ref, ib_uverbs_release_file); + kobject_put(&dev->kobj); return 0; } @@ -881,10 +894,11 @@ static void ib_uverbs_add_one(struct ib_device *device) if (!uverbs_dev) return; - kref_init(&uverbs_dev->ref); + atomic_set(&uverbs_dev->refcount, 1); init_completion(&uverbs_dev->comp); uverbs_dev->xrcd_tree = RB_ROOT; mutex_init(&uverbs_dev->xrcd_tree_mutex); + kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype); spin_lock(&map_lock); devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); @@ -911,6 +925,7 @@ static void ib_uverbs_add_one(struct ib_device *device) cdev_init(&uverbs_dev->cdev, NULL); uverbs_dev->cdev.owner = THIS_MODULE; uverbs_dev->cdev.ops = device->mmap ? 
&uverbs_mmap_fops : &uverbs_fops; + uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj; kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum); if (cdev_add(&uverbs_dev->cdev, base, 1)) goto err_cdev; @@ -941,9 +956,10 @@ err_cdev: clear_bit(devnum, overflow_map); err: - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); + if (atomic_dec_and_test(&uverbs_dev->refcount)) + ib_uverbs_comp_dev(uverbs_dev); wait_for_completion(&uverbs_dev->comp); - kfree(uverbs_dev); + kobject_put(&uverbs_dev->kobj); return; } @@ -963,9 +979,10 @@ static void ib_uverbs_remove_one(struct ib_device *device) else clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map); - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); + if (atomic_dec_and_test(&uverbs_dev->refcount)) + ib_uverbs_comp_dev(uverbs_dev); wait_for_completion(&uverbs_dev->comp); - kfree(uverbs_dev); + kobject_put(&uverbs_dev->kobj); } static char *uverbs_devnode(struct device *dev, umode_t *mode) diff --git a/kernel/drivers/infiniband/hw/mlx4/ah.c b/kernel/drivers/infiniband/hw/mlx4/ah.c index f50a54622..33fdd5012 100644 --- a/kernel/drivers/infiniband/hw/mlx4/ah.c +++ b/kernel/drivers/infiniband/hw/mlx4/ah.c @@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) enum rdma_link_layer ll; memset(ah_attr, 0, sizeof *ah_attr); - ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24; ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num); + if (ll == IB_LINK_LAYER_ETHERNET) + ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29; + else + ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; + ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0; if (ah->av.ib.stat_rate) ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET; diff --git a/kernel/drivers/infiniband/hw/mlx4/cq.c b/kernel/drivers/infiniband/hw/mlx4/cq.c index 0176caa57..2857ed897 100644 --- a/kernel/drivers/infiniband/hw/mlx4/cq.c +++ b/kernel/drivers/infiniband/hw/mlx4/cq.c @@ -629,7 +629,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, * simulated FLUSH_ERR completions */ list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { - mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1); + mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1); if (*npolled >= num_entries) goto out; } diff --git a/kernel/drivers/infiniband/hw/mlx4/mcg.c b/kernel/drivers/infiniband/hw/mlx4/mcg.c index ed327e6c8..a0559a8af 100644 --- a/kernel/drivers/infiniband/hw/mlx4/mcg.c +++ b/kernel/drivers/infiniband/hw/mlx4/mcg.c @@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad) { struct mlx4_ib_dev *dev = ctx->dev; struct ib_ah_attr ah_attr; + unsigned long flags; - spin_lock(&dev->sm_lock); + spin_lock_irqsave(&dev->sm_lock, flags); if (!dev->sm_ah[ctx->port - 1]) { /* port is not yet Active, sm_ah not ready */ - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); return -EAGAIN; } mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, NULL, mad); diff --git a/kernel/drivers/infiniband/hw/mlx4/sysfs.c b/kernel/drivers/infiniband/hw/mlx4/sysfs.c index 6797108ce..69fb5ba94 100644 --- 
a/kernel/drivers/infiniband/hw/mlx4/sysfs.c +++ b/kernel/drivers/infiniband/hw/mlx4/sysfs.c @@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) struct mlx4_port *p; int i; int ret; + int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) == + IB_LINK_LAYER_ETHERNET; p = kzalloc(sizeof *p, GFP_KERNEL); if (!p) @@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) p->pkey_group.name = "pkey_idx"; p->pkey_group.attrs = - alloc_group_attrs(show_port_pkey, store_port_pkey, + alloc_group_attrs(show_port_pkey, + is_eth ? NULL : store_port_pkey, dev->dev->caps.pkey_table_len[port_num]); if (!p->pkey_group.attrs) { ret = -ENOMEM; diff --git a/kernel/drivers/infiniband/hw/mlx5/mr.c b/kernel/drivers/infiniband/hw/mlx5/mr.c index 71c593583..0c52f0787 100644 --- a/kernel/drivers/infiniband/hw/mlx5/mr.c +++ b/kernel/drivers/infiniband/hw/mlx5/mr.c @@ -1119,19 +1119,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return &mr->ibmr; error: - /* - * Destroy the umem *before* destroying the MR, to ensure we - * will not have any in-flight notifiers when destroying the - * MR. - * - * As the MR is completely invalid to begin with, and this - * error path is only taken if we can't push the mr entry into - * the pagefault tree, this is safe. - */ - ib_umem_release(umem); - /* Kill the MR, and return an error code. */ - clean_mr(mr); return ERR_PTR(err); } diff --git a/kernel/drivers/infiniband/hw/qib/qib_keys.c b/kernel/drivers/infiniband/hw/qib/qib_keys.c index ad843c786..5afaa2185 100644 --- a/kernel/drivers/infiniband/hw/qib/qib_keys.c +++ b/kernel/drivers/infiniband/hw/qib/qib_keys.c @@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) * unrestricted LKEY. */ rkt->gen++; + /* + * bits are capped in qib_verbs.c to insure enough bits + * for generation number + */ mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen) << 8); diff --git a/kernel/drivers/infiniband/hw/qib/qib_verbs.c b/kernel/drivers/infiniband/hw/qib/qib_verbs.c index 4a3599890..9dd5d9a05 100644 --- a/kernel/drivers/infiniband/hw/qib/qib_verbs.c +++ b/kernel/drivers/infiniband/hw/qib/qib_verbs.c @@ -40,6 +40,7 @@ #include <linux/rculist.h> #include <linux/mm.h> #include <linux/random.h> +#include <linux/vmalloc.h> #include "qib.h" #include "qib_common.h" @@ -2089,10 +2090,16 @@ int qib_register_ib_device(struct qib_devdata *dd) * the LKEY). The remaining bits act as a generation number or tag. 
*/ spin_lock_init(&dev->lk_table.lock); + /* insure generation is at least 4 bits see keys.c */ + if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) { + qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n", + ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS); + ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS; + } dev->lk_table.max = 1 << ib_qib_lkey_table_size; lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); dev->lk_table.table = (struct qib_mregion __rcu **) - __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); + vmalloc(lk_tab_size); if (dev->lk_table.table == NULL) { ret = -ENOMEM; goto err_lk; @@ -2265,7 +2272,7 @@ err_tx: sizeof(struct qib_pio_header), dev->pio_hdrs, dev->pio_hdrs_phys); err_hdrs: - free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); + vfree(dev->lk_table.table); err_lk: kfree(dev->qp_table); err_qpt: @@ -2319,8 +2326,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd) sizeof(struct qib_pio_header), dev->pio_hdrs, dev->pio_hdrs_phys); lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); - free_pages((unsigned long) dev->lk_table.table, - get_order(lk_tab_size)); + vfree(dev->lk_table.table); kfree(dev->qp_table); } diff --git a/kernel/drivers/infiniband/hw/qib/qib_verbs.h b/kernel/drivers/infiniband/hw/qib/qib_verbs.h index bfc8948fd..44ca28c83 100644 --- a/kernel/drivers/infiniband/hw/qib/qib_verbs.h +++ b/kernel/drivers/infiniband/hw/qib/qib_verbs.h @@ -647,6 +647,8 @@ struct qib_qpn_table { struct qpn_map map[QPNMAP_ENTRIES]; }; +#define MAX_LKEY_TABLE_BITS 23 + struct qib_lkey_table { spinlock_t lock; /* protect changes in this struct */ u32 next; /* next unused index (speeds search) */ diff --git a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c index 6a594aac2..c933d882c 100644 --- a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task, goto out; } + tx_desc->mapped = true; tx_desc->dma_addr = dma_addr; tx_desc->tx_sg[0].addr = tx_desc->dma_addr; tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; @@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task) static void iscsi_iser_cleanup_task(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; - struct iser_tx_desc *tx_desc = &iser_task->desc; - struct iser_conn *iser_conn = task->conn->dd_data; + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct iser_conn *iser_conn = task->conn->dd_data; struct iser_device *device = iser_conn->ib_conn.device; /* DEVICE_REMOVAL event might have already released the device */ if (!device) return; - ib_dma_unmap_single(device->ib_device, - tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); + if (likely(tx_desc->mapped)) { + ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + tx_desc->mapped = false; + } /* mgmt tasks do not need special cleanup */ if (!task->sc) diff --git a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h index 262ba1f8e..d2b6caf76 100644 --- a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -270,6 +270,7 @@ enum iser_desc_type { * sg[1] optionally points to either of immediate data * unsolicited data-out or control * @num_sge: number sges used on this TX task + * @mapped: Is the task header mapped */ struct iser_tx_desc { struct iser_hdr iser_header; @@ 
-278,6 +279,7 @@ struct iser_tx_desc { u64 dma_addr; struct ib_sge tx_sg[2]; int num_sge; + bool mapped; }; #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ diff --git a/kernel/drivers/infiniband/ulp/iser/iser_initiator.c b/kernel/drivers/infiniband/ulp/iser/iser_initiator.c index 3e2118e8e..0a47f42fe 100644 --- a/kernel/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/kernel/drivers/infiniband/ulp/iser/iser_initiator.c @@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn, unsigned long buf_offset; unsigned long data_seg_len; uint32_t itt; - int err = 0; + int err; struct ib_sge *tx_dsg; itt = (__force uint32_t)hdr->itt; @@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn, memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); /* build the tx desc */ - iser_initialize_task_headers(task, tx_desc); + err = iser_initialize_task_headers(task, tx_desc); + if (err) + goto send_data_out_error; mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; tx_dsg = &tx_desc->tx_sg[1]; @@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn, send_data_out_error: kmem_cache_free(ig.desc_cache, tx_desc); - iser_err("conn %p failed err %d\n",conn, err); + iser_err("conn %p failed err %d\n", conn, err); return err; } diff --git a/kernel/drivers/infiniband/ulp/srp/ib_srp.c b/kernel/drivers/infiniband/ulp/srp/ib_srp.c index 75c01b27b..025f93105 100644 --- a/kernel/drivers/infiniband/ulp/srp/ib_srp.c +++ b/kernel/drivers/infiniband/ulp/srp/ib_srp.c @@ -2761,6 +2761,13 @@ static int srp_sdev_count(struct Scsi_Host *host) return c; } +/* + * Return values: + * < 0 upon failure. Caller is responsible for SRP target port cleanup. + * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port + * removal has been scheduled. + * 0 and target->state != SRP_TARGET_REMOVED upon success. 
+ */ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) { struct srp_rport_identifiers ids; @@ -3266,7 +3273,7 @@ static ssize_t srp_create_target(struct device *dev, srp_free_ch_ib(target, ch); srp_free_req_data(target, ch); target->ch_count = ch - target->ch; - break; + goto connected; } } @@ -3276,6 +3283,7 @@ static ssize_t srp_create_target(struct device *dev, node_idx++; } +connected: target->scsi_host->nr_hw_queues = target->ch_count; ret = srp_add_target(host, target); @@ -3298,6 +3306,8 @@ out: mutex_unlock(&host->add_target_mutex); scsi_host_put(target->scsi_host); + if (ret < 0) + scsi_host_put(target->scsi_host); return ret; diff --git a/kernel/drivers/input/evdev.c b/kernel/drivers/input/evdev.c index a18f41b89..2ae522f0d 100644 --- a/kernel/drivers/input/evdev.c +++ b/kernel/drivers/input/evdev.c @@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id) { struct evdev_client *client = file->private_data; struct evdev *evdev = client->evdev; - int retval; - retval = mutex_lock_interruptible(&evdev->mutex); - if (retval) - return retval; + mutex_lock(&evdev->mutex); - if (!evdev->exist || client->revoked) - retval = -ENODEV; - else - retval = input_flush_device(&evdev->handle, file); + if (evdev->exist && !client->revoked) + input_flush_device(&evdev->handle, file); mutex_unlock(&evdev->mutex); - return retval; + return 0; } static void evdev_free(struct device *dev) diff --git a/kernel/drivers/iommu/fsl_pamu.c b/kernel/drivers/iommu/fsl_pamu.c index abeedc9a7..2570f2a25 100644 --- a/kernel/drivers/iommu/fsl_pamu.c +++ b/kernel/drivers/iommu/fsl_pamu.c @@ -41,7 +41,6 @@ struct pamu_isr_data { static struct paace *ppaact; static struct paace *spaact; -static struct ome *omt __initdata; /* * Table for matching compatible strings, for device tree @@ -50,7 +49,7 @@ static struct ome *omt __initdata; * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" * string would be used. */ -static const struct of_device_id guts_device_ids[] __initconst = { +static const struct of_device_id guts_device_ids[] = { { .compatible = "fsl,qoriq-device-config-1.0", }, { .compatible = "fsl,qoriq-device-config-2.0", }, {} @@ -599,7 +598,7 @@ found_cpu_node: * Memory accesses to QMAN and BMAN private memory need not be coherent, so * clear the PAACE entry coherency attribute for them. */ -static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) +static void setup_qbman_paace(struct paace *ppaace, int paace_type) { switch (paace_type) { case QMAN_PAACE: @@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) * this table to translate device transaction to appropriate corenet * transaction. 
*/ -static void __init setup_omt(struct ome *omt) +static void setup_omt(struct ome *omt) { struct ome *ome; @@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt) * Get the maximum number of PAACT table entries * and subwindows supported by PAMU */ -static void __init get_pamu_cap_values(unsigned long pamu_reg_base) +static void get_pamu_cap_values(unsigned long pamu_reg_base) { u32 pc_val; @@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base) } /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ -static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, - phys_addr_t ppaact_phys, phys_addr_t spaact_phys, - phys_addr_t omt_phys) +static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, + phys_addr_t ppaact_phys, phys_addr_t spaact_phys, + phys_addr_t omt_phys) { u32 *pc; struct pamu_mmap_regs *pamu_regs; @@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu } /* Enable all device LIODNS */ -static void __init setup_liodns(void) +static void setup_liodns(void) { int i, len; struct paace *ppaace; @@ -846,7 +845,7 @@ struct ccsr_law { /* * Create a coherence subdomain for a given memory block. */ -static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) +static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) { struct device_node *np; const __be32 *iprop; @@ -988,7 +987,7 @@ error: static const struct { u32 svr; u32 port_id; -} port_id_map[] __initconst = { +} port_id_map[] = { {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */ {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */ {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */ @@ -1006,7 +1005,7 @@ static const struct { #define SVR_SECURITY 0x80000 /* The Security (E) bit */ -static int __init fsl_pamu_probe(struct platform_device *pdev) +static int fsl_pamu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; void __iomem *pamu_regs = NULL; @@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) int irq; phys_addr_t ppaact_phys; phys_addr_t spaact_phys; + struct ome *omt; phys_addr_t omt_phys; size_t mem_size = 0; unsigned int order = 0; @@ -1200,7 +1200,7 @@ error: return ret; } -static struct platform_driver fsl_of_pamu_driver __initdata = { +static struct platform_driver fsl_of_pamu_driver = { .driver = { .name = "fsl-of-pamu", }, diff --git a/kernel/drivers/iommu/intel-iommu.c b/kernel/drivers/iommu/intel-iommu.c index c87c4b1bf..c23427951 100644 --- a/kernel/drivers/iommu/intel-iommu.c +++ b/kernel/drivers/iommu/intel-iommu.c @@ -681,6 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu struct context_entry *context; u64 *entry; + entry = &root->lo; if (ecs_enabled(iommu)) { if (devfn >= 0x80) { devfn -= 0x80; @@ -688,7 +689,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu } devfn *= 2; } - entry = &root->lo; if (*entry & 1) context = phys_to_virt(*entry & VTD_PAGE_MASK); else { diff --git a/kernel/drivers/iommu/io-pgtable-arm.c b/kernel/drivers/iommu/io-pgtable-arm.c index 4e460216b..e29d5d7fe 100644 --- a/kernel/drivers/iommu/io-pgtable-arm.c +++ b/kernel/drivers/iommu/io-pgtable-arm.c @@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte; static bool selftest_running = false; +static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, + unsigned long iova, size_t size, int lvl, + arm_lpae_iopte *ptep); + static 
int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, arm_lpae_iopte prot, int lvl, @@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte = prot; - /* We require an unmap first */ if (iopte_leaf(*ptep, lvl)) { + /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; + } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { + /* + * We need to unmap and free the old table before + * overwriting it with a block entry. + */ + arm_lpae_iopte *tblp; + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + + tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); + if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) + return -EINVAL; } if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) diff --git a/kernel/drivers/iommu/tegra-smmu.c b/kernel/drivers/iommu/tegra-smmu.c index c845d99ec..e0ff5f4d7 100644 --- a/kernel/drivers/iommu/tegra-smmu.c +++ b/kernel/drivers/iommu/tegra-smmu.c @@ -26,6 +26,7 @@ struct tegra_smmu { const struct tegra_smmu_soc *soc; unsigned long pfn_mask; + unsigned long tlb_mask; unsigned long *asids; struct mutex lock; @@ -65,7 +66,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) #define SMMU_TLB_CONFIG 0x14 #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29) #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28) -#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f) +#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \ + ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask) #define SMMU_PTC_CONFIG 0x18 #define SMMU_PTC_CONFIG_ENABLE (1 << 29) @@ -716,6 +718,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1; dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n", mc->soc->num_address_bits, smmu->pfn_mask); + smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1; + dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines, + smmu->tlb_mask); value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); @@ -725,7 +730,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, smmu_writel(smmu, value, SMMU_PTC_CONFIG); value = SMMU_TLB_CONFIG_HIT_UNDER_MISS | - SMMU_TLB_CONFIG_ACTIVE_LINES(0x20); + SMMU_TLB_CONFIG_ACTIVE_LINES(smmu); if (soc->supports_round_robin_arbitration) value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION; diff --git a/kernel/drivers/isdn/gigaset/ser-gigaset.c b/kernel/drivers/isdn/gigaset/ser-gigaset.c index 8c91fd5eb..3ac9c4194 100644 --- a/kernel/drivers/isdn/gigaset/ser-gigaset.c +++ b/kernel/drivers/isdn/gigaset/ser-gigaset.c @@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty) cs->hw.ser->tty = tty; atomic_set(&cs->hw.ser->refcnt, 1); init_completion(&cs->hw.ser->dead_cmp); - tty->disc_data = cs; + /* Set the amount of data we're willing to receive per call + * from the hardware driver to half of the input buffer size + * to leave some reserve. + * Note: We don't do flow control towards the hardware driver. + * If more data is received than will fit into the input buffer, + * it will be dropped and an error will be logged. This should + * never happen as the device is slow and the buffer size ample. + */ + tty->receive_room = RBUFSIZE/2; + /* OK.. Initialization of the datastructures and the HW is done.. 
Now * startup system and notify the LL that we are ready to run */ diff --git a/kernel/drivers/md/md.c b/kernel/drivers/md/md.c index e4621511d..e8c44fcb1 100644 --- a/kernel/drivers/md/md.c +++ b/kernel/drivers/md/md.c @@ -5365,6 +5365,8 @@ static void __md_stop(struct mddev *mddev) { struct md_personality *pers = mddev->pers; mddev_detach(mddev); + /* Ensure ->event_work is done */ + flush_workqueue(md_misc_wq); spin_lock(&mddev->lock); mddev->ready = 0; mddev->pers = NULL; diff --git a/kernel/drivers/md/raid10.c b/kernel/drivers/md/raid10.c index f55c3f35b..fe0122771 100644 --- a/kernel/drivers/md/raid10.c +++ b/kernel/drivers/md/raid10.c @@ -3566,6 +3566,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) /* far_copies must be 1 */ conf->prev.stride = conf->dev_sectors; } + conf->reshape_safe = conf->reshape_progress; spin_lock_init(&conf->device_lock); INIT_LIST_HEAD(&conf->retry_list); @@ -3770,7 +3771,6 @@ static int run(struct mddev *mddev) } conf->offset_diff = min_offset_diff; - conf->reshape_safe = conf->reshape_progress; clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); @@ -4113,6 +4113,7 @@ static int raid10_start_reshape(struct mddev *mddev) conf->reshape_progress = size; } else conf->reshape_progress = 0; + conf->reshape_safe = conf->reshape_progress; spin_unlock_irq(&conf->device_lock); if (mddev->delta_disks && mddev->bitmap) { @@ -4180,6 +4181,7 @@ abort: rdev->new_data_offset = rdev->data_offset; smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; mddev->reshape_position = MaxSector; spin_unlock_irq(&conf->device_lock); return ret; @@ -4534,6 +4536,7 @@ static void end_reshape(struct r10conf *conf) md_finish_reshape(conf->mddev); smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; spin_unlock_irq(&conf->device_lock); /* read-ahead size must cover two whole stripes, which is diff --git a/kernel/drivers/md/raid5.c b/kernel/drivers/md/raid5.c index 47413f7f0..a90d5876f 100644 --- a/kernel/drivers/md/raid5.c +++ b/kernel/drivers/md/raid5.c @@ -2153,6 +2153,9 @@ static int resize_stripes(struct r5conf *conf, int newsize) if (!sc) return -ENOMEM; + /* Need to ensure auto-resizing doesn't interfere */ + mutex_lock(&conf->cache_size_mutex); + for (i = conf->max_nr_stripes; i; i--) { nsh = alloc_stripe(sc, GFP_KERNEL); if (!nsh) @@ -2169,6 +2172,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) kmem_cache_free(sc, nsh); } kmem_cache_destroy(sc); + mutex_unlock(&conf->cache_size_mutex); return -ENOMEM; } /* Step 2 - Must use GFP_NOIO now. 
@@ -2215,6 +2219,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) } else err = -ENOMEM; + mutex_unlock(&conf->cache_size_mutex); /* Step 4, return new stripes to service */ while(!list_empty(&newstripes)) { nsh = list_entry(newstripes.next, struct stripe_head, lru); @@ -2242,7 +2247,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) static int drop_one_stripe(struct r5conf *conf) { struct stripe_head *sh; - int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; + int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; spin_lock_irq(conf->hash_locks + hash); sh = get_free_stripe(conf, hash); @@ -5848,12 +5853,14 @@ static void raid5d(struct md_thread *thread) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); - if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { + if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && + mutex_trylock(&conf->cache_size_mutex)) { grow_one_stripe(conf, __GFP_NOWARN); /* Set flag even if allocation failed. This helps * slow down allocation requests when mem is short */ set_bit(R5_DID_ALLOC, &conf->cache_state); + mutex_unlock(&conf->cache_size_mutex); } async_tx_issue_pending_all(); @@ -5885,18 +5892,22 @@ raid5_set_cache_size(struct mddev *mddev, int size) return -EINVAL; conf->min_nr_stripes = size; + mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) ; + mutex_unlock(&conf->cache_size_mutex); err = md_allow_write(mddev); if (err) return err; + mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) if (!grow_one_stripe(conf, GFP_KERNEL)) break; + mutex_unlock(&conf->cache_size_mutex); return 0; } @@ -6363,11 +6374,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); - int ret = 0; - while (ret < sc->nr_to_scan) { - if (drop_one_stripe(conf) == 0) - return SHRINK_STOP; - ret++; + unsigned long ret = SHRINK_STOP; + + if (mutex_trylock(&conf->cache_size_mutex)) { + ret= 0; + while (ret < sc->nr_to_scan && + conf->max_nr_stripes > conf->min_nr_stripes) { + if (drop_one_stripe(conf) == 0) { + ret = SHRINK_STOP; + break; + } + ret++; + } + mutex_unlock(&conf->cache_size_mutex); } return ret; } @@ -6436,6 +6455,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) goto abort; spin_lock_init(&conf->device_lock); seqcount_init(&conf->gen_lock); + mutex_init(&conf->cache_size_mutex); init_waitqueue_head(&conf->wait_for_stripe); init_waitqueue_head(&conf->wait_for_overlap); INIT_LIST_HEAD(&conf->handle_list); diff --git a/kernel/drivers/md/raid5.h b/kernel/drivers/md/raid5.h index 66199a494..584c7bd62 100644 --- a/kernel/drivers/md/raid5.h +++ b/kernel/drivers/md/raid5.h @@ -482,7 +482,8 @@ struct r5conf { */ int active_name; char cache_name[2][32]; - struct kmem_cache *slab_cache; /* for allocating stripes */ + struct kmem_cache *slab_cache; /* for allocating stripes */ + struct mutex cache_size_mutex; /* Protect changes to cache size */ int seq_flush, seq_write; int quiesce; diff --git a/kernel/drivers/media/platform/am437x/am437x-vpfe.c b/kernel/drivers/media/platform/am437x/am437x-vpfe.c index a30cc2f7e..ddf59ee5c 100644 --- a/kernel/drivers/media/platform/am437x/am437x-vpfe.c +++ b/kernel/drivers/media/platform/am437x/am437x-vpfe.c @@ -1185,14 +1185,24 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe) static int vpfe_release(struct file *file) { struct vpfe_device *vpfe = 
video_drvdata(file); + bool fh_singular; int ret; mutex_lock(&vpfe->lock); - if (v4l2_fh_is_singular_file(file)) - vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); + /* Save the singular status before we call the clean-up helper */ + fh_singular = v4l2_fh_is_singular_file(file); + + /* the release helper will cleanup any on-going streaming */ ret = _vb2_fop_release(file, NULL); + /* + * If this was the last open file. + * Then de-initialize hw module. + */ + if (fh_singular) + vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); + mutex_unlock(&vpfe->lock); return ret; @@ -1577,7 +1587,7 @@ static int vpfe_s_fmt(struct file *file, void *priv, return -EBUSY; } - ret = vpfe_try_fmt(file, priv, fmt); + ret = vpfe_try_fmt(file, priv, &format); if (ret) return ret; diff --git a/kernel/drivers/media/platform/omap3isp/isp.c b/kernel/drivers/media/platform/omap3isp/isp.c index 18d0a8717..947d8be7b 100644 --- a/kernel/drivers/media/platform/omap3isp/isp.c +++ b/kernel/drivers/media/platform/omap3isp/isp.c @@ -829,14 +829,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags, int ret; if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && - !(link->flags & MEDIA_LNK_FL_ENABLED)) { + !(flags & MEDIA_LNK_FL_ENABLED)) { /* Powering off entities is assumed to never fail. */ isp_pipeline_pm_power(source, -sink_use); isp_pipeline_pm_power(sink, -source_use); return 0; } - if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && (flags & MEDIA_LNK_FL_ENABLED)) { ret = isp_pipeline_pm_power(source, sink_use); diff --git a/kernel/drivers/media/rc/rc-main.c b/kernel/drivers/media/rc/rc-main.c index f8c5e47a3..0aba9ff92 100644 --- a/kernel/drivers/media/rc/rc-main.c +++ b/kernel/drivers/media/rc/rc-main.c @@ -1191,9 +1191,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) { struct rc_dev *dev = to_rc_dev(device); - if (!dev || !dev->input_dev) - return -ENODEV; - if (dev->rc_map.name) ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); if (dev->driver_name) diff --git a/kernel/drivers/memory/tegra/tegra114.c b/kernel/drivers/memory/tegra/tegra114.c index 511e9a25c..16c4d26f5 100644 --- a/kernel/drivers/memory/tegra/tegra114.c +++ b/kernel/drivers/memory/tegra/tegra114.c @@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = { .num_swgroups = ARRAY_SIZE(tegra114_swgroups), .supports_round_robin_arbitration = false, .supports_request_limit = false, + .num_tlb_lines = 32, .num_asids = 4, .ops = &tegra114_smmu_ops, }; diff --git a/kernel/drivers/memory/tegra/tegra124.c b/kernel/drivers/memory/tegra/tegra124.c index 278d40b85..b153d0b73 100644 --- a/kernel/drivers/memory/tegra/tegra124.c +++ b/kernel/drivers/memory/tegra/tegra124.c @@ -981,6 +981,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = { .num_swgroups = ARRAY_SIZE(tegra124_swgroups), .supports_round_robin_arbitration = true, .supports_request_limit = true, + .num_tlb_lines = 32, .num_asids = 128, .ops = &tegra124_smmu_ops, }; diff --git a/kernel/drivers/memory/tegra/tegra30.c b/kernel/drivers/memory/tegra/tegra30.c index 71fe9376f..f422b18f4 100644 --- a/kernel/drivers/memory/tegra/tegra30.c +++ b/kernel/drivers/memory/tegra/tegra30.c @@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = { .num_swgroups = ARRAY_SIZE(tegra30_swgroups), .supports_round_robin_arbitration = false, .supports_request_limit = false, + .num_tlb_lines = 16, .num_asids = 4, .ops = &tegra30_smmu_ops, }; diff --git a/kernel/drivers/misc/cxl/pci.c 
b/kernel/drivers/misc/cxl/pci.c index 1ef016472..4f1b0bdb9 100644 --- a/kernel/drivers/misc/cxl/pci.c +++ b/kernel/drivers/misc/cxl/pci.c @@ -778,14 +778,9 @@ int cxl_reset(struct cxl *adapter) { struct pci_dev *dev = to_pci_dev(adapter->dev.parent); int rc; - int i; - u32 val; dev_info(&dev->dev, "CXL reset\n"); - for (i = 0; i < adapter->slices; i++) - cxl_remove_afu(adapter->afu[i]); - /* pcie_warm_reset requests a fundamental pci reset which includes a * PERST assert/deassert. PERST triggers a loading of the image * if "user" or "factory" is selected in sysfs */ @@ -794,20 +789,6 @@ int cxl_reset(struct cxl *adapter) return rc; } - /* the PERST done above fences the PHB. So, reset depends on EEH - * to unbind the driver, tell Sapphire to reinit the PHB, and rebind - * the driver. Do an mmio read explictly to ensure EEH notices the - * fenced PHB. Retry for a few seconds before giving up. */ - i = 0; - while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) && - (i < 5)) { - msleep(500); - i++; - } - - if (val != 0xffffffff) - dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n"); - return rc; } @@ -1062,8 +1043,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) int slice; int rc; - pci_dev_get(dev); - if (cxl_verbose) dump_cxl_config_space(dev); diff --git a/kernel/drivers/mmc/core/core.c b/kernel/drivers/mmc/core/core.c index 92e767142..588fb7908 100644 --- a/kernel/drivers/mmc/core/core.c +++ b/kernel/drivers/mmc/core/core.c @@ -330,8 +330,10 @@ EXPORT_SYMBOL(mmc_start_bkops); */ static void mmc_wait_data_done(struct mmc_request *mrq) { - mrq->host->context_info.is_done_rcv = true; - wake_up_interruptible(&mrq->host->context_info.wait); + struct mmc_context_info *context_info = &mrq->host->context_info; + + context_info->is_done_rcv = true; + wake_up_interruptible(&context_info->wait); } static void mmc_wait_done(struct mmc_request *mrq) diff --git a/kernel/drivers/mmc/host/sdhci-pci.c b/kernel/drivers/mmc/host/sdhci-pci.c index 7a3fc16d0..53cfc7ced 100644 --- a/kernel/drivers/mmc/host/sdhci-pci.c +++ b/kernel/drivers/mmc/host/sdhci-pci.c @@ -549,6 +549,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) static const struct sdhci_pci_fixes sdhci_o2 = { .probe = sdhci_pci_o2_probe, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, .probe_slot = sdhci_pci_o2_probe_slot, .resume = sdhci_pci_o2_resume, }; diff --git a/kernel/drivers/mmc/host/sdhci.c b/kernel/drivers/mmc/host/sdhci.c index bec8a307f..fd41b9143 100644 --- a/kernel/drivers/mmc/host/sdhci.c +++ b/kernel/drivers/mmc/host/sdhci.c @@ -1146,6 +1146,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host) preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); break; case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); break; case MMC_TIMING_MMC_HS400: @@ -1598,7 +1599,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) (ios->timing == MMC_TIMING_UHS_SDR25) || (ios->timing == MMC_TIMING_UHS_SDR50) || (ios->timing == MMC_TIMING_UHS_SDR104) || - (ios->timing == MMC_TIMING_UHS_DDR50))) { + (ios->timing == MMC_TIMING_UHS_DDR50) || + (ios->timing == MMC_TIMING_MMC_DDR52))) { u16 preset; sdhci_enable_preset_value(host, true); diff --git a/kernel/drivers/net/bonding/bond_main.c b/kernel/drivers/net/bonding/bond_main.c index d5fe5d5f4..16d87bf8a 100644 --- a/kernel/drivers/net/bonding/bond_main.c +++ b/kernel/drivers/net/bonding/bond_main.c @@ -625,6 
+625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev, call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); } +static struct slave *bond_get_old_active(struct bonding *bond, + struct slave *new_active) +{ + struct slave *slave; + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) { + if (slave == new_active) + continue; + + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) + return slave; + } + + return NULL; +} + /* bond_do_fail_over_mac * * Perform special MAC address swapping for fail_over_mac settings @@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond, if (!new_active) return; + if (!old_active) + old_active = bond_get_old_active(bond, new_active); + if (old_active) { ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ether_addr_copy(saddr.sa_data, @@ -1902,6 +1922,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", bond_dev->name); + bond_remove_proc_entry(bond); unregister_netdevice(bond_dev); } return ret; diff --git a/kernel/drivers/net/dsa/bcm_sf2.c b/kernel/drivers/net/dsa/bcm_sf2.c index cedb572bf..db9ebbc1a 100644 --- a/kernel/drivers/net/dsa/bcm_sf2.c +++ b/kernel/drivers/net/dsa/bcm_sf2.c @@ -417,7 +417,7 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) core_writel(priv, port, CORE_FAST_AGE_PORT); reg = core_readl(priv, CORE_FAST_AGE_CTRL); - reg |= EN_AGE_PORT | FAST_AGE_STR_DONE; + reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; core_writel(priv, reg, CORE_FAST_AGE_CTRL); do { @@ -431,6 +431,8 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) if (!timeout) return -ETIMEDOUT; + core_writel(priv, 0, CORE_FAST_AGE_CTRL); + return 0; } @@ -506,7 +508,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, u32 reg; reg = core_readl(priv, CORE_G_PCTL_PORT(port)); - cur_hw_state = reg >> G_MISTP_STATE_SHIFT; + cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); switch (state) { case BR_STATE_DISABLED: @@ -530,10 +532,12 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, } /* Fast-age ARL entries if we are moving a port from Learning or - * Forwarding state to Disabled, Blocking or Listening state + * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening + * state (hw_state) */ if (cur_hw_state != hw_state) { - if (cur_hw_state & 4 && !(hw_state & 4)) { + if (cur_hw_state >= G_MISTP_LEARN_STATE && + hw_state <= G_MISTP_LISTEN_STATE) { ret = bcm_sf2_sw_fast_age_port(ds, port); if (ret) { pr_err("%s: fast-ageing failed\n", __func__); @@ -889,15 +893,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { struct bcm_sf2_priv *priv = ds_to_priv(ds); - u32 duplex, pause, speed; + u32 duplex, pause; u32 reg; duplex = core_readl(priv, CORE_DUPSTS); pause = core_readl(priv, CORE_PAUSESTS); - speed = core_readl(priv, CORE_SPDSTS); - - speed >>= (port * SPDSTS_SHIFT); - speed &= SPDSTS_MASK; status->link = 0; @@ -925,18 +925,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, reg &= ~LINK_STS; core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); - switch (speed) { - case SPDSTS_10: - status->speed = SPEED_10; - break; - case SPDSTS_100: - status->speed = SPEED_100; - break; - case SPDSTS_1000: - status->speed = SPEED_1000; - break; - } - if ((pause & (1 << port)) && (pause & (1 << (port + 
PAUSESTS_TX_PAUSE_SHIFT)))) { status->asym_pause = 1; diff --git a/kernel/drivers/net/dsa/bcm_sf2.h b/kernel/drivers/net/dsa/bcm_sf2.h index 22e2ebf31..789d7b773 100644 --- a/kernel/drivers/net/dsa/bcm_sf2.h +++ b/kernel/drivers/net/dsa/bcm_sf2.h @@ -112,8 +112,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ spin_unlock(&priv->indir_lock); \ return (u64)indir << 32 | dir; \ } \ -static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \ - u64 val) \ +static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \ + u32 off) \ { \ spin_lock(&priv->indir_lock); \ reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \ diff --git a/kernel/drivers/net/ethernet/altera/altera_tse_main.c b/kernel/drivers/net/ethernet/altera/altera_tse_main.c index da48e6637..8207877d6 100644 --- a/kernel/drivers/net/ethernet/altera/altera_tse_main.c +++ b/kernel/drivers/net/ethernet/altera/altera_tse_main.c @@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget) if (rxcomplete < budget) { - napi_gro_flush(napi, false); - __napi_complete(napi); + napi_complete(napi); netdev_dbg(priv->dev, "NAPI Complete, did %d packets with budget %d\n", diff --git a/kernel/drivers/net/ethernet/broadcom/tg3.c b/kernel/drivers/net/ethernet/broadcom/tg3.c index 069952fa5..0d8af5bb5 100644 --- a/kernel/drivers/net/ethernet/broadcom/tg3.c +++ b/kernel/drivers/net/ethernet/broadcom/tg3.c @@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev, tg3_ape_scratchpad_read(tp, &temperature, attr->index, sizeof(temperature)); spin_unlock_bh(&tp->lock); - return sprintf(buf, "%u\n", temperature); + return sprintf(buf, "%u\n", temperature * 1000); } diff --git a/kernel/drivers/net/ethernet/brocade/bna/bnad.c b/kernel/drivers/net/ethernet/brocade/bna/bnad.c index caae6cb2b..a1c30ee60 100644 --- a/kernel/drivers/net/ethernet/brocade/bna/bnad.c +++ b/kernel/drivers/net/ethernet/brocade/bna/bnad.c @@ -675,6 +675,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) if (!next_cmpl->valid) break; } + packets++; /* TODO: BNA_CQ_EF_LOCAL ? */ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | @@ -691,7 +692,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) else bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); - packets++; rcb->rxq->rx_packets++; rcb->rxq->rx_bytes += totlen; ccb->bytes_per_intr += totlen; diff --git a/kernel/drivers/net/ethernet/freescale/fec_main.c b/kernel/drivers/net/ethernet/freescale/fec_main.c index 66d47e448..570390b5c 100644 --- a/kernel/drivers/net/ethernet/freescale/fec_main.c +++ b/kernel/drivers/net/ethernet/freescale/fec_main.c @@ -1396,6 +1396,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) if ((status & BD_ENET_RX_LAST) == 0) netdev_err(ndev, "rcv is not +last\n"); + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. 
*/ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c index c754b2027..c9da1b5d4 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, static inline bool fm10k_page_is_reserved(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, diff --git a/kernel/drivers/net/ethernet/intel/igb/igb.h b/kernel/drivers/net/ethernet/intel/igb/igb.h index c2bd4f98a..212d668da 100644 --- a/kernel/drivers/net/ethernet/intel/igb/igb.h +++ b/kernel/drivers/net/ethernet/intel/igb/igb.h @@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, struct sk_buff *skb); int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); #ifdef CONFIG_IGB_HWMON void igb_sysfs_exit(struct igb_adapter *adapter); int igb_sysfs_init(struct igb_adapter *adapter); diff --git a/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c b/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c index d5673eb90..0afc0913e 100644 --- a/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2991,6 +2991,7 @@ static int igb_set_channels(struct net_device *netdev, { struct igb_adapter *adapter = netdev_priv(netdev); unsigned int count = ch->combined_count; + unsigned int max_combined = 0; /* Verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) @@ -3001,11 +3002,13 @@ static int igb_set_channels(struct net_device *netdev, return -EINVAL; /* Verify the number of channels doesn't exceed hw limits */ - if (count > igb_max_channels(adapter)) + max_combined = igb_max_channels(adapter); + if (count > max_combined) return -EINVAL; if (count != adapter->rss_queues) { adapter->rss_queues = count; + igb_set_flag_queue_pairs(adapter, max_combined); /* Hardware has to reinitialize queues and interrupts to * match the new configuration. diff --git a/kernel/drivers/net/ethernet/intel/igb/igb_main.c b/kernel/drivers/net/ethernet/intel/igb/igb_main.c index a0a9b1fcb..4f6bf9968 100644 --- a/kernel/drivers/net/ethernet/intel/igb/igb_main.c +++ b/kernel/drivers/net/ethernet/intel/igb/igb_main.c @@ -1205,10 +1205,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, /* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; - if (!q_vector) + if (!q_vector) { q_vector = kzalloc(size, GFP_KERNEL); - else + } else if (size > ksize(q_vector)) { + kfree_rcu(q_vector, rcu); + q_vector = kzalloc(size, GFP_KERNEL); + } else { memset(q_vector, 0, size); + } if (!q_vector) return -ENOMEM; @@ -2901,6 +2905,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter) adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + igb_set_flag_queue_pairs(adapter, max_rss_queues); +} + +void igb_set_flag_queue_pairs(struct igb_adapter *adapter, + const u32 max_rss_queues) +{ + struct e1000_hw *hw = &adapter->hw; + /* Determine if we need to pair queues. 
*/ switch (hw->mac.type) { case e1000_82575: @@ -6584,7 +6596,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, static inline bool igb_page_is_reserved(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5be12a00e..463ff4720 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, static inline bool ixgbe_page_is_reserved(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } /** diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e71cdde9c..1d7b00b03 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, static inline bool ixgbevf_page_is_reserved(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } /** diff --git a/kernel/drivers/net/ethernet/marvell/mvneta.c b/kernel/drivers/net/ethernet/marvell/mvneta.c index 74d0389bf..4d608f011 100644 --- a/kernel/drivers/net/ethernet/marvell/mvneta.c +++ b/kernel/drivers/net/ethernet/marvell/mvneta.c @@ -3029,8 +3029,8 @@ static int mvneta_probe(struct platform_device *pdev) const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; const char *mac_from; + const char *managed; int phy_mode; - int fixed_phy = 0; int err; /* Our multiqueue support is not complete, so for now, only @@ -3064,7 +3064,6 @@ static int mvneta_probe(struct platform_device *pdev) dev_err(&pdev->dev, "cannot register fixed PHY\n"); goto err_free_irq; } - fixed_phy = 1; /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. 
@@ -3088,8 +3087,10 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); pp->phy_node = phy_node; pp->phy_interface = phy_mode; - pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) && - fixed_phy; + + err = of_property_read_string(dn, "managed", &managed); + pp->use_inband_status = (err == 0 && + strcmp(managed, "in-band-status") == 0); pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c index eab4e080e..80aac2010 100644 --- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1256,8 +1256,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) rss_context->hash_fn = MLX4_RSS_HASH_TOP; memcpy(rss_context->rss_key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); - netdev_rss_key_fill(rss_context->rss_key, - MLX4_EN_RSS_KEY_SIZE); } else { en_err(priv, "Unknown RSS hash function requested\n"); err = -EINVAL; diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c b/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c index 2619c9fbf..983b1d512 100644 --- a/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -573,7 +573,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) continue; mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", __func__, i, port); - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { eqe->event.port_change.port = cpu_to_be32( @@ -608,7 +608,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) continue; if (i == mlx4_master_func_num(dev)) continue; - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { eqe->event.port_change.port = cpu_to_be32( diff --git a/kernel/drivers/net/ethernet/rocker/rocker.c b/kernel/drivers/net/ethernet/rocker/rocker.c index cf98cc9bb..73b6fc21e 100644 --- a/kernel/drivers/net/ethernet/rocker/rocker.c +++ b/kernel/drivers/net/ethernet/rocker/rocker.c @@ -4587,6 +4587,7 @@ static void rocker_remove_ports(struct rocker *rocker) rocker_port = rocker->ports[i]; rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE); unregister_netdev(rocker_port->dev); + free_netdev(rocker_port->dev); } kfree(rocker->ports); } diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/descs.h b/kernel/drivers/net/ethernet/stmicro/stmmac/descs.h index ad3996038..799c2929c 100644 --- a/kernel/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/kernel/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -158,6 +158,8 @@ struct dma_desc { u32 buffer2_size:13; u32 reserved4:3; } etx; /* -- enhanced -- */ + + u64 all_flags; } des01; unsigned int des2; unsigned int des3; diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 6249a4ec0..573708123 100644 --- a/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -38,7 +38,6 @@ struct rk_priv_data { bool clock_input; struct clk *clk_mac; - struct clk *clk_mac_pll; struct clk *gmac_clkin; struct clk *mac_clk_rx; struct clk *mac_clk_tx; @@ -208,7 +207,7 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv) dev_info(dev, "%s: 
clock input from PHY\n", __func__); } else { if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) - clk_set_rate(bsp_priv->clk_mac_pll, 50000000); + clk_set_rate(bsp_priv->clk_mac, 50000000); } return 0; diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/kernel/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 1e2bcf5f8..7d944449f 100644 --- a/kernel/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/kernel/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { + p->des01.all_flags = 0; p->des01.erx.own = 1; p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; @@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) { - p->des01.etx.own = 0; + p->des01.all_flags = 0; if (mode == STMMAC_CHAIN_MODE) ehn_desc_tx_set_on_chain(p, end); else diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/kernel/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 35ad4f427..48c345644 100644 --- a/kernel/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/kernel/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { + p->des01.all_flags = 0; p->des01.rx.own = 1; p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; @@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) { - p->des01.tx.own = 0; + p->des01.all_flags = 0; if (mode == STMMAC_CHAIN_MODE) ndesc_tx_set_on_chain(p, end); else diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2c5ce2bac..c274cdc5d 100644 --- a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -829,8 +829,11 @@ static int stmmac_init_phy(struct net_device *dev) phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface); - if (IS_ERR(phydev)) { + if (IS_ERR_OR_NULL(phydev)) { pr_err("%s: Could not attach to PHY\n", dev->name); + if (!phydev) + return -ENODEV; + return PTR_ERR(phydev); } @@ -1189,41 +1192,41 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) goto err_tx_skbuff; if (priv->extend_desc) { - priv->dma_erx = dma_alloc_coherent(priv->device, rxsize * - sizeof(struct - dma_extended_desc), - &priv->dma_rx_phy, - GFP_KERNEL); + priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize * + sizeof(struct + dma_extended_desc), + &priv->dma_rx_phy, + GFP_KERNEL); if (!priv->dma_erx) goto err_dma; - priv->dma_etx = dma_alloc_coherent(priv->device, txsize * - sizeof(struct - dma_extended_desc), - &priv->dma_tx_phy, - GFP_KERNEL); + priv->dma_etx = dma_zalloc_coherent(priv->device, txsize * + sizeof(struct + dma_extended_desc), + &priv->dma_tx_phy, + GFP_KERNEL); if (!priv->dma_etx) { dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_extended_desc), - priv->dma_erx, priv->dma_rx_phy); + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); goto err_dma; } } else { - priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * - sizeof(struct dma_desc), - &priv->dma_rx_phy, - 
GFP_KERNEL); + priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize * + sizeof(struct dma_desc), + &priv->dma_rx_phy, + GFP_KERNEL); if (!priv->dma_rx) goto err_dma; - priv->dma_tx = dma_alloc_coherent(priv->device, txsize * - sizeof(struct dma_desc), - &priv->dma_tx_phy, - GFP_KERNEL); + priv->dma_tx = dma_zalloc_coherent(priv->device, txsize * + sizeof(struct dma_desc), + &priv->dma_tx_phy, + GFP_KERNEL); if (!priv->dma_tx) { dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_desc), - priv->dma_rx, priv->dma_rx_phy); + sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); goto err_dma; } } diff --git a/kernel/drivers/net/macvtap.c b/kernel/drivers/net/macvtap.c index 8c350c5d5..58858c558 100644 --- a/kernel/drivers/net/macvtap.c +++ b/kernel/drivers/net/macvtap.c @@ -1054,10 +1054,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, return 0; case TUNSETSNDBUF: - if (get_user(u, up)) + if (get_user(s, sp)) return -EFAULT; - q->sk.sk_sndbuf = u; + q->sk.sk_sndbuf = s; return 0; case TUNGETVNETHDRSZ: diff --git a/kernel/drivers/net/phy/fixed_phy.c b/kernel/drivers/net/phy/fixed_phy.c index 1960b46ad..479b93f95 100644 --- a/kernel/drivers/net/phy/fixed_phy.c +++ b/kernel/drivers/net/phy/fixed_phy.c @@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) u16 lpagb = 0; u16 lpa = 0; + if (!fp->status.link) + goto done; + bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; + if (fp->status.duplex) { bmcr |= BMCR_FULLDPLX; @@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) } } - if (fp->status.link) - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; - if (fp->status.pause) lpa |= LPA_PAUSE_CAP; if (fp->status.asym_pause) lpa |= LPA_PAUSE_ASYM; +done: fp->regs[MII_PHYSID1] = 0; fp->regs[MII_PHYSID2] = 0; diff --git a/kernel/drivers/net/usb/usbnet.c b/kernel/drivers/net/usb/usbnet.c index 3c86b1072..e0498571a 100644 --- a/kernel/drivers/net/usb/usbnet.c +++ b/kernel/drivers/net/usb/usbnet.c @@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct driver_info *info = dev->driver_info; - int retval, pm; + int retval, pm, mpn; clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue (net); @@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net) usbnet_purge_paused_rxq(dev); + mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags); + /* deferred work (task, timer, softirq) must also stop. * can't flush_scheduled_work() until we drop rtnl (later), * else workers could deadlock; so make workers a NOP. 
@@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net) if (!pm) usb_autopm_put_interface(dev->intf); - if (info->manage_power && - !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) + if (info->manage_power && mpn) info->manage_power(dev, 0); else usb_autopm_put_interface(dev->intf); diff --git a/kernel/drivers/net/virtio_net.c b/kernel/drivers/net/virtio_net.c index 63c7810e1..7fbca37a1 100644 --- a/kernel/drivers/net/virtio_net.c +++ b/kernel/drivers/net/virtio_net.c @@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev) else vi->hdr_len = sizeof(struct virtio_net_hdr); - if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) vi->any_header_sg = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) diff --git a/kernel/drivers/net/vxlan.c b/kernel/drivers/net/vxlan.c index 21a0fbf1e..0085b8df8 100644 --- a/kernel/drivers/net/vxlan.c +++ b/kernel/drivers/net/vxlan.c @@ -2212,6 +2212,8 @@ static int vxlan_open(struct net_device *dev) if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { ret = vxlan_igmp_join(vxlan); + if (ret == -EADDRINUSE) + ret = 0; if (ret) { vxlan_sock_release(vs); return ret; diff --git a/kernel/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/kernel/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 23806c243..fd4a5353d 100644 --- a/kernel/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/kernel/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ + {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ diff --git a/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 57966e3c8..3fa2fb7c8 100644 --- a/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, MSR, bt_msr); rtlpriv->cfg->ops->led_control(hw, ledaction); - if ((bt_msr & 0xfc) == MSR_AP) + if ((bt_msr & MSR_MASK) == MSR_AP) rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); else rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); diff --git a/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h index 53668fc8f..1d6110f9c 100644 --- a/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h +++ b/kernel/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h @@ -429,6 +429,7 @@ #define MSR_ADHOC 0x01 #define MSR_INFRA 0x02 #define MSR_AP 0x03 +#define MSR_MASK 0x03 #define RRSR_RSC_OFFSET 21 #define RRSR_SHORT_OFFSET 23 diff --git a/kernel/drivers/net/xen-netback/netback.c b/kernel/drivers/net/xen-netback/netback.c index 0d2594395..0866c5dfd 100644 --- a/kernel/drivers/net/xen-netback/netback.c +++ b/kernel/drivers/net/xen-netback/netback.c @@ -1571,13 +1571,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) smp_rmb(); while (dc != dp) { - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); + BUG_ON(gop - 
queue->tx_unmap_ops >= MAX_PENDING_REQS); pending_idx = queue->dealloc_ring[pending_index(dc++)]; - pending_idx_release[gop-queue->tx_unmap_ops] = + pending_idx_release[gop - queue->tx_unmap_ops] = pending_idx; - queue->pages_to_unmap[gop-queue->tx_unmap_ops] = + queue->pages_to_unmap[gop - queue->tx_unmap_ops] = queue->mmap_pages[pending_idx]; gnttab_set_unmap_op(gop, idx_to_kaddr(queue, pending_idx), diff --git a/kernel/drivers/nfc/st21nfca/st21nfca.c b/kernel/drivers/nfc/st21nfca/st21nfca.c index d251f7229..051286562 100644 --- a/kernel/drivers/nfc/st21nfca/st21nfca.c +++ b/kernel/drivers/nfc/st21nfca/st21nfca.c @@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE); if (r < 0) - goto free_info; + return r; /* Get pipe list */ r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list), &skb_pipe_list); if (r < 0) - goto free_info; + return r; /* Complete the existing gate_pipe table */ for (i = 0; i < skb_pipe_list->len; i++) { @@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) info->src_host_id != ST21NFCA_ESE_HOST_ID) { pr_err("Unexpected apdu_reader pipe on host %x\n", info->src_host_id); + kfree_skb(skb_pipe_info); continue; } @@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) hdev->pipes[st21nfca_gates[j].pipe].dest_host = info->src_host_id; } + kfree_skb(skb_pipe_info); } /* @@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) st21nfca_gates[i].gate, st21nfca_gates[i].pipe); if (r < 0) - goto free_info; + goto free_list; } } memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates)); -free_info: - kfree_skb(skb_pipe_info); +free_list: kfree_skb(skb_pipe_list); return r; } diff --git a/kernel/drivers/of/address.c b/kernel/drivers/of/address.c index 8bfda6ade..384574c39 100644 --- a/kernel/drivers/of/address.c +++ b/kernel/drivers/of/address.c @@ -845,10 +845,10 @@ struct device_node *of_find_matching_node_by_address(struct device_node *from, struct resource res; while (dn) { - if (of_address_to_resource(dn, 0, &res)) - continue; - if (res.start == base_address) + if (!of_address_to_resource(dn, 0, &res) && + res.start == base_address) return dn; + dn = of_find_matching_node(dn, matches); } diff --git a/kernel/drivers/of/fdt.c b/kernel/drivers/of/fdt.c index cde35c5d0..d91f721a0 100644 --- a/kernel/drivers/of/fdt.c +++ b/kernel/drivers/of/fdt.c @@ -955,7 +955,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, } #ifdef CONFIG_HAVE_MEMBLOCK -#define MAX_PHYS_ADDR ((phys_addr_t)~0) +#ifndef MAX_MEMBLOCK_ADDR +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) +#endif void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) { @@ -972,16 +974,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) } size &= PAGE_MASK; - if (base > MAX_PHYS_ADDR) { + if (base > MAX_MEMBLOCK_ADDR) { pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", base, base + size); return; } - if (base + size - 1 > MAX_PHYS_ADDR) { + if (base + size - 1 > MAX_MEMBLOCK_ADDR) { pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", - ((u64)MAX_PHYS_ADDR) + 1, base + size); - size = MAX_PHYS_ADDR - base + 1; + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); + size = MAX_MEMBLOCK_ADDR - base + 1; } if (base + size < phys_offset) { diff --git a/kernel/drivers/of/of_mdio.c b/kernel/drivers/of/of_mdio.c index 0c064485d..bec8ec2b3 100644 --- 
a/kernel/drivers/of/of_mdio.c +++ b/kernel/drivers/of/of_mdio.c @@ -263,7 +263,8 @@ EXPORT_SYMBOL(of_phy_attach); bool of_phy_is_fixed_link(struct device_node *np) { struct device_node *dn; - int len; + int len, err; + const char *managed; /* New binding */ dn = of_get_child_by_name(np, "fixed-link"); @@ -272,6 +273,10 @@ bool of_phy_is_fixed_link(struct device_node *np) return true; } + err = of_property_read_string(np, "managed", &managed); + if (err == 0 && strcmp(managed, "auto") != 0) + return true; + /* Old binding */ if (of_get_property(np, "fixed-link", &len) && len == (5 * sizeof(__be32))) @@ -286,8 +291,18 @@ int of_phy_register_fixed_link(struct device_node *np) struct fixed_phy_status status = {}; struct device_node *fixed_link_node; const __be32 *fixed_link_prop; - int len; + int len, err; struct phy_device *phy; + const char *managed; + + err = of_property_read_string(np, "managed", &managed); + if (err == 0) { + if (strcmp(managed, "in-band-status") == 0) { + /* status is zeroed, namely its .link member */ + phy = fixed_phy_register(PHY_POLL, &status, np); + return IS_ERR(phy) ? PTR_ERR(phy) : 0; + } + } /* New binding */ fixed_link_node = of_get_child_by_name(np, "fixed-link"); diff --git a/kernel/drivers/parisc/lba_pci.c b/kernel/drivers/parisc/lba_pci.c index dceb9ddfd..a32c1f6c2 100644 --- a/kernel/drivers/parisc/lba_pci.c +++ b/kernel/drivers/parisc/lba_pci.c @@ -1556,8 +1556,11 @@ lba_driver_probe(struct parisc_device *dev) if (lba_dev->hba.lmmio_space.flags) pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space, lba_dev->hba.lmmio_space_offset); - if (lba_dev->hba.gmmio_space.flags) - pci_add_resource(&resources, &lba_dev->hba.gmmio_space); + if (lba_dev->hba.gmmio_space.flags) { + /* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */ + pr_warn("LBA: Not registering GMMIO space %pR\n", + &lba_dev->hba.gmmio_space); + } pci_add_resource(&resources, &lba_dev->hba.bus_num); diff --git a/kernel/drivers/pci/Kconfig b/kernel/drivers/pci/Kconfig index 944f50015..73de4efcb 100644 --- a/kernel/drivers/pci/Kconfig +++ b/kernel/drivers/pci/Kconfig @@ -2,7 +2,7 @@ # PCI configuration # config PCI_BUS_ADDR_T_64BIT - def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC)) + def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) depends on PCI config PCI_MSI diff --git a/kernel/drivers/pci/access.c b/kernel/drivers/pci/access.c index 1d0f6f8d0..5908d6e3b 100644 --- a/kernel/drivers/pci/access.c +++ b/kernel/drivers/pci/access.c @@ -439,6 +439,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = { .release = pci_vpd_pci22_release, }; +static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, + void *arg) +{ + struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); + ssize_t ret; + + if (!tdev) + return -ENODEV; + + ret = pci_read_vpd(tdev, pos, count, arg); + pci_dev_put(tdev); + return ret; +} + +static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, + const void *arg) +{ + struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); + ssize_t ret; + + if (!tdev) + return -ENODEV; + + ret = pci_write_vpd(tdev, pos, count, arg); + pci_dev_put(tdev); + return ret; +} + +static const struct pci_vpd_ops pci_vpd_f0_ops = { + .read = pci_vpd_f0_read, + .write = pci_vpd_f0_write, + .release = pci_vpd_pci22_release, +}; + +static int pci_vpd_f0_dev_check(struct pci_dev *dev) +{ + struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); + int ret = 0; + + if (!tdev) + return -ENODEV; + if 
(!tdev->vpd || !tdev->multifunction || + dev->class != tdev->class || dev->vendor != tdev->vendor || + dev->device != tdev->device) + ret = -ENODEV; + + pci_dev_put(tdev); + return ret; +} + int pci_vpd_pci22_init(struct pci_dev *dev) { struct pci_vpd_pci22 *vpd; @@ -447,12 +497,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev) cap = pci_find_capability(dev, PCI_CAP_ID_VPD); if (!cap) return -ENODEV; + if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) { + int ret = pci_vpd_f0_dev_check(dev); + + if (ret) + return ret; + } vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); if (!vpd) return -ENOMEM; vpd->base.len = PCI_VPD_PCI22_SIZE; - vpd->base.ops = &pci_vpd_pci22_ops; + if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) + vpd->base.ops = &pci_vpd_f0_ops; + else + vpd->base.ops = &pci_vpd_pci22_ops; mutex_init(&vpd->lock); vpd->cap = cap; vpd->busy = false; diff --git a/kernel/drivers/pci/quirks.c b/kernel/drivers/pci/quirks.c index c6dc1dfd2..804cd3b02 100644 --- a/kernel/drivers/pci/quirks.c +++ b/kernel/drivers/pci/quirks.c @@ -1576,6 +1576,18 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB3 #endif +static void quirk_jmicron_async_suspend(struct pci_dev *dev) +{ + if (dev->multifunction) { + device_disable_async_suspend(&dev->dev); + dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n"); + } +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend); + #ifdef CONFIG_X86_IO_APIC static void quirk_alder_ioapic(struct pci_dev *pdev) { @@ -1903,6 +1915,15 @@ static void quirk_netmos(struct pci_dev *dev) DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); +static void quirk_f0_vpd_link(struct pci_dev *dev) +{ + if (!dev->multifunction || !PCI_FUNC(dev->devfn)) + return; + dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, + PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); + static void quirk_e100_interrupt(struct pci_dev *dev) { u16 command, pmcsr; @@ -2838,12 +2859,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); static void fixup_ti816x_class(struct pci_dev *dev) { + u32 class = dev->class; + /* TI 816x devices do not have class code set when in PCIe boot mode */ - dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n"); - dev->class = PCI_CLASS_MULTIMEDIA_VIDEO; + dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8; + dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n", + class, dev->class); } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800, - PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class); + PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class); /* Some PCIe devices do not work reliably with the claimed maximum * payload size supported. 
diff --git a/kernel/drivers/pinctrl/pinctrl-at91.c b/kernel/drivers/pinctrl/pinctrl-at91.c index 2f797cb7e..774781450 100644 --- a/kernel/drivers/pinctrl/pinctrl-at91.c +++ b/kernel/drivers/pinctrl/pinctrl-at91.c @@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = { static void __iomem *pin_to_controller(struct at91_pinctrl *info, unsigned int bank) { + if (!gpio_chips[bank]) + return NULL; + return gpio_chips[bank]->regbase; } @@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, pin = &pins_conf[i]; at91_pin_dbg(info->dev, pin); pio = pin_to_controller(info, pin->bank); + + if (!pio) + continue; + mask = pin_to_mask(pin->pin); at91_mux_disable_interrupt(pio, mask); switch (pin->mux) { @@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev, *config = 0; dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id); pio = pin_to_controller(info, pin_to_bank(pin_id)); + + if (!pio) + return -EINVAL; + pin = pin_id % MAX_NB_GPIO_PER_BANK; if (at91_mux_get_multidrive(pio, pin)) @@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, "%s:%d, pin_id=%d, config=0x%lx", __func__, __LINE__, pin_id, config); pio = pin_to_controller(info, pin_to_bank(pin_id)); + + if (!pio) + return -EINVAL; + pin = pin_id % MAX_NB_GPIO_PER_BANK; mask = pin_to_mask(pin); diff --git a/kernel/drivers/platform/x86/hp-wmi.c b/kernel/drivers/platform/x86/hp-wmi.c index 06697315a..fb4dd7b3e 100644 --- a/kernel/drivers/platform/x86/hp-wmi.c +++ b/kernel/drivers/platform/x86/hp-wmi.c @@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); #define HPWMI_HARDWARE_QUERY 0x4 #define HPWMI_WIRELESS_QUERY 0x5 #define HPWMI_BIOS_QUERY 0x9 +#define HPWMI_FEATURE_QUERY 0xb #define HPWMI_HOTKEY_QUERY 0xc -#define HPWMI_FEATURE_QUERY 0xd +#define HPWMI_FEATURE2_QUERY 0xd #define HPWMI_WIRELESS2_QUERY 0x1b #define HPWMI_POSTCODEERROR_QUERY 0x2a @@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void) return (state & 0x4) ? 1 : 0; } -static int __init hp_wmi_bios_2009_later(void) +static int __init hp_wmi_bios_2008_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, sizeof(state), sizeof(state)); - if (ret) - return ret; + if (!ret) + return 1; - return (state & 0x10) ? 1 : 0; + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; } -static int hp_wmi_enable_hotkeys(void) +static int __init hp_wmi_bios_2009_later(void) { - int ret; - int query = 0x6e; + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state, + sizeof(state), sizeof(state)); + if (!ret) + return 1; - ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), - 0); + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 
0 : -ENXIO; +} +static int __init hp_wmi_enable_hotkeys(void) +{ + int value = 0x6e; + int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value, + sizeof(value), 0); if (ret) return -EINVAL; return 0; @@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void) hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); - if (hp_wmi_bios_2009_later() == 4) + if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later()) hp_wmi_enable_hotkeys(); status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL); diff --git a/kernel/drivers/platform/x86/ideapad-laptop.c b/kernel/drivers/platform/x86/ideapad-laptop.c index cb7cd8d79..cd78f1166 100644 --- a/kernel/drivers/platform/x86/ideapad-laptop.c +++ b/kernel/drivers/platform/x86/ideapad-laptop.c @@ -852,6 +852,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { }, }, { + .ident = "Lenovo Yoga 3 14", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"), + }, + }, + { .ident = "Lenovo Yoga 3 Pro 1370", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), diff --git a/kernel/drivers/regulator/pbias-regulator.c b/kernel/drivers/regulator/pbias-regulator.c index bd2b75c0d..4fa7bcaf4 100644 --- a/kernel/drivers/regulator/pbias-regulator.c +++ b/kernel/drivers/regulator/pbias-regulator.c @@ -30,6 +30,7 @@ struct pbias_reg_info { u32 enable; u32 enable_mask; + u32 disable_val; u32 vmode; unsigned int enable_time; char *name; @@ -62,6 +63,7 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = { .enable = BIT(1), .enable_mask = BIT(1), .vmode = BIT(0), + .disable_val = 0, .enable_time = 100, .name = "pbias_mmc_omap2430" }; @@ -77,6 +79,7 @@ static const struct pbias_reg_info pbias_sim_omap3 = { static const struct pbias_reg_info pbias_mmc_omap4 = { .enable = BIT(26) | BIT(22), .enable_mask = BIT(26) | BIT(25) | BIT(22), + .disable_val = BIT(25), .vmode = BIT(21), .enable_time = 100, .name = "pbias_mmc_omap4" @@ -85,6 +88,7 @@ static const struct pbias_reg_info pbias_mmc_omap4 = { static const struct pbias_reg_info pbias_mmc_omap5 = { .enable = BIT(27) | BIT(26), .enable_mask = BIT(27) | BIT(25) | BIT(26), + .disable_val = BIT(25), .vmode = BIT(21), .enable_time = 100, .name = "pbias_mmc_omap5" @@ -159,6 +163,7 @@ static int pbias_regulator_probe(struct platform_device *pdev) drvdata[data_idx].desc.enable_reg = res->start; drvdata[data_idx].desc.enable_mask = info->enable_mask; drvdata[data_idx].desc.enable_val = info->enable; + drvdata[data_idx].desc.disable_val = info->disable_val; cfg.init_data = pbias_matches[idx].init_data; cfg.driver_data = &drvdata[data_idx]; diff --git a/kernel/drivers/rtc/rtc-abx80x.c b/kernel/drivers/rtc/rtc-abx80x.c index 4337c3bc6..afea84c7a 100644 --- a/kernel/drivers/rtc/rtc-abx80x.c +++ b/kernel/drivers/rtc/rtc-abx80x.c @@ -28,7 +28,7 @@ #define ABX8XX_REG_WD 0x07 #define ABX8XX_REG_CTRL1 0x10 -#define ABX8XX_CTRL_WRITE BIT(1) +#define ABX8XX_CTRL_WRITE BIT(0) #define ABX8XX_CTRL_12_24 BIT(6) #define ABX8XX_REG_CFG_KEY 0x1f diff --git a/kernel/drivers/rtc/rtc-s3c.c b/kernel/drivers/rtc/rtc-s3c.c index 76cbad7a9..c5a2523b0 100644 --- a/kernel/drivers/rtc/rtc-s3c.c +++ b/kernel/drivers/rtc/rtc-s3c.c @@ -39,6 +39,7 @@ struct s3c_rtc { void __iomem *base; struct clk *rtc_clk; struct clk *rtc_src_clk; + bool clk_disabled; struct s3c_rtc_data *data; @@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info) unsigned long irq_flags; spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); - clk_enable(info->rtc_clk); - if 
(info->data->needs_src_clk) - clk_enable(info->rtc_src_clk); + if (info->clk_disabled) { + clk_enable(info->rtc_clk); + if (info->data->needs_src_clk) + clk_enable(info->rtc_src_clk); + info->clk_disabled = false; + } spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); } @@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info) unsigned long irq_flags; spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); - if (info->data->needs_src_clk) - clk_disable(info->rtc_src_clk); - clk_disable(info->rtc_clk); + if (!info->clk_disabled) { + if (info->data->needs_src_clk) + clk_disable(info->rtc_src_clk); + clk_disable(info->rtc_clk); + info->clk_disabled = true; + } spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); } @@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) s3c_rtc_disable_clk(info); + if (enabled) + s3c_rtc_enable_clk(info); + else + s3c_rtc_disable_clk(info); + return 0; } diff --git a/kernel/drivers/rtc/rtc-s5m.c b/kernel/drivers/rtc/rtc-s5m.c index 8c70d785b..ab60287ee 100644 --- a/kernel/drivers/rtc/rtc-s5m.c +++ b/kernel/drivers/rtc/rtc-s5m.c @@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) case S2MPS13X: data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); ret = regmap_write(info->regmap, info->regs->ctrl, data[0]); + if (ret < 0) + break; + + /* + * Should set WUDR & (RUDR or AUDR) bits to high after writing + * RTC_CTRL register like writing Alarm registers. We can't find + * the description from datasheet but vendor code does that + * really. + */ + ret = s5m8767_rtc_set_alarm_reg(info); break; default: diff --git a/kernel/drivers/s390/char/sclp_early.c b/kernel/drivers/s390/char/sclp_early.c index 1efa4fdb7..f45cd0cb1 100644 --- a/kernel/drivers/s390/char/sclp_early.c +++ b/kernel/drivers/s390/char/sclp_early.c @@ -7,6 +7,7 @@ #define KMSG_COMPONENT "sclp_early" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/errno.h> #include <asm/ctl_reg.h> #include <asm/sclp.h> #include <asm/ipl.h> diff --git a/kernel/drivers/soc/tegra/pmc.c b/kernel/drivers/soc/tegra/pmc.c index c956395cf..c89bada87 100644 --- a/kernel/drivers/soc/tegra/pmc.c +++ b/kernel/drivers/soc/tegra/pmc.c @@ -732,12 +732,12 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc) u32 value, checksum; if (!pmc->soc->has_tsense_reset) - goto out; + return; np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip"); if (!np) { dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled); - goto out; + return; } if (of_property_read_u32(np, "nvidia,i2c-controller-id", &ctrl_id)) { diff --git a/kernel/drivers/spi/spi-bcm2835.c b/kernel/drivers/spi/spi-bcm2835.c index 37875cf94..a5067739e 100644 --- a/kernel/drivers/spi/spi-bcm2835.c +++ b/kernel/drivers/spi/spi-bcm2835.c @@ -257,13 +257,11 @@ static int bcm2835_spi_transfer_one(struct spi_master *master, spi_used_hz = cdiv ? 
(clk_hz / cdiv) : (clk_hz / 65536); bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); - /* handle all the modes */ + /* handle all the 3-wire mode */ if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf)) cs |= BCM2835_SPI_CS_REN; - if (spi->mode & SPI_CPOL) - cs |= BCM2835_SPI_CS_CPOL; - if (spi->mode & SPI_CPHA) - cs |= BCM2835_SPI_CS_CPHA; + else + cs &= ~BCM2835_SPI_CS_REN; /* for gpio_cs set dummy CS so that no HW-CS get changed * we can not run this in bcm2835_spi_set_cs, as it does @@ -291,6 +289,25 @@ static int bcm2835_spi_transfer_one(struct spi_master *master, return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs); } +static int bcm2835_spi_prepare_message(struct spi_master *master, + struct spi_message *msg) +{ + struct spi_device *spi = msg->spi; + struct bcm2835_spi *bs = spi_master_get_devdata(master); + u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); + + cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA); + + if (spi->mode & SPI_CPOL) + cs |= BCM2835_SPI_CS_CPOL; + if (spi->mode & SPI_CPHA) + cs |= BCM2835_SPI_CS_CPHA; + + bcm2835_wr(bs, BCM2835_SPI_CS, cs); + + return 0; +} + static void bcm2835_spi_handle_err(struct spi_master *master, struct spi_message *msg) { @@ -429,6 +446,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) master->set_cs = bcm2835_spi_set_cs; master->transfer_one = bcm2835_spi_transfer_one; master->handle_err = bcm2835_spi_handle_err; + master->prepare_message = bcm2835_spi_prepare_message; master->dev.of_node = pdev->dev.of_node; bs = spi_master_get_devdata(master); diff --git a/kernel/drivers/spi/spi-bitbang-txrx.h b/kernel/drivers/spi/spi-bitbang-txrx.h index 06b34e5bc..47bb9b898 100644 --- a/kernel/drivers/spi/spi-bitbang-txrx.h +++ b/kernel/drivers/spi/spi-bitbang-txrx.h @@ -49,7 +49,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi, { /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ - bool oldbit = !(word & 1); + u32 oldbit = (!(word & (1<<(bits-1)))) << 31; /* clock starts at inactive polarity */ for (word <<= (32 - bits); likely(bits); bits--) { @@ -81,7 +81,7 @@ bitbang_txrx_be_cpha1(struct spi_device *spi, { /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ - bool oldbit = !(word & (1 << 31)); + u32 oldbit = (!(word & (1<<(bits-1)))) << 31; /* clock starts at inactive polarity */ for (word <<= (32 - bits); likely(bits); bits--) { diff --git a/kernel/drivers/spi/spi-dw-mmio.c b/kernel/drivers/spi/spi-dw-mmio.c index eb03e1215..7edede6e0 100644 --- a/kernel/drivers/spi/spi-dw-mmio.c +++ b/kernel/drivers/spi/spi-dw-mmio.c @@ -74,6 +74,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) dws->max_freq = clk_get_rate(dwsmmio->clk); + of_property_read_u32(pdev->dev.of_node, "reg-io-width", + &dws->reg_io_width); + num_cs = 4; if (pdev->dev.of_node) diff --git a/kernel/drivers/spi/spi-dw.c b/kernel/drivers/spi/spi-dw.c index 8d67d03c7..4fbfcdc5c 100644 --- a/kernel/drivers/spi/spi-dw.c +++ b/kernel/drivers/spi/spi-dw.c @@ -194,7 +194,7 @@ static void dw_writer(struct dw_spi *dws) else txw = *(u16 *)(dws->tx); } - dw_writel(dws, DW_SPI_DR, txw); + dw_write_io_reg(dws, DW_SPI_DR, txw); dws->tx += dws->n_bytes; } } @@ -205,7 +205,7 @@ static void dw_reader(struct dw_spi *dws) u16 rxw; while (max--) { - rxw = dw_readl(dws, DW_SPI_DR); + rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ if (dws->rx_end - dws->len) { if (dws->n_bytes == 1) diff --git a/kernel/drivers/spi/spi-dw.h b/kernel/drivers/spi/spi-dw.h index 6c91391c1..b75ed327d 100644 --- 
a/kernel/drivers/spi/spi-dw.h +++ b/kernel/drivers/spi/spi-dw.h @@ -109,6 +109,7 @@ struct dw_spi { u32 fifo_len; /* depth of the FIFO buffer */ u32 max_freq; /* max bus freq supported */ + u32 reg_io_width; /* DR I/O width in bytes */ u16 bus_num; u16 num_cs; /* supported slave numbers */ @@ -145,11 +146,45 @@ static inline u32 dw_readl(struct dw_spi *dws, u32 offset) return __raw_readl(dws->regs + offset); } +static inline u16 dw_readw(struct dw_spi *dws, u32 offset) +{ + return __raw_readw(dws->regs + offset); +} + static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val) { __raw_writel(val, dws->regs + offset); } +static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val) +{ + __raw_writew(val, dws->regs + offset); +} + +static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset) +{ + switch (dws->reg_io_width) { + case 2: + return dw_readw(dws, offset); + case 4: + default: + return dw_readl(dws, offset); + } +} + +static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val) +{ + switch (dws->reg_io_width) { + case 2: + dw_writew(dws, offset, val); + break; + case 4: + default: + dw_writel(dws, offset, val); + break; + } +} + static inline void spi_enable_chip(struct dw_spi *dws, int enable) { dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0)); diff --git a/kernel/drivers/spi/spi-img-spfi.c b/kernel/drivers/spi/spi-img-spfi.c index acce90ac7..bb916c8d4 100644 --- a/kernel/drivers/spi/spi-img-spfi.c +++ b/kernel/drivers/spi/spi-img-spfi.c @@ -105,6 +105,10 @@ struct img_spfi { bool rx_dma_busy; }; +struct img_spfi_device_data { + bool gpio_requested; +}; + static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg) { return readl(spfi->regs + reg); @@ -267,15 +271,15 @@ static int img_spfi_start_pio(struct spi_master *master, cpu_relax(); } - ret = spfi_wait_all_done(spfi); - if (ret < 0) - return ret; - if (rx_bytes > 0 || tx_bytes > 0) { dev_err(spfi->dev, "PIO transfer timed out\n"); return -ETIMEDOUT; } + ret = spfi_wait_all_done(spfi); + if (ret < 0) + return ret; + return 0; } @@ -440,21 +444,50 @@ static int img_spfi_unprepare(struct spi_master *master, static int img_spfi_setup(struct spi_device *spi) { - int ret; - - ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? - GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, - dev_name(&spi->dev)); - if (ret) - dev_err(&spi->dev, "can't request chipselect gpio %d\n", + int ret = -EINVAL; + struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi); + + if (!spfi_data) { + spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL); + if (!spfi_data) + return -ENOMEM; + spfi_data->gpio_requested = false; + spi_set_ctldata(spi, spfi_data); + } + if (!spfi_data->gpio_requested) { + ret = gpio_request_one(spi->cs_gpio, + (spi->mode & SPI_CS_HIGH) ? + GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, + dev_name(&spi->dev)); + if (ret) + dev_err(&spi->dev, "can't request chipselect gpio %d\n", spi->cs_gpio); - + else + spfi_data->gpio_requested = true; + } else { + if (gpio_is_valid(spi->cs_gpio)) { + int mode = ((spi->mode & SPI_CS_HIGH) ? 
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH); + + ret = gpio_direction_output(spi->cs_gpio, mode); + if (ret) + dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n", + spi->cs_gpio, ret); + } + } return ret; } static void img_spfi_cleanup(struct spi_device *spi) { - gpio_free(spi->cs_gpio); + struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi); + + if (spfi_data) { + if (spfi_data->gpio_requested) + gpio_free(spi->cs_gpio); + kfree(spfi_data); + spi_set_ctldata(spi, NULL); + } } static void img_spfi_config(struct spi_master *master, struct spi_device *spi, diff --git a/kernel/drivers/spi/spi-sh-msiof.c b/kernel/drivers/spi/spi-sh-msiof.c index bcc7c635d..7872f3c78 100644 --- a/kernel/drivers/spi/spi-sh-msiof.c +++ b/kernel/drivers/spi/spi-sh-msiof.c @@ -48,8 +48,8 @@ struct sh_msiof_spi_priv { const struct sh_msiof_chipdata *chipdata; struct sh_msiof_spi_info *info; struct completion done; - int tx_fifo_size; - int rx_fifo_size; + unsigned int tx_fifo_size; + unsigned int rx_fifo_size; void *tx_dma_page; void *rx_dma_page; dma_addr_t tx_dma_addr; @@ -95,8 +95,6 @@ struct sh_msiof_spi_priv { #define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ #define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */ -#define MAX_WDLEN 256U - /* TSCR and RSCR */ #define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */ #define SCR_BRPS(i) (((i) - 1) << 8) @@ -850,7 +848,12 @@ static int sh_msiof_transfer_one(struct spi_master *master, * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. word swapping. */ - unsigned int l = min(len, MAX_WDLEN * 4); + unsigned int l = 0; + + if (tx_buf) + l = min(len, p->tx_fifo_size * 4); + if (rx_buf) + l = min(len, p->rx_fifo_size * 4); if (bits <= 8) { if (l & 3) @@ -963,7 +966,7 @@ static const struct sh_msiof_chipdata sh_data = { static const struct sh_msiof_chipdata r8a779x_data = { .tx_fifo_size = 64, - .rx_fifo_size = 256, + .rx_fifo_size = 64, .master_flags = SPI_MASTER_MUST_TX, }; diff --git a/kernel/drivers/spi/spi-xilinx.c b/kernel/drivers/spi/spi-xilinx.c index 133f53a9c..a339c1e99 100644 --- a/kernel/drivers/spi/spi-xilinx.c +++ b/kernel/drivers/spi/spi-xilinx.c @@ -249,19 +249,23 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) xspi->tx_ptr = t->tx_buf; xspi->rx_ptr = t->rx_buf; remaining_words = t->len / xspi->bytes_per_word; - reinit_completion(&xspi->done); if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) { + u32 isr; use_irq = true; - xspi->write_fn(XSPI_INTR_TX_EMPTY, - xspi->regs + XIPIF_V123B_IISR_OFFSET); - /* Enable the global IPIF interrupt */ - xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, - xspi->regs + XIPIF_V123B_DGIER_OFFSET); /* Inhibit irq to avoid spurious irqs on tx_empty*/ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, xspi->regs + XSPI_CR_OFFSET); + /* ACK old irqs (if any) */ + isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET); + if (isr) + xspi->write_fn(isr, + xspi->regs + XIPIF_V123B_IISR_OFFSET); + /* Enable the global IPIF interrupt */ + xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, + xspi->regs + XIPIF_V123B_DGIER_OFFSET); + reinit_completion(&xspi->done); } while (remaining_words) { @@ -302,8 +306,10 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) remaining_words -= n_words; } - if (use_irq) + if (use_irq) { xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET); + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + } return 
t->len; } diff --git a/kernel/drivers/staging/comedi/drivers/adl_pci7x3x.c b/kernel/drivers/staging/comedi/drivers/adl_pci7x3x.c index 934af3ff7..b0fc027cf 100644 --- a/kernel/drivers/staging/comedi/drivers/adl_pci7x3x.c +++ b/kernel/drivers/staging/comedi/drivers/adl_pci7x3x.c @@ -120,8 +120,20 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev, { unsigned long reg = (unsigned long)s->private; - if (comedi_dio_update_state(s, data)) - outl(s->state, dev->iobase + reg); + if (comedi_dio_update_state(s, data)) { + unsigned int val = s->state; + + if (s->n_chan == 16) { + /* + * It seems the PCI-7230 needs the 16-bit DO state + * to be shifted left by 16 bits before being written + * to the 32-bit register. Set the value in both + * halves of the register to be sure. + */ + val |= val << 16; + } + outl(val, dev->iobase + reg); + } data[1] = s->state; diff --git a/kernel/drivers/staging/comedi/drivers/usbduxsigma.c b/kernel/drivers/staging/comedi/drivers/usbduxsigma.c index eaa9add49..dc0b25a54 100644 --- a/kernel/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/kernel/drivers/staging/comedi/drivers/usbduxsigma.c @@ -550,27 +550,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev, if (err) return 3; - /* Step 4: fix up any arguments */ - - if (high_speed) { - /* - * every 2 channels get a time window of 125us. Thus, if we - * sample all 16 channels we need 1ms. If we sample only one - * channel we need only 125us - */ - devpriv->ai_interval = interval; - devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval); - } else { - /* interval always 1ms */ - devpriv->ai_interval = 1; - devpriv->ai_timer = cmd->scan_begin_arg / 1000000; - } - if (devpriv->ai_timer < 1) - err |= -EINVAL; - - if (err) - return 4; - return 0; } @@ -668,6 +647,22 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev, down(&devpriv->sem); + if (devpriv->high_speed) { + /* + * every 2 channels get a time window of 125us. Thus, if we + * sample all 16 channels we need 1ms. 
If we sample only one + * channel we need only 125us + */ + unsigned int interval = usbduxsigma_chans_to_interval(len); + + devpriv->ai_interval = interval; + devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval); + } else { + /* interval always 1ms */ + devpriv->ai_interval = 1; + devpriv->ai_timer = cmd->scan_begin_arg / 1000000; + } + for (i = 0; i < len; i++) { unsigned int chan = CR_CHAN(cmd->chanlist[i]); @@ -917,25 +912,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev, if (err) return 3; - /* Step 4: fix up any arguments */ - - /* we count in timer steps */ - if (high_speed) { - /* timing of the conversion itself: every 125 us */ - devpriv->ao_timer = cmd->convert_arg / 125000; - } else { - /* - * timing of the scan: every 1ms - * we get all channels at once - */ - devpriv->ao_timer = cmd->scan_begin_arg / 1000000; - } - if (devpriv->ao_timer < 1) - err |= -EINVAL; - - if (err) - return 4; - return 0; } @@ -948,6 +924,20 @@ static int usbduxsigma_ao_cmd(struct comedi_device *dev, down(&devpriv->sem); + if (cmd->convert_src == TRIG_TIMER) { + /* + * timing of the conversion itself: every 125 us + * at high speed (not used yet) + */ + devpriv->ao_timer = cmd->convert_arg / 125000; + } else { + /* + * timing of the scan: every 1ms + * we get all channels at once + */ + devpriv->ao_timer = cmd->scan_begin_arg / 1000000; + } + devpriv->ao_counter = devpriv->ao_timer; if (cmd->start_src == TRIG_NOW) { diff --git a/kernel/drivers/tty/serial/8250/8250_pci.c b/kernel/drivers/tty/serial/8250/8250_pci.c index 46bcebba5..9373cca12 100644 --- a/kernel/drivers/tty/serial/8250/8250_pci.c +++ b/kernel/drivers/tty/serial/8250/8250_pci.c @@ -2000,6 +2000,12 @@ pci_wch_ch38x_setup(struct serial_private *priv, #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 +#define PCI_VENDOR_ID_PERICOM 0x12D8 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 @@ -2314,27 +2320,12 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { * Pericom */ { - .vendor = 0x12d8, - .device = 0x7952, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .setup = pci_pericom_setup, - }, - { - .vendor = 0x12d8, - .device = 0x7954, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .setup = pci_pericom_setup, - }, - { - .vendor = 0x12d8, - .device = 0x7958, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .setup = pci_pericom_setup, + .vendor = PCI_VENDOR_ID_PERICOM, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, }, - /* * PLX */ @@ -3031,6 +3022,10 @@ enum pci_board_num_t { pbn_fintek_8, pbn_fintek_12, pbn_wch384_4, + pbn_pericom_PI7C9X7951, + pbn_pericom_PI7C9X7952, + pbn_pericom_PI7C9X7954, + pbn_pericom_PI7C9X7958, }; /* @@ -3848,7 +3843,6 @@ static struct pciserial_board pci_boards[] = { .base_baud = 115200, .first_offset = 0x40, }, - [pbn_wch384_4] = { .flags = FL_BASE0, .num_ports = 4, @@ -3856,6 +3850,33 @@ static struct pciserial_board pci_boards[] = { .uart_offset = 8, .first_offset = 0xC0, }, + /* + * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART + */ + [pbn_pericom_PI7C9X7951] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_pericom_PI7C9X7952] = { + .flags = 
FL_BASE0, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_pericom_PI7C9X7954] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_pericom_PI7C9X7958] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 0x8, + }, }; static const struct pci_device_id blacklist[] = { @@ -5117,6 +5138,25 @@ static struct pci_device_id serial_pci_tbl[] = { 0, 0, pbn_exar_XR17V8358 }, /* + * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART + */ + { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951, + PCI_ANY_ID, PCI_ANY_ID, + 0, + 0, pbn_pericom_PI7C9X7951 }, + { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952, + PCI_ANY_ID, PCI_ANY_ID, + 0, + 0, pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954, + PCI_ANY_ID, PCI_ANY_ID, + 0, + 0, pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958, + PCI_ANY_ID, PCI_ANY_ID, + 0, + 0, pbn_pericom_PI7C9X7958 }, + /* * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) */ { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, diff --git a/kernel/drivers/tty/serial/8250/8250_pnp.c b/kernel/drivers/tty/serial/8250/8250_pnp.c index 50a09cd76..658b392d1 100644 --- a/kernel/drivers/tty/serial/8250/8250_pnp.c +++ b/kernel/drivers/tty/serial/8250/8250_pnp.c @@ -41,6 +41,12 @@ static const struct pnp_device_id pnp_dev_table[] = { { "AEI1240", 0 }, /* Rockwell 56K ACF II Fax+Data+Voice Modem */ { "AKY1021", 0 /*SPCI_FL_NO_SHIRQ*/ }, + /* + * ALi Fast Infrared Controller + * Native driver (ali-ircc) is broken so at least + * it can be used with irtty-sir. + */ + { "ALI5123", 0 }, /* AZT3005 PnP SOUND DEVICE */ { "AZT4001", 0 }, /* Best Data Products Inc. Smart One 336F PnP Modem */ @@ -364,6 +370,11 @@ static const struct pnp_device_id pnp_dev_table[] = { /* Winbond CIR port, should not be probed. We should keep track of it to prevent the legacy serial driver from probing it */ { "WEC1022", CIR_PORT }, + /* + * SMSC IrCC SIR/FIR port, should not be probed by serial driver + * as well so its own driver can bind to it. 
+ */ + { "SMCF010", CIR_PORT }, { "", 0 } }; diff --git a/kernel/drivers/tty/serial/men_z135_uart.c b/kernel/drivers/tty/serial/men_z135_uart.c index 35c55505b..5a41b8fbb 100644 --- a/kernel/drivers/tty/serial/men_z135_uart.c +++ b/kernel/drivers/tty/serial/men_z135_uart.c @@ -392,7 +392,6 @@ static irqreturn_t men_z135_intr(int irq, void *data) struct men_z135_port *uart = (struct men_z135_port *)data; struct uart_port *port = &uart->port; bool handled = false; - unsigned long flags; int irq_id; uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG); @@ -401,7 +400,7 @@ static irqreturn_t men_z135_intr(int irq, void *data) if (!irq_id) goto out; - spin_lock_irqsave(&port->lock, flags); + spin_lock(&port->lock); /* It's save to write to IIR[7:6] RXC[9:8] */ iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG); @@ -427,7 +426,7 @@ static irqreturn_t men_z135_intr(int irq, void *data) handled = true; } - spin_unlock_irqrestore(&port->lock, flags); + spin_unlock(&port->lock); out: return IRQ_RETVAL(handled); } @@ -717,7 +716,7 @@ static void men_z135_set_termios(struct uart_port *port, baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16); - spin_lock(&port->lock); + spin_lock_irq(&port->lock); if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); @@ -725,7 +724,7 @@ static void men_z135_set_termios(struct uart_port *port, iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG); uart_update_timeout(port, termios->c_cflag, baud); - spin_unlock(&port->lock); + spin_unlock_irq(&port->lock); } static const char *men_z135_type(struct uart_port *port) diff --git a/kernel/drivers/tty/serial/samsung.c b/kernel/drivers/tty/serial/samsung.c index a0ae942d9..1e0d9b8c4 100644 --- a/kernel/drivers/tty/serial/samsung.c +++ b/kernel/drivers/tty/serial/samsung.c @@ -295,15 +295,6 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport, if (ourport->tx_mode != S3C24XX_TX_DMA) enable_tx_dma(ourport); - while (xmit->tail & (dma_get_cache_alignment() - 1)) { - if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull) - return 0; - wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - count--; - } - dma->tx_size = count & ~(dma_get_cache_alignment() - 1); dma->tx_transfer_addr = dma->tx_addr + xmit->tail; @@ -342,7 +333,9 @@ static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport) return; } - if (!ourport->dma || !ourport->dma->tx_chan || count < port->fifosize) + if (!ourport->dma || !ourport->dma->tx_chan || + count < ourport->min_dma_size || + xmit->tail & (dma_get_cache_alignment() - 1)) s3c24xx_serial_start_tx_pio(ourport); else s3c24xx_serial_start_tx_dma(ourport, count); @@ -736,15 +729,20 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id) struct uart_port *port = &ourport->port; struct circ_buf *xmit = &port->state->xmit; unsigned long flags; - int count; + int count, dma_count = 0; spin_lock_irqsave(&port->lock, flags); count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); - if (ourport->dma && ourport->dma->tx_chan && count >= port->fifosize) { - s3c24xx_serial_start_tx_dma(ourport, count); - goto out; + if (ourport->dma && ourport->dma->tx_chan && + count >= ourport->min_dma_size) { + int align = dma_get_cache_alignment() - + (xmit->tail & (dma_get_cache_alignment() - 1)); + if (count-align >= ourport->min_dma_size) { + dma_count = count-align; + count = align; + } } if (port->x_char) { @@ -765,14 +763,24 @@ 
static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id) /* try and drain the buffer... */ - count = port->fifosize; - while (!uart_circ_empty(xmit) && count-- > 0) { + if (count > port->fifosize) { + count = port->fifosize; + dma_count = 0; + } + + while (!uart_circ_empty(xmit) && count > 0) { if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull) break; wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; + count--; + } + + if (!count && dma_count) { + s3c24xx_serial_start_tx_dma(ourport, dma_count); + goto out; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) { @@ -1838,6 +1846,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) else if (ourport->info->fifosize) ourport->port.fifosize = ourport->info->fifosize; + /* + * DMA transfers must be aligned at least to cache line size, + * so find minimal transfer size suitable for DMA mode + */ + ourport->min_dma_size = max_t(int, ourport->port.fifosize, + dma_get_cache_alignment()); + probe_index++; dbg("%s: initialising port %p...\n", __func__, ourport); diff --git a/kernel/drivers/tty/serial/samsung.h b/kernel/drivers/tty/serial/samsung.h index d275032aa..fc5deaa4f 100644 --- a/kernel/drivers/tty/serial/samsung.h +++ b/kernel/drivers/tty/serial/samsung.h @@ -82,6 +82,7 @@ struct s3c24xx_uart_port { unsigned char tx_claimed; unsigned int pm_level; unsigned long baudclk_rate; + unsigned int min_dma_size; unsigned int rx_irq; unsigned int tx_irq; diff --git a/kernel/drivers/usb/dwc3/ep0.c b/kernel/drivers/usb/dwc3/ep0.c index 69e769c35..06ecd1e68 100644 --- a/kernel/drivers/usb/dwc3/ep0.c +++ b/kernel/drivers/usb/dwc3/ep0.c @@ -820,6 +820,11 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, unsigned maxp = ep0->endpoint.maxpacket; transfer_size += (maxp - (transfer_size % maxp)); + + /* Maximum of DWC3_EP0_BOUNCE_SIZE can only be received */ + if (transfer_size > DWC3_EP0_BOUNCE_SIZE) + transfer_size = DWC3_EP0_BOUNCE_SIZE; + transferred = min_t(u32, ur->length, transfer_size - length); memcpy(ur->buf, dwc->ep0_bounce, transferred); @@ -941,11 +946,14 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, return; } - WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE); - maxpacket = dep->endpoint.maxpacket; transfer_size = roundup(req->request.length, maxpacket); + if (transfer_size > DWC3_EP0_BOUNCE_SIZE) { + dev_WARN(dwc->dev, "bounce buf can't handle req len\n"); + transfer_size = DWC3_EP0_BOUNCE_SIZE; + } + dwc->ep0_bounced = true; /* diff --git a/kernel/drivers/usb/gadget/function/f_uac2.c b/kernel/drivers/usb/gadget/function/f_uac2.c index 531861547..96d935b00 100644 --- a/kernel/drivers/usb/gadget/function/f_uac2.c +++ b/kernel/drivers/usb/gadget/function/f_uac2.c @@ -975,6 +975,29 @@ free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep) "%s:%d Error!\n", __func__, __LINE__); } +static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, + struct usb_endpoint_descriptor *ep_desc, + unsigned int factor, bool is_playback) +{ + int chmask, srate, ssize; + u16 max_packet_size; + + if (is_playback) { + chmask = uac2_opts->p_chmask; + srate = uac2_opts->p_srate; + ssize = uac2_opts->p_ssize; + } else { + chmask = uac2_opts->c_chmask; + srate = uac2_opts->c_srate; + ssize = uac2_opts->c_ssize; + } + + max_packet_size = num_channels(chmask) * ssize * + DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); + ep_desc->wMaxPacketSize = cpu_to_le16(min(max_packet_size, + le16_to_cpu(ep_desc->wMaxPacketSize))); 
+} + static int afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) { @@ -1070,10 +1093,14 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) uac2->p_prm.uac2 = uac2; uac2->c_prm.uac2 = uac2; + /* Calculate wMaxPacketSize according to audio bandwidth */ + set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true); + set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false); + set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true); + set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false); + hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; - hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize; hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; - hs_epin_desc.wMaxPacketSize = fs_epin_desc.wMaxPacketSize; ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL); if (ret) diff --git a/kernel/drivers/usb/gadget/udc/m66592-udc.c b/kernel/drivers/usb/gadget/udc/m66592-udc.c index 309706fe4..9704053df 100644 --- a/kernel/drivers/usb/gadget/udc/m66592-udc.c +++ b/kernel/drivers/usb/gadget/udc/m66592-udc.c @@ -1052,7 +1052,7 @@ static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) tmp = m66592_read(m66592, M66592_INTSTS0) & M66592_CTSQ; udelay(1); - } while (tmp != M66592_CS_IDST || timeout-- > 0); + } while (tmp != M66592_CS_IDST && timeout-- > 0); if (tmp == M66592_CS_IDST) m66592_bset(m66592, diff --git a/kernel/drivers/usb/host/ehci-sysfs.c b/kernel/drivers/usb/host/ehci-sysfs.c index 5e44407aa..5216f2b09 100644 --- a/kernel/drivers/usb/host/ehci-sysfs.c +++ b/kernel/drivers/usb/host/ehci-sysfs.c @@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev, int count = PAGE_SIZE; char *ptr = buf; - ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev))); + ehci = hcd_to_ehci(dev_get_drvdata(dev)); nports = HCS_N_PORTS(ehci->hcs_params); for (index = 0; index < nports; ++index) { @@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev, struct ehci_hcd *ehci; int portnum, new_owner; - ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev))); + ehci = hcd_to_ehci(dev_get_drvdata(dev)); new_owner = PORT_OWNER; /* Owned by companion */ if (sscanf(buf, "%d", &portnum) != 1) return -EINVAL; @@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev, struct ehci_hcd *ehci; int n; - ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev))); + ehci = hcd_to_ehci(dev_get_drvdata(dev)); n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max); return n; } @@ -101,7 +101,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev, unsigned long flags; ssize_t ret; - ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev))); + ehci = hcd_to_ehci(dev_get_drvdata(dev)); if (kstrtouint(buf, 0, &uframe_periodic_max) < 0) return -EINVAL; diff --git a/kernel/drivers/usb/serial/ftdi_sio.c b/kernel/drivers/usb/serial/ftdi_sio.c index 4c8b3b821..a5a0376bb 100644 --- a/kernel/drivers/usb/serial/ftdi_sio.c +++ b/kernel/drivers/usb/serial/ftdi_sio.c @@ -605,6 +605,10 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) }, /* * ELV devices: */ diff --git 
a/kernel/drivers/usb/serial/ftdi_sio_ids.h b/kernel/drivers/usb/serial/ftdi_sio_ids.h index 792e05412..2943b97b2 100644 --- a/kernel/drivers/usb/serial/ftdi_sio_ids.h +++ b/kernel/drivers/usb/serial/ftdi_sio_ids.h @@ -568,6 +568,14 @@ */ #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */ +/* + * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID) + */ +#define FTDI_CUSTOMWARE_MINIPLEX_PID 0xfd48 /* MiniPlex first generation NMEA Multiplexer */ +#define FTDI_CUSTOMWARE_MINIPLEX2_PID 0xfd49 /* MiniPlex-USB and MiniPlex-2 series */ +#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID 0xfd4a /* MiniPlex-2Wi */ +#define FTDI_CUSTOMWARE_MINIPLEX3_PID 0xfd4b /* MiniPlex-3 series */ + /********************************/ /** third-party VID/PID combos **/ diff --git a/kernel/drivers/usb/serial/pl2303.c b/kernel/drivers/usb/serial/pl2303.c index f5257af33..ae682e4ee 100644 --- a/kernel/drivers/usb/serial/pl2303.c +++ b/kernel/drivers/usb/serial/pl2303.c @@ -362,21 +362,38 @@ static speed_t pl2303_encode_baud_rate_direct(unsigned char buf[4], static speed_t pl2303_encode_baud_rate_divisor(unsigned char buf[4], speed_t baud) { - unsigned int tmp; + unsigned int baseline, mantissa, exponent; /* * Apparently the formula is: - * baudrate = 12M * 32 / (2^buf[1]) / buf[0] + * baudrate = 12M * 32 / (mantissa * 4^exponent) + * where + * mantissa = buf[8:0] + * exponent = buf[11:9] */ - tmp = 12000000 * 32 / baud; + baseline = 12000000 * 32; + mantissa = baseline / baud; + if (mantissa == 0) + mantissa = 1; /* Avoid dividing by zero if baud > 32*12M. */ + exponent = 0; + while (mantissa >= 512) { + if (exponent < 7) { + mantissa >>= 2; /* divide by 4 */ + exponent++; + } else { + /* Exponent is maxed. Trim mantissa and leave. */ + mantissa = 511; + break; + } + } + buf[3] = 0x80; buf[2] = 0; - buf[1] = (tmp >= 256); - while (tmp >= 256) { - tmp >>= 2; - buf[1] <<= 1; - } - buf[0] = tmp; + buf[1] = exponent << 1 | mantissa >> 8; + buf[0] = mantissa & 0xff; + + /* Calculate and return the exact baud rate. 
*/ + baud = (baseline / mantissa) >> (exponent << 1); return baud; } diff --git a/kernel/drivers/usb/serial/qcserial.c b/kernel/drivers/usb/serial/qcserial.c index d15654572..ebcec8cda 100644 --- a/kernel/drivers/usb/serial/qcserial.c +++ b/kernel/drivers/usb/serial/qcserial.c @@ -139,6 +139,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */ /* non-Gobi Sierra Wireless devices */ + {DEVICE_SWI(0x03f0, 0x4e1d)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */ {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */ {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ diff --git a/kernel/drivers/usb/serial/symbolserial.c b/kernel/drivers/usb/serial/symbolserial.c index 8fceec729..6ed804450 100644 --- a/kernel/drivers/usb/serial/symbolserial.c +++ b/kernel/drivers/usb/serial/symbolserial.c @@ -94,7 +94,7 @@ exit: static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port) { - struct symbol_private *priv = usb_get_serial_data(port->serial); + struct symbol_private *priv = usb_get_serial_port_data(port); unsigned long flags; int result = 0; @@ -120,7 +120,7 @@ static void symbol_close(struct usb_serial_port *port) static void symbol_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; - struct symbol_private *priv = usb_get_serial_data(port->serial); + struct symbol_private *priv = usb_get_serial_port_data(port); spin_lock_irq(&priv->lock); priv->throttled = true; @@ -130,7 +130,7 @@ static void symbol_throttle(struct tty_struct *tty) static void symbol_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; - struct symbol_private *priv = usb_get_serial_data(port->serial); + struct symbol_private *priv = usb_get_serial_port_data(port); int result; bool was_throttled; diff --git a/kernel/fs/btrfs/transaction.c b/kernel/fs/btrfs/transaction.c index 94e909c5a..00d18c2bd 100644 --- a/kernel/fs/btrfs/transaction.c +++ b/kernel/fs/btrfs/transaction.c @@ -1875,8 +1875,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, spin_unlock(&root->fs_info->trans_lock); wait_for_commit(root, prev_trans); + ret = prev_trans->aborted; btrfs_put_transaction(prev_trans); + if (ret) + goto cleanup_transaction; } else { spin_unlock(&root->fs_info->trans_lock); } diff --git a/kernel/fs/ceph/super.c b/kernel/fs/ceph/super.c index 4e9905374..0d47422e3 100644 --- a/kernel/fs/ceph/super.c +++ b/kernel/fs/ceph/super.c @@ -466,7 +466,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT) seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes); if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT)) - seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name); + seq_show_option(m, "snapdirname", fsopt->snapdir_name); return 0; } diff --git a/kernel/fs/cifs/cifsfs.c b/kernel/fs/cifs/cifsfs.c index 0a9fb6b53..6a1119e87 100644 --- a/kernel/fs/cifs/cifsfs.c +++ b/kernel/fs/cifs/cifsfs.c @@ -394,17 +394,17 @@ cifs_show_options(struct seq_file *s, struct dentry *root) struct sockaddr *srcaddr; srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; - seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string); + seq_show_option(s, "vers", tcon->ses->server->vals->version_string); cifs_show_security(s, tcon->ses); cifs_show_cache_flavor(s, cifs_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) seq_puts(s, 
",multiuser"); else if (tcon->ses->user_name) - seq_printf(s, ",username=%s", tcon->ses->user_name); + seq_show_option(s, "username", tcon->ses->user_name); if (tcon->ses->domainName) - seq_printf(s, ",domain=%s", tcon->ses->domainName); + seq_show_option(s, "domain", tcon->ses->domainName); if (srcaddr->sa_family != AF_UNSPEC) { struct sockaddr_in *saddr4; diff --git a/kernel/fs/cifs/ioctl.c b/kernel/fs/cifs/ioctl.c index 8b7898b76..64a9bca97 100644 --- a/kernel/fs/cifs/ioctl.c +++ b/kernel/fs/cifs/ioctl.c @@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, goto out_drop_write; } + if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) { + rc = -EBADF; + cifs_dbg(VFS, "src file seems to be from a different filesystem type\n"); + goto out_fput; + } + if ((!src_file.file->private_data) || (!dst_file->private_data)) { rc = -EBADF; cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); diff --git a/kernel/fs/coredump.c b/kernel/fs/coredump.c index bbbe139ab..8dd099dc5 100644 --- a/kernel/fs/coredump.c +++ b/kernel/fs/coredump.c @@ -506,10 +506,10 @@ void do_coredump(const siginfo_t *siginfo) const struct cred *old_cred; struct cred *cred; int retval = 0; - int flag = 0; int ispipe; struct files_struct *displaced; - bool need_nonrelative = false; + /* require nonrelative corefile path and be extra careful */ + bool need_suid_safe = false; bool core_dumped = false; static atomic_t core_dump_count = ATOMIC_INIT(0); struct coredump_params cprm = { @@ -543,9 +543,8 @@ void do_coredump(const siginfo_t *siginfo) */ if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) { /* Setuid core dump mode */ - flag = O_EXCL; /* Stop rewrite attacks */ cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */ - need_nonrelative = true; + need_suid_safe = true; } retval = coredump_wait(siginfo->si_signo, &core_state); @@ -626,7 +625,7 @@ void do_coredump(const siginfo_t *siginfo) if (cprm.limit < binfmt->min_coredump) goto fail_unlock; - if (need_nonrelative && cn.corename[0] != '/') { + if (need_suid_safe && cn.corename[0] != '/') { printk(KERN_WARNING "Pid %d(%s) can only dump core "\ "to fully qualified path!\n", task_tgid_vnr(current), current->comm); @@ -634,8 +633,35 @@ void do_coredump(const siginfo_t *siginfo) goto fail_unlock; } + /* + * Unlink the file if it exists unless this is a SUID + * binary - in that case, we're running around with root + * privs and don't want to unlink another user's coredump. + */ + if (!need_suid_safe) { + mm_segment_t old_fs; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + /* + * If it doesn't exist, that's fine. If there's some + * other problem, we'll catch it at the filp_open(). + */ + (void) sys_unlink((const char __user *)cn.corename); + set_fs(old_fs); + } + + /* + * There is a race between unlinking and creating the + * file, but if that causes an EEXIST here, that's + * fine - another process raced with us while creating + * the corefile, and the other process won. To userspace, + * what matters is that at least one of the two processes + * writes its coredump successfully, not which one. + */ cprm.file = filp_open(cn.corename, - O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, + O_CREAT | 2 | O_NOFOLLOW | + O_LARGEFILE | O_EXCL, 0600); if (IS_ERR(cprm.file)) goto fail_unlock; @@ -652,11 +678,15 @@ void do_coredump(const siginfo_t *siginfo) if (!S_ISREG(inode->i_mode)) goto close_fail; /* - * Dont allow local users get cute and trick others to coredump - * into their pre-created files. 
+ * Don't dump core if the filesystem changed owner or mode + * of the file during file creation. This is an issue when + * a process dumps core while its cwd is e.g. on a vfat + * filesystem. */ if (!uid_eq(inode->i_uid, current_fsuid())) goto close_fail; + if ((inode->i_mode & 0677) != 0600) + goto close_fail; if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) goto close_fail; if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) diff --git a/kernel/fs/ecryptfs/dentry.c b/kernel/fs/ecryptfs/dentry.c index 8db0b4644..63cd2c147 100644 --- a/kernel/fs/ecryptfs/dentry.c +++ b/kernel/fs/ecryptfs/dentry.c @@ -45,20 +45,20 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); - int rc; - - if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) - return 1; + int rc = 1; if (flags & LOOKUP_RCU) return -ECHILD; - rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); + if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) + rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); + if (d_really_is_positive(dentry)) { - struct inode *lower_inode = - ecryptfs_inode_to_lower(d_inode(dentry)); + struct inode *inode = d_inode(dentry); - fsstack_copy_attr_all(d_inode(dentry), lower_inode); + fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode)); + if (!inode->i_nlink) + return 0; } return rc; } diff --git a/kernel/fs/ext4/super.c b/kernel/fs/ext4/super.c index ca12affdb..ff89971e3 100644 --- a/kernel/fs/ext4/super.c +++ b/kernel/fs/ext4/super.c @@ -324,6 +324,22 @@ static void save_error_info(struct super_block *sb, const char *func, ext4_commit_super(sb, 1); } +/* + * The del_gendisk() function uninitializes the disk-specific data + * structures, including the bdi structure, without telling anyone + * else. Once this happens, any attempt to call mark_buffer_dirty() + * (for example, by ext4_commit_super), will cause a kernel OOPS. + * This is a kludge to prevent these oops until we can put in a proper + * hook in del_gendisk() to inform the VFS and file system layers. + */ +static int block_device_ejected(struct super_block *sb) +{ + struct inode *bd_inode = sb->s_bdev->bd_inode; + struct backing_dev_info *bdi = inode_to_bdi(bd_inode); + + return bdi->dev == NULL; +} + static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) { struct super_block *sb = journal->j_private; @@ -1738,10 +1754,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq, } if (sbi->s_qf_names[USRQUOTA]) - seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]); + seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) - seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); + seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]); #endif } @@ -4591,7 +4607,7 @@ static int ext4_commit_super(struct super_block *sb, int sync) struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; - if (!sbh) + if (!sbh || block_device_ejected(sb)) return error; if (buffer_write_io_error(sbh)) { /* @@ -4807,10 +4823,11 @@ static int ext4_freeze(struct super_block *sb) error = jbd2_journal_flush(journal); if (error < 0) goto out; + + /* Journal blocked and flushed, clear needs_recovery flag. */ + EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); } - /* Journal blocked and flushed, clear needs_recovery flag. 
*/ - EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); out: if (journal) @@ -4828,8 +4845,11 @@ static int ext4_unfreeze(struct super_block *sb) if (sb->s_flags & MS_RDONLY) return 0; - /* Reset the needs_recovery flag before the fs is unlocked. */ - EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); + if (EXT4_SB(sb)->s_journal) { + /* Reset the needs_recovery flag before the fs is unlocked. */ + EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); + } + ext4_commit_super(sb, 1); return 0; } diff --git a/kernel/fs/gfs2/super.c b/kernel/fs/gfs2/super.c index 859c6edbf..c18b49dc5 100644 --- a/kernel/fs/gfs2/super.c +++ b/kernel/fs/gfs2/super.c @@ -1334,11 +1334,11 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) if (is_ancestor(root, sdp->sd_master_dir)) seq_puts(s, ",meta"); if (args->ar_lockproto[0]) - seq_printf(s, ",lockproto=%s", args->ar_lockproto); + seq_show_option(s, "lockproto", args->ar_lockproto); if (args->ar_locktable[0]) - seq_printf(s, ",locktable=%s", args->ar_locktable); + seq_show_option(s, "locktable", args->ar_locktable); if (args->ar_hostdata[0]) - seq_printf(s, ",hostdata=%s", args->ar_hostdata); + seq_show_option(s, "hostdata", args->ar_hostdata); if (args->ar_spectator) seq_puts(s, ",spectator"); if (args->ar_localflocks) diff --git a/kernel/fs/hfs/bnode.c b/kernel/fs/hfs/bnode.c index d3fa6bd95..221719eac 100644 --- a/kernel/fs/hfs/bnode.c +++ b/kernel/fs/hfs/bnode.c @@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) page_cache_release(page); goto fail; } - page_cache_release(page); node->page[i] = page; } @@ -398,11 +397,11 @@ node_error: void hfs_bnode_free(struct hfs_bnode *node) { - //int i; + int i; - //for (i = 0; i < node->tree->pages_per_bnode; i++) - // if (node->page[i]) - // page_cache_release(node->page[i]); + for (i = 0; i < node->tree->pages_per_bnode; i++) + if (node->page[i]) + page_cache_release(node->page[i]); kfree(node); } diff --git a/kernel/fs/hfs/brec.c b/kernel/fs/hfs/brec.c index 9f4ee7f52..6fc766df0 100644 --- a/kernel/fs/hfs/brec.c +++ b/kernel/fs/hfs/brec.c @@ -131,13 +131,16 @@ skip: hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_bnode_dump(node); - if (new_node) { - /* update parent key if we inserted a key - * at the start of the first node - */ - if (!rec && new_node != node) - hfs_brec_update_parent(fd); + /* + * update parent key if we inserted a key + * at the start of the node and it is not the new node + */ + if (!rec && new_node != node) { + hfs_bnode_read_key(node, fd->search_key, data_off + size); + hfs_brec_update_parent(fd); + } + if (new_node) { hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); @@ -166,9 +169,6 @@ skip: goto again; } - if (!rec) - hfs_brec_update_parent(fd); - return 0; } @@ -366,6 +366,8 @@ again: if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd); + if (fd->record < 0) + return -ENOENT; hfs_bnode_dump(parent); rec = fd->record; diff --git a/kernel/fs/hfs/super.c b/kernel/fs/hfs/super.c index eee7206c3..410b65eea 100644 --- a/kernel/fs/hfs/super.c +++ b/kernel/fs/hfs/super.c @@ -135,9 +135,9 @@ static int hfs_show_options(struct seq_file *seq, struct dentry *root) struct hfs_sb_info *sbi = HFS_SB(root->d_sb); if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f)) - seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator); + seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4); if (sbi->s_type != 
cpu_to_be32(0x3f3f3f3f)) - seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type); + seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4); seq_printf(seq, ",uid=%u,gid=%u", from_kuid_munged(&init_user_ns, sbi->s_uid), from_kgid_munged(&init_user_ns, sbi->s_gid)); diff --git a/kernel/fs/hfsplus/bnode.c b/kernel/fs/hfsplus/bnode.c index 759708fd9..63924662a 100644 --- a/kernel/fs/hfsplus/bnode.c +++ b/kernel/fs/hfsplus/bnode.c @@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) page_cache_release(page); goto fail; } - page_cache_release(page); node->page[i] = page; } @@ -566,13 +565,11 @@ node_error: void hfs_bnode_free(struct hfs_bnode *node) { -#if 0 int i; for (i = 0; i < node->tree->pages_per_bnode; i++) if (node->page[i]) page_cache_release(node->page[i]); -#endif kfree(node); } diff --git a/kernel/fs/hfsplus/options.c b/kernel/fs/hfsplus/options.c index c90b72ee6..bb806e58c 100644 --- a/kernel/fs/hfsplus/options.c +++ b/kernel/fs/hfsplus/options.c @@ -218,9 +218,9 @@ int hfsplus_show_options(struct seq_file *seq, struct dentry *root) struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb); if (sbi->creator != HFSPLUS_DEF_CR_TYPE) - seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator); + seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4); if (sbi->type != HFSPLUS_DEF_CR_TYPE) - seq_printf(seq, ",type=%.4s", (char *)&sbi->type); + seq_show_option_n(seq, "type", (char *)&sbi->type, 4); seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask, from_kuid_munged(&init_user_ns, sbi->uid), from_kgid_munged(&init_user_ns, sbi->gid)); diff --git a/kernel/fs/hostfs/hostfs_kern.c b/kernel/fs/hostfs/hostfs_kern.c index 07d8d8f52..de2d6245e 100644 --- a/kernel/fs/hostfs/hostfs_kern.c +++ b/kernel/fs/hostfs/hostfs_kern.c @@ -260,7 +260,7 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root) size_t offset = strlen(root_ino) + 1; if (strlen(root_path) > offset) - seq_printf(seq, ",%s", root_path + offset); + seq_show_option(seq, root_path + offset, NULL); if (append) seq_puts(seq, ",append"); diff --git a/kernel/fs/hpfs/namei.c b/kernel/fs/hpfs/namei.c index a0872f239..9e92c9c2d 100644 --- a/kernel/fs/hpfs/namei.c +++ b/kernel/fs/hpfs/namei.c @@ -8,6 +8,17 @@ #include <linux/sched.h> #include "hpfs_fn.h" +static void hpfs_update_directory_times(struct inode *dir) +{ + time_t t = get_seconds(); + if (t == dir->i_mtime.tv_sec && + t == dir->i_ctime.tv_sec) + return; + dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t; + dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0; + hpfs_write_inode_nolock(dir); +} + static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { const unsigned char *name = dentry->d_name.name; @@ -99,6 +110,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) result->i_mode = mode | S_IFDIR; hpfs_write_inode_nolock(result); } + hpfs_update_directory_times(dir); d_instantiate(dentry, result); hpfs_unlock(dir->i_sb); return 0; @@ -187,6 +199,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, b result->i_mode = mode | S_IFREG; hpfs_write_inode_nolock(result); } + hpfs_update_directory_times(dir); d_instantiate(dentry, result); hpfs_unlock(dir->i_sb); return 0; @@ -262,6 +275,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, de insert_inode_hash(result); hpfs_write_inode_nolock(result); + hpfs_update_directory_times(dir); d_instantiate(dentry, result); brelse(bh); hpfs_unlock(dir->i_sb); @@ -340,6 +354,7 @@ 
static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy insert_inode_hash(result); hpfs_write_inode_nolock(result); + hpfs_update_directory_times(dir); d_instantiate(dentry, result); hpfs_unlock(dir->i_sb); return 0; @@ -423,6 +438,8 @@ again: out1: hpfs_brelse4(&qbh); out: + if (!err) + hpfs_update_directory_times(dir); hpfs_unlock(dir->i_sb); return err; } @@ -477,6 +494,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: + if (!err) + hpfs_update_directory_times(dir); hpfs_unlock(dir->i_sb); return err; } @@ -595,7 +614,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto end1; } - end: +end: hpfs_i(i)->i_parent_dir = new_dir->i_ino; if (S_ISDIR(i->i_mode)) { inc_nlink(new_dir); @@ -610,6 +629,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, brelse(bh); } end1: + if (!err) { + hpfs_update_directory_times(old_dir); + hpfs_update_directory_times(new_dir); + } hpfs_unlock(i->i_sb); return err; } diff --git a/kernel/fs/jbd2/checkpoint.c b/kernel/fs/jbd2/checkpoint.c index 9c00e2e5a..78c1545a3 100644 --- a/kernel/fs/jbd2/checkpoint.c +++ b/kernel/fs/jbd2/checkpoint.c @@ -419,12 +419,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal) * journal_clean_one_cp_list * * Find all the written-back checkpoint buffers in the given list and - * release them. + * release them. If 'destroy' is set, clean all buffers unconditionally. * * Called with j_list_lock held. * Returns 1 if we freed the transaction, 0 otherwise. */ -static int journal_clean_one_cp_list(struct journal_head *jh) +static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) { struct journal_head *last_jh; struct journal_head *next_jh = jh; @@ -438,7 +438,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh) do { jh = next_jh; next_jh = jh->b_cpnext; - ret = __try_to_free_cp_buf(jh); + if (!destroy) + ret = __try_to_free_cp_buf(jh); + else + ret = __jbd2_journal_remove_checkpoint(jh) + 1; if (!ret) return freed; if (ret == 2) @@ -461,10 +464,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh) * journal_clean_checkpoint_list * * Find all the written-back checkpoint buffers in the journal and release them. + * If 'destroy' is set, release all buffers unconditionally. * * Called with j_list_lock held. */ -void __jbd2_journal_clean_checkpoint_list(journal_t *journal) +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) { transaction_t *transaction, *last_transaction, *next_transaction; int ret; @@ -478,7 +482,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) do { transaction = next_transaction; next_transaction = transaction->t_cpnext; - ret = journal_clean_one_cp_list(transaction->t_checkpoint_list); + ret = journal_clean_one_cp_list(transaction->t_checkpoint_list, + destroy); /* * This function only frees up some memory if possible so we * dont have an obligation to finish processing. 
Bail out if @@ -494,7 +499,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) * we can possibly see not yet submitted buffers on io_list */ ret = journal_clean_one_cp_list(transaction-> - t_checkpoint_io_list); + t_checkpoint_io_list, destroy); if (need_resched()) return; /* @@ -508,6 +513,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) } /* + * Remove buffers from all checkpoint lists as journal is aborted and we just + * need to free memory + */ +void jbd2_journal_destroy_checkpoint(journal_t *journal) +{ + /* + * We loop because __jbd2_journal_clean_checkpoint_list() may abort + * early due to a need of rescheduling. + */ + while (1) { + spin_lock(&journal->j_list_lock); + if (!journal->j_checkpoint_transactions) { + spin_unlock(&journal->j_list_lock); + break; + } + __jbd2_journal_clean_checkpoint_list(journal, true); + spin_unlock(&journal->j_list_lock); + cond_resched(); + } +} + +/* * journal_remove_checkpoint: called after a buffer has been committed * to disk (either by being write-back flushed to disk, or being * committed to the log). diff --git a/kernel/fs/jbd2/commit.c b/kernel/fs/jbd2/commit.c index b73e0215b..362e5f614 100644 --- a/kernel/fs/jbd2/commit.c +++ b/kernel/fs/jbd2/commit.c @@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) * frees some memory */ spin_lock(&journal->j_list_lock); - __jbd2_journal_clean_checkpoint_list(journal); + __jbd2_journal_clean_checkpoint_list(journal, false); spin_unlock(&journal->j_list_lock); jbd_debug(3, "JBD2: commit phase 1\n"); diff --git a/kernel/fs/jbd2/journal.c b/kernel/fs/jbd2/journal.c index 112fad9e1..7003c0925 100644 --- a/kernel/fs/jbd2/journal.c +++ b/kernel/fs/jbd2/journal.c @@ -1708,8 +1708,17 @@ int jbd2_journal_destroy(journal_t *journal) while (journal->j_checkpoint_transactions != NULL) { spin_unlock(&journal->j_list_lock); mutex_lock(&journal->j_checkpoint_mutex); - jbd2_log_do_checkpoint(journal); + err = jbd2_log_do_checkpoint(journal); mutex_unlock(&journal->j_checkpoint_mutex); + /* + * If checkpointing failed, just free the buffers to avoid + * looping forever + */ + if (err) { + jbd2_journal_destroy_checkpoint(journal); + spin_lock(&journal->j_list_lock); + break; + } spin_lock(&journal->j_list_lock); } diff --git a/kernel/fs/libfs.c b/kernel/fs/libfs.c index 02813592e..f4641fd27 100644 --- a/kernel/fs/libfs.c +++ b/kernel/fs/libfs.c @@ -1176,7 +1176,7 @@ void make_empty_dir_inode(struct inode *inode) inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; inode->i_rdev = 0; - inode->i_size = 2; + inode->i_size = 0; inode->i_blkbits = PAGE_SHIFT; inode->i_blocks = 0; diff --git a/kernel/fs/nfs/flexfilelayout/flexfilelayout.c b/kernel/fs/nfs/flexfilelayout/flexfilelayout.c index 6f5f0f425..fecd9201d 100644 --- a/kernel/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/kernel/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1039,6 +1039,11 @@ static int ff_layout_write_done_cb(struct rpc_task *task, hdr->res.verf->committed == NFS_DATA_SYNC) ff_layout_set_layoutcommit(hdr); + /* zero out fattr since we don't care DS attr at all */ + hdr->fattr.valid = 0; + if (task->tk_status >= 0) + nfs_writeback_update_inode(hdr); + return 0; } diff --git a/kernel/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/kernel/fs/nfs/flexfilelayout/flexfilelayoutdev.c index f13e1969e..b28fa4cbe 100644 --- a/kernel/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/kernel/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -500,16 +500,19 @@ int ff_layout_encode_ds_ioerr(struct 
nfs4_flexfile_layout *flo, range->offset, range->length)) continue; /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE) - * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4) + * + array length + deviceid(NFS4_DEVICEID4_SIZE) + * + status(4) + opnum(4) */ p = xdr_reserve_space(xdr, - 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); + 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); if (unlikely(!p)) return -ENOBUFS; p = xdr_encode_hyper(p, err->offset); p = xdr_encode_hyper(p, err->length); p = xdr_encode_opaque_fixed(p, &err->stateid, NFS4_STATEID_SIZE); + /* Encode 1 error */ + *p++ = cpu_to_be32(1); p = xdr_encode_opaque_fixed(p, &err->deviceid, NFS4_DEVICEID4_SIZE); *p++ = cpu_to_be32(err->status); diff --git a/kernel/fs/nfs/inode.c b/kernel/fs/nfs/inode.c index 5d25b9d97..976ba792f 100644 --- a/kernel/fs/nfs/inode.c +++ b/kernel/fs/nfs/inode.c @@ -1270,13 +1270,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat return 0; } -static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr) -{ - if (!(fattr->valid & NFS_ATTR_FATTR_CTIME)) - return 0; - return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; -} - static atomic_long_t nfs_attr_generation_counter; static unsigned long nfs_read_attr_generation_counter(void) @@ -1425,7 +1418,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n const struct nfs_inode *nfsi = NFS_I(inode); return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || - nfs_ctime_need_update(inode, fattr) || ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); } @@ -1488,6 +1480,13 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr { unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; + /* + * Don't revalidate the pagecache if we hold a delegation, but do + * force an attribute update + */ + if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) + invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED; + if (S_ISDIR(inode->i_mode)) invalid |= NFS_INO_INVALID_DATA; nfs_set_cache_invalid(inode, invalid); diff --git a/kernel/fs/nfs/nfs4proc.c b/kernel/fs/nfs/nfs4proc.c index d3f205126..c245874d7 100644 --- a/kernel/fs/nfs/nfs4proc.c +++ b/kernel/fs/nfs/nfs4proc.c @@ -1152,6 +1152,8 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) return 0; if ((delegation->type & fmode) != fmode) return 0; + if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) + return 0; if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) return 0; nfs_mark_delegation_referenced(delegation); @@ -1216,6 +1218,7 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state) } static void nfs_clear_open_stateid_locked(struct nfs4_state *state, + nfs4_stateid *arg_stateid, nfs4_stateid *stateid, fmode_t fmode) { clear_bit(NFS_O_RDWR_STATE, &state->flags); @@ -1234,8 +1237,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, if (stateid == NULL) return; /* Handle races with OPEN */ - if (!nfs4_stateid_match_other(stateid, &state->open_stateid) || - !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { + if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || + (nfs4_stateid_match_other(stateid, &state->open_stateid) && + !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { nfs_resync_open_stateid_locked(state); return; } @@ -1244,10 +1248,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 
nfs4_stateid_copy(&state->open_stateid, stateid); } -static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) +static void nfs_clear_open_stateid(struct nfs4_state *state, + nfs4_stateid *arg_stateid, + nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); - nfs_clear_open_stateid_locked(state, stateid, fmode); + nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); write_sequnlock(&state->seqlock); if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) nfs4_schedule_state_manager(state->owner->so_server->nfs_client); @@ -2413,7 +2419,7 @@ static int _nfs4_do_open(struct inode *dir, goto err_free_label; state = ctx->state; - if ((opendata->o_arg.open_flags & O_EXCL) && + if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { nfs4_exclusive_attrset(opendata, sattr); @@ -2672,7 +2678,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data) goto out_release; } } - nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode); + nfs_clear_open_stateid(state, &calldata->arg.stateid, + res_stateid, calldata->arg.fmode); out_release: nfs_release_seqid(calldata->arg.seqid); nfs_refresh_inode(calldata->inode, calldata->res.fattr); @@ -8571,6 +8578,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { .reboot_recovery_ops = &nfs41_reboot_recovery_ops, .nograce_recovery_ops = &nfs41_nograce_recovery_ops, .state_renewal_ops = &nfs41_state_renewal_ops, + .mig_recovery_ops = &nfs41_mig_recovery_ops, }; #endif diff --git a/kernel/fs/nfs/pagelist.c b/kernel/fs/nfs/pagelist.c index 7b4552678..069914ce7 100644 --- a/kernel/fs/nfs/pagelist.c +++ b/kernel/fs/nfs/pagelist.c @@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init); void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos) { spin_lock(&hdr->lock); - if (pos < hdr->io_start + hdr->good_bytes) { - set_bit(NFS_IOHDR_ERROR, &hdr->flags); + if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags) + || pos < hdr->io_start + hdr->good_bytes) { clear_bit(NFS_IOHDR_EOF, &hdr->flags); hdr->good_bytes = pos - hdr->io_start; hdr->error = error; diff --git a/kernel/fs/nfs/pnfs_nfs.c b/kernel/fs/nfs/pnfs_nfs.c index f37e25b63..1705c78ee 100644 --- a/kernel/fs/nfs/pnfs_nfs.c +++ b/kernel/fs/nfs/pnfs_nfs.c @@ -359,26 +359,31 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) return false; } +/* + * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does, + * declare a match. 
+ */ static bool _same_data_server_addrs_locked(const struct list_head *dsaddrs1, const struct list_head *dsaddrs2) { struct nfs4_pnfs_ds_addr *da1, *da2; - - /* step through both lists, comparing as we go */ - for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), - da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); - da1 != NULL && da2 != NULL; - da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), - da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { - if (!same_sockaddr((struct sockaddr *)&da1->da_addr, - (struct sockaddr *)&da2->da_addr)) - return false; + struct sockaddr *sa1, *sa2; + bool match = false; + + list_for_each_entry(da1, dsaddrs1, da_node) { + sa1 = (struct sockaddr *)&da1->da_addr; + match = false; + list_for_each_entry(da2, dsaddrs2, da_node) { + sa2 = (struct sockaddr *)&da2->da_addr; + match = same_sockaddr(sa1, sa2); + if (match) + break; + } + if (!match) + break; } - if (da1 == NULL && da2 == NULL) - return true; - - return false; + return match; } /* diff --git a/kernel/fs/nfs/write.c b/kernel/fs/nfs/write.c index daf355642..07115b9b1 100644 --- a/kernel/fs/nfs/write.c +++ b/kernel/fs/nfs/write.c @@ -1383,24 +1383,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, { struct nfs_pgio_args *argp = &hdr->args; struct nfs_pgio_res *resp = &hdr->res; + u64 size = argp->offset + resp->count; if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) + fattr->size = size; + if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { + fattr->valid &= ~NFS_ATTR_FATTR_SIZE; return; - if (argp->offset + resp->count != fattr->size) - return; - if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) + } + if (size != fattr->size) return; /* Set attribute barrier */ nfs_fattr_set_barrier(fattr); + /* ...and update size */ + fattr->valid |= NFS_ATTR_FATTR_SIZE; } void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) { - struct nfs_fattr *fattr = hdr->res.fattr; + struct nfs_fattr *fattr = &hdr->fattr; struct inode *inode = hdr->inode; - if (fattr == NULL) - return; spin_lock(&inode->i_lock); nfs_writeback_check_extend(hdr, fattr); nfs_post_op_update_inode_force_wcc_locked(inode, fattr); diff --git a/kernel/fs/nfsd/nfs4state.c b/kernel/fs/nfsd/nfs4state.c index 6e13504f7..397798368 100644 --- a/kernel/fs/nfsd/nfs4state.c +++ b/kernel/fs/nfsd/nfs4state.c @@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); } -static void +static bool unhash_delegation_locked(struct nfs4_delegation *dp) { struct nfs4_file *fp = dp->dl_stid.sc_file; lockdep_assert_held(&state_lock); + if (list_empty(&dp->dl_perfile)) + return false; + dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; /* Ensure that deleg break won't try to requeue it */ ++dp->dl_time; @@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp) list_del_init(&dp->dl_recall_lru); list_del_init(&dp->dl_perfile); spin_unlock(&fp->fi_lock); + return true; } static void destroy_delegation(struct nfs4_delegation *dp) { + bool unhashed; + spin_lock(&state_lock); - unhash_delegation_locked(dp); + unhashed = unhash_delegation_locked(dp); spin_unlock(&state_lock); - put_clnt_odstate(dp->dl_clnt_odstate); - nfs4_put_deleg_lease(dp->dl_stid.sc_file); - nfs4_put_stid(&dp->dl_stid); + if (unhashed) { + put_clnt_odstate(dp->dl_clnt_odstate); + nfs4_put_deleg_lease(dp->dl_stid.sc_file); + nfs4_put_stid(&dp->dl_stid); + } } static void revoke_delegation(struct nfs4_delegation 
*dp) @@ -1004,16 +1012,20 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop) sop->so_ops->so_free(sop); } -static void unhash_ol_stateid(struct nfs4_ol_stateid *stp) +static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_file *fp = stp->st_stid.sc_file; lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); + if (list_empty(&stp->st_perfile)) + return false; + spin_lock(&fp->fi_lock); - list_del(&stp->st_perfile); + list_del_init(&stp->st_perfile); spin_unlock(&fp->fi_lock); list_del(&stp->st_perstateowner); + return true; } static void nfs4_free_ol_stateid(struct nfs4_stid *stid) @@ -1063,25 +1075,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, list_add(&stp->st_locks, reaplist); } -static void unhash_lock_stateid(struct nfs4_ol_stateid *stp) +static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); list_del_init(&stp->st_locks); - unhash_ol_stateid(stp); nfs4_unhash_stid(&stp->st_stid); + return unhash_ol_stateid(stp); } static void release_lock_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); + bool unhashed; spin_lock(&oo->oo_owner.so_client->cl_lock); - unhash_lock_stateid(stp); + unhashed = unhash_lock_stateid(stp); spin_unlock(&oo->oo_owner.so_client->cl_lock); - nfs4_put_stid(&stp->st_stid); + if (unhashed) + nfs4_put_stid(&stp->st_stid); } static void unhash_lockowner_locked(struct nfs4_lockowner *lo) @@ -1129,7 +1143,7 @@ static void release_lockowner(struct nfs4_lockowner *lo) while (!list_empty(&lo->lo_owner.so_stateids)) { stp = list_first_entry(&lo->lo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); - unhash_lock_stateid(stp); + WARN_ON(!unhash_lock_stateid(stp)); put_ol_stateid_locked(stp, &reaplist); } spin_unlock(&clp->cl_lock); @@ -1142,21 +1156,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, { struct nfs4_ol_stateid *stp; + lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); + while (!list_empty(&open_stp->st_locks)) { stp = list_entry(open_stp->st_locks.next, struct nfs4_ol_stateid, st_locks); - unhash_lock_stateid(stp); + WARN_ON(!unhash_lock_stateid(stp)); put_ol_stateid_locked(stp, reaplist); } } -static void unhash_open_stateid(struct nfs4_ol_stateid *stp, +static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { + bool unhashed; + lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); - unhash_ol_stateid(stp); + unhashed = unhash_ol_stateid(stp); release_open_stateid_locks(stp, reaplist); + return unhashed; } static void release_open_stateid(struct nfs4_ol_stateid *stp) @@ -1164,8 +1183,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp) LIST_HEAD(reaplist); spin_lock(&stp->st_stid.sc_client->cl_lock); - unhash_open_stateid(stp, &reaplist); - put_ol_stateid_locked(stp, &reaplist); + if (unhash_open_stateid(stp, &reaplist)) + put_ol_stateid_locked(stp, &reaplist); spin_unlock(&stp->st_stid.sc_client->cl_lock); free_ol_stateid_reaplist(&reaplist); } @@ -1210,8 +1229,8 @@ static void release_openowner(struct nfs4_openowner *oo) while (!list_empty(&oo->oo_owner.so_stateids)) { stp = list_first_entry(&oo->oo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); - unhash_open_stateid(stp, &reaplist); - put_ol_stateid_locked(stp, &reaplist); + if (unhash_open_stateid(stp, &reaplist)) + put_ol_stateid_locked(stp, 
&reaplist); } spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); @@ -1714,7 +1733,7 @@ __destroy_client(struct nfs4_client *clp) spin_lock(&state_lock); while (!list_empty(&clp->cl_delegations)) { dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); - unhash_delegation_locked(dp); + WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); @@ -4346,7 +4365,7 @@ nfs4_laundromat(struct nfsd_net *nn) new_timeo = min(new_timeo, t); break; } - unhash_delegation_locked(dp); + WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); @@ -4714,7 +4733,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (check_for_locks(stp->st_stid.sc_file, lockowner(stp->st_stateowner))) break; - unhash_lock_stateid(stp); + WARN_ON(!unhash_lock_stateid(stp)); spin_unlock(&cl->cl_lock); nfs4_put_stid(s); ret = nfs_ok; @@ -4930,20 +4949,23 @@ out: static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) { struct nfs4_client *clp = s->st_stid.sc_client; + bool unhashed; LIST_HEAD(reaplist); s->st_stid.sc_type = NFS4_CLOSED_STID; spin_lock(&clp->cl_lock); - unhash_open_stateid(s, &reaplist); + unhashed = unhash_open_stateid(s, &reaplist); if (clp->cl_minorversion) { - put_ol_stateid_locked(s, &reaplist); + if (unhashed) + put_ol_stateid_locked(s, &reaplist); spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); } else { spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); - move_to_close_lru(s, clp->net); + if (unhashed) + move_to_close_lru(s, clp->net); } } @@ -5982,7 +6004,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, struct list_head *collect, - void (*func)(struct nfs4_ol_stateid *)) + bool (*func)(struct nfs4_ol_stateid *)) { struct nfs4_openowner *oop; struct nfs4_ol_stateid *stp, *st_next; @@ -5996,9 +6018,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, list_for_each_entry_safe(lst, lst_next, &stp->st_locks, st_locks) { if (func) { - func(lst); - nfsd_inject_add_lock_to_list(lst, - collect); + if (func(lst)) + nfsd_inject_add_lock_to_list(lst, + collect); } ++count; /* @@ -6268,7 +6290,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, continue; atomic_inc(&clp->cl_refcount); - unhash_delegation_locked(dp); + WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, victims); } ++count; @@ -6598,7 +6620,7 @@ nfs4_state_shutdown_net(struct net *net) spin_lock(&state_lock); list_for_each_safe(pos, next, &nn->del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); - unhash_delegation_locked(dp); + WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); diff --git a/kernel/fs/nfsd/nfs4xdr.c b/kernel/fs/nfsd/nfs4xdr.c index d4d84451e..3dd1b616b 100644 --- a/kernel/fs/nfsd/nfs4xdr.c +++ b/kernel/fs/nfsd/nfs4xdr.c @@ -2139,6 +2139,27 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp, return nfsd4_encode_user(xdr, rqstp, ace->who_uid); } +static inline __be32 +nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type) +{ + __be32 *p; + + if (layout_type) { + p = xdr_reserve_space(xdr, 8); + if (!p) + return nfserr_resource; + *p++ = cpu_to_be32(1); + *p++ = cpu_to_be32(layout_type); + } else { + p = xdr_reserve_space(xdr, 4); + if (!p) + return 
nfserr_resource; + *p++ = cpu_to_be32(0); + } + + return 0; +} + #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ FATTR4_WORD0_RDATTR_ERROR) #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID @@ -2692,20 +2713,16 @@ out_acl: p = xdr_encode_hyper(p, stat.ino); } #ifdef CONFIG_NFSD_PNFS - if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) || - (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) { - if (exp->ex_layout_type) { - p = xdr_reserve_space(xdr, 8); - if (!p) - goto out_resource; - *p++ = cpu_to_be32(1); - *p++ = cpu_to_be32(exp->ex_layout_type); - } else { - p = xdr_reserve_space(xdr, 4); - if (!p) - goto out_resource; - *p++ = cpu_to_be32(0); - } + if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) { + status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type); + if (status) + goto out; + } + + if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) { + status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type); + if (status) + goto out; } if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) { diff --git a/kernel/fs/ocfs2/super.c b/kernel/fs/ocfs2/super.c index 403c5660b..a482e312c 100644 --- a/kernel/fs/ocfs2/super.c +++ b/kernel/fs/ocfs2/super.c @@ -1550,8 +1550,8 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root) seq_printf(s, ",localflocks,"); if (osb->osb_cluster_stack[0]) - seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN, - osb->osb_cluster_stack); + seq_show_option_n(s, "cluster_stack", osb->osb_cluster_stack, + OCFS2_STACK_LABEL_LEN); if (opts & OCFS2_MOUNT_USRQUOTA) seq_printf(s, ",usrquota"); if (opts & OCFS2_MOUNT_GRPQUOTA) diff --git a/kernel/fs/overlayfs/super.c b/kernel/fs/overlayfs/super.c index bf8537c7f..155989455 100644 --- a/kernel/fs/overlayfs/super.c +++ b/kernel/fs/overlayfs/super.c @@ -517,10 +517,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) struct super_block *sb = dentry->d_sb; struct ovl_fs *ufs = sb->s_fs_info; - seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); + seq_show_option(m, "lowerdir", ufs->config.lowerdir); if (ufs->config.upperdir) { - seq_printf(m, ",upperdir=%s", ufs->config.upperdir); - seq_printf(m, ",workdir=%s", ufs->config.workdir); + seq_show_option(m, "upperdir", ufs->config.upperdir); + seq_show_option(m, "workdir", ufs->config.workdir); } return 0; } diff --git a/kernel/fs/reiserfs/super.c b/kernel/fs/reiserfs/super.c index 0111ad046..cf6fa25f8 100644 --- a/kernel/fs/reiserfs/super.c +++ b/kernel/fs/reiserfs/super.c @@ -714,18 +714,20 @@ static int reiserfs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",acl"); if (REISERFS_SB(s)->s_jdev) - seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev); + seq_show_option(seq, "jdev", REISERFS_SB(s)->s_jdev); if (journal->j_max_commit_age != journal->j_default_max_commit_age) seq_printf(seq, ",commit=%d", journal->j_max_commit_age); #ifdef CONFIG_QUOTA if (REISERFS_SB(s)->s_qf_names[USRQUOTA]) - seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]); + seq_show_option(seq, "usrjquota", + REISERFS_SB(s)->s_qf_names[USRQUOTA]); else if (opts & (1 << REISERFS_USRQUOTA)) seq_puts(seq, ",usrquota"); if (REISERFS_SB(s)->s_qf_names[GRPQUOTA]) - seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]); + seq_show_option(seq, "grpjquota", + REISERFS_SB(s)->s_qf_names[GRPQUOTA]); else if (opts & (1 << REISERFS_GRPQUOTA)) seq_puts(seq, ",grpquota"); if (REISERFS_SB(s)->s_jquota_fmt) { diff --git a/kernel/fs/xfs/libxfs/xfs_da_format.h b/kernel/fs/xfs/libxfs/xfs_da_format.h index 
74bcbabfa..b14bbd6bb 100644 --- a/kernel/fs/xfs/libxfs/xfs_da_format.h +++ b/kernel/fs/xfs/libxfs/xfs_da_format.h @@ -680,8 +680,15 @@ typedef struct xfs_attr_leaf_name_remote { typedef struct xfs_attr_leafblock { xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */ xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */ - xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */ - xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */ + /* + * The rest of the block contains the following structures after the + * leaf entries, growing from the bottom up. The variables are never + * referenced and definining them can actually make gcc optimize away + * accesses to the 'entries' array above index 0 so don't do that. + * + * xfs_attr_leaf_name_local_t namelist; + * xfs_attr_leaf_name_remote_t valuelist; + */ } xfs_attr_leafblock_t; /* diff --git a/kernel/fs/xfs/libxfs/xfs_dir2_data.c b/kernel/fs/xfs/libxfs/xfs_dir2_data.c index de1ea16f5..534bbf283 100644 --- a/kernel/fs/xfs/libxfs/xfs_dir2_data.c +++ b/kernel/fs/xfs/libxfs/xfs_dir2_data.c @@ -252,7 +252,8 @@ xfs_dir3_data_reada_verify( return; case cpu_to_be32(XFS_DIR2_DATA_MAGIC): case cpu_to_be32(XFS_DIR3_DATA_MAGIC): - xfs_dir3_data_verify(bp); + bp->b_ops = &xfs_dir3_data_buf_ops; + bp->b_ops->verify_read(bp); return; default: xfs_buf_ioerror(bp, -EFSCORRUPTED); diff --git a/kernel/fs/xfs/libxfs/xfs_dir2_node.c b/kernel/fs/xfs/libxfs/xfs_dir2_node.c index 41b80d3d3..06bb4218b 100644 --- a/kernel/fs/xfs/libxfs/xfs_dir2_node.c +++ b/kernel/fs/xfs/libxfs/xfs_dir2_node.c @@ -2132,6 +2132,7 @@ xfs_dir2_node_replace( int error; /* error return value */ int i; /* btree level */ xfs_ino_t inum; /* new inode number */ + int ftype; /* new file type */ xfs_dir2_leaf_t *leaf; /* leaf structure */ xfs_dir2_leaf_entry_t *lep; /* leaf entry being changed */ int rval; /* internal return value */ @@ -2145,7 +2146,14 @@ xfs_dir2_node_replace( state = xfs_da_state_alloc(); state->args = args; state->mp = args->dp->i_mount; + + /* + * We have to save new inode number and ftype since + * xfs_da3_node_lookup_int() is going to overwrite them + */ inum = args->inumber; + ftype = args->filetype; + /* * Lookup the entry to change in the btree. */ @@ -2183,7 +2191,7 @@ xfs_dir2_node_replace( * Fill in the new inode number and log the entry. 
*/ dep->inumber = cpu_to_be64(inum); - args->dp->d_ops->data_put_ftype(dep, args->filetype); + args->dp->d_ops->data_put_ftype(dep, ftype); xfs_dir2_data_log_entry(args, state->extrablk.bp, dep); rval = 0; } diff --git a/kernel/fs/xfs/xfs_super.c b/kernel/fs/xfs/xfs_super.c index 858e1e62b..65a45372f 100644 --- a/kernel/fs/xfs/xfs_super.c +++ b/kernel/fs/xfs/xfs_super.c @@ -504,9 +504,9 @@ xfs_showargs( seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); if (mp->m_logname) - seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); + seq_show_option(m, MNTOPT_LOGDEV, mp->m_logname); if (mp->m_rtname) - seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); + seq_show_option(m, MNTOPT_RTDEV, mp->m_rtname); if (mp->m_dalign > 0) seq_printf(m, "," MNTOPT_SUNIT "=%d", diff --git a/kernel/include/linux/acpi.h b/kernel/include/linux/acpi.h index 4550be3bb..808c43afa 100644 --- a/kernel/include/linux/acpi.h +++ b/kernel/include/linux/acpi.h @@ -198,7 +198,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); - +void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); diff --git a/kernel/include/linux/iio/iio.h b/kernel/include/linux/iio/iio.h index d86b753e9..5ed7771ad 100644 --- a/kernel/include/linux/iio/iio.h +++ b/kernel/include/linux/iio/iio.h @@ -642,6 +642,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL) /** + * IIO_RAD_TO_DEGREE() - Convert rad to degree + * @rad: A value in rad + * + * Returns the given value converted from rad to degree + */ +#define IIO_RAD_TO_DEGREE(rad) \ + (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL) + +/** * IIO_G_TO_M_S_2() - Convert g to meter / second**2 * @g: A value in g * @@ -649,4 +658,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, */ #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL) +/** + * IIO_M_S_2_TO_G() - Convert meter / second**2 to g + * @ms2: A value in meter / second**2 + * + * Returns the given value converted from meter / second**2 to g + */ +#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL) + #endif /* _INDUSTRIAL_IO_H_ */ diff --git a/kernel/include/linux/jbd2.h b/kernel/include/linux/jbd2.h index edb640ae9..eb1cebed3 100644 --- a/kernel/include/linux/jbd2.h +++ b/kernel/include/linux/jbd2.h @@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); extern void jbd2_journal_commit_transaction(journal_t *); /* Checkpoint list management */ -void __jbd2_journal_clean_checkpoint_list(journal_t *journal); +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); int __jbd2_journal_remove_checkpoint(struct journal_head *); +void jbd2_journal_destroy_checkpoint(journal_t *journal); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); diff --git a/kernel/include/linux/mm.h b/kernel/include/linux/mm.h index 0755b9fd0..b2085582d 100644 --- a/kernel/include/linux/mm.h +++ b/kernel/include/linux/mm.h @@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page) } /* + * Return true only if the page has been allocated with + * ALLOC_NO_WATERMARKS and the low watermark was not + * met implying that the system is under some pressure. 
+ */ +static inline bool page_is_pfmemalloc(struct page *page) +{ + /* + * Page index cannot be this large so this must be + * a pfmemalloc page. + */ + return page->index == -1UL; +} + +/* + * Only to be called by the page allocator on a freshly allocated + * page. + */ +static inline void set_page_pfmemalloc(struct page *page) +{ + page->index = -1UL; +} + +static inline void clear_page_pfmemalloc(struct page *page) +{ + page->index = 0; +} + +/* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. diff --git a/kernel/include/linux/mm_types.h b/kernel/include/linux/mm_types.h index c26072c6d..89c047144 100644 --- a/kernel/include/linux/mm_types.h +++ b/kernel/include/linux/mm_types.h @@ -64,15 +64,6 @@ struct page { union { pgoff_t index; /* Our offset within mapping. */ void *freelist; /* sl[aou]b first free object */ - bool pfmemalloc; /* If set by the page allocator, - * ALLOC_NO_WATERMARKS was set - * and the low watermark was not - * met implying that the system - * is under some pressure. The - * caller should try ensure - * this page is only used to - * free other pages. - */ }; union { diff --git a/kernel/include/linux/pci.h b/kernel/include/linux/pci.h index 3ef3a5206..6e935e5ea 100644 --- a/kernel/include/linux/pci.h +++ b/kernel/include/linux/pci.h @@ -180,6 +180,8 @@ enum pci_dev_flags { PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6), /* Do not use PM reset even if device advertises NoSoftRst- */ PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), + /* Get VPD from function 0 VPD */ + PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), }; enum pci_irq_reroute_variant { diff --git a/kernel/include/linux/seq_file.h b/kernel/include/linux/seq_file.h index afbb1fd77..7848473a5 100644 --- a/kernel/include/linux/seq_file.h +++ b/kernel/include/linux/seq_file.h @@ -148,6 +148,41 @@ static inline struct user_namespace *seq_user_ns(struct seq_file *seq) #endif } +/** + * seq_show_options - display mount options with appropriate escapes. + * @m: the seq_file handle + * @name: the mount option name + * @value: the mount option name's value, can be NULL + */ +static inline void seq_show_option(struct seq_file *m, const char *name, + const char *value) +{ + seq_putc(m, ','); + seq_escape(m, name, ",= \t\n\\"); + if (value) { + seq_putc(m, '='); + seq_escape(m, value, ", \t\n\\"); + } +} + +/** + * seq_show_option_n - display mount options with appropriate escapes + * where @value must be a specific length. + * @m: the seq_file handle + * @name: the mount option name + * @value: the mount option name's value, cannot be NULL + * @length: the length of @value to display + * + * This is a macro since this uses "length" to define the size of the + * stack buffer. + */ +#define seq_show_option_n(m, name, value, length) { \ + char val_buf[length + 1]; \ + strncpy(val_buf, value, length); \ + val_buf[length] = '\0'; \ + seq_show_option(m, name, val_buf); \ +} + #define SEQ_START_TOKEN ((void *)1) /* * Helpers for iteration over list_head-s in seq_files diff --git a/kernel/include/linux/skbuff.h b/kernel/include/linux/skbuff.h index 26a910316..b42539122 100644 --- a/kernel/include/linux/skbuff.h +++ b/kernel/include/linux/skbuff.h @@ -1597,20 +1597,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; /* - * Propagate page->pfmemalloc to the skb if we can. 
The problem is - * that not all callers have unique ownership of the page. If - * pfmemalloc is set, we check the mapping as a mapping implies - * page->index is set (index and pfmemalloc share space). - * If it's a valid mapping, we cannot use page->pfmemalloc but we - * do not lose pfmemalloc information as the pages would not be - * allocated using __GFP_MEMALLOC. + * Propagate page pfmemalloc to the skb if we can. The problem is + * that not all callers have unique ownership of the page but rely + * on page_is_pfmemalloc doing the right thing(tm). */ frag->page.p = page; frag->page_offset = off; skb_frag_size_set(frag, size); page = compound_head(page); - if (page->pfmemalloc && !page->mapping) + if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; } @@ -2257,7 +2253,7 @@ static inline struct page *dev_alloc_page(void) static inline void skb_propagate_pfmemalloc(struct page *page, struct sk_buff *skb) { - if (page && page->pfmemalloc) + if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; } diff --git a/kernel/include/linux/sunrpc/xprtsock.h b/kernel/include/linux/sunrpc/xprtsock.h index 7591788e9..357e44c1a 100644 --- a/kernel/include/linux/sunrpc/xprtsock.h +++ b/kernel/include/linux/sunrpc/xprtsock.h @@ -42,6 +42,7 @@ struct sock_xprt { /* * Connection of transports */ + unsigned long sock_state; struct delayed_work connect_worker; struct sockaddr_storage srcaddr; unsigned short srcport; @@ -76,6 +77,8 @@ struct sock_xprt { */ #define TCP_RPC_REPLY (1UL << 6) +#define XPRT_SOCK_CONNECTING 1U + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/kernel/include/net/act_api.h b/kernel/include/net/act_api.h index 3ee4c92af..931738bc5 100644 --- a/kernel/include/net/act_api.h +++ b/kernel/include/net/act_api.h @@ -99,7 +99,6 @@ struct tc_action_ops { int tcf_hash_search(struct tc_action *a, u32 index); void tcf_hash_destroy(struct tc_action *a); -int tcf_hash_release(struct tc_action *a, int bind); u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); int tcf_hash_check(u32 index, struct tc_action *a, int bind); int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, @@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); void tcf_hash_insert(struct tc_action *a); +int __tcf_hash_release(struct tc_action *a, bool bind, bool strict); + +static inline int tcf_hash_release(struct tc_action *a, bool bind) +{ + return __tcf_hash_release(a, bind, false); +} + int tcf_register_action(struct tc_action_ops *a, unsigned int mask); int tcf_unregister_action(struct tc_action_ops *a); int tcf_action_destroy(struct list_head *actions, int bind); diff --git a/kernel/include/net/ip.h b/kernel/include/net/ip.h index d14af7edd..f41fc497b 100644 --- a/kernel/include/net/ip.h +++ b/kernel/include/net/ip.h @@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) } /* datagram.c */ +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); void ip4_datagram_release_cb(struct sock *sk); diff --git a/kernel/include/soc/tegra/mc.h b/kernel/include/soc/tegra/mc.h index 63deb8d9f..d298857cd 100644 --- a/kernel/include/soc/tegra/mc.h +++ b/kernel/include/soc/tegra/mc.h @@ -59,6 +59,7 @@ struct tegra_smmu_soc { bool supports_round_robin_arbitration; bool supports_request_limit; + unsigned int num_tlb_lines; unsigned int num_asids; 
const struct tegra_smmu_ops *ops; diff --git a/kernel/include/trace/events/sunrpc.h b/kernel/include/trace/events/sunrpc.h index fd1a02cb3..003dca933 100644 --- a/kernel/include/trace/events/sunrpc.h +++ b/kernel/include/trace/events/sunrpc.h @@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue, TP_STRUCT__entry( __field(struct svc_xprt *, xprt) - __field(struct svc_rqst *, rqst) + __field_struct(struct sockaddr_storage, ss) + __field(int, pid) + __field(unsigned long, flags) ), TP_fast_assign( __entry->xprt = xprt; - __entry->rqst = rqst; + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); + __entry->pid = rqst? rqst->rq_task->pid : 0; + __entry->flags = xprt ? xprt->xpt_flags : 0; ), TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, - (struct sockaddr *)&__entry->xprt->xpt_remote, - __entry->rqst ? __entry->rqst->rq_task->pid : 0, - show_svc_xprt_flags(__entry->xprt->xpt_flags)) + (struct sockaddr *)&__entry->ss, + __entry->pid, show_svc_xprt_flags(__entry->flags)) ); TRACE_EVENT(svc_xprt_dequeue, @@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt, TP_STRUCT__entry( __field(struct svc_xprt *, xprt) __field(int, len) + __field_struct(struct sockaddr_storage, ss) + __field(unsigned long, flags) ), TP_fast_assign( __entry->xprt = xprt; + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); __entry->len = len; + __entry->flags = xprt ? xprt->xpt_flags : 0; ), TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, - (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len, - show_svc_xprt_flags(__entry->xprt->xpt_flags)) + (struct sockaddr *)&__entry->ss, + __entry->len, show_svc_xprt_flags(__entry->flags)) ); #endif /* _TRACE_SUNRPC_H */ diff --git a/kernel/kernel/cgroup.c b/kernel/kernel/cgroup.c index 2a7a0fe19..21f5bdfc5 100644 --- a/kernel/kernel/cgroup.c +++ b/kernel/kernel/cgroup.c @@ -1319,7 +1319,7 @@ static int cgroup_show_options(struct seq_file *seq, for_each_subsys(ss, ssid) if (root->subsys_mask & (1 << ssid)) - seq_printf(seq, ",%s", ss->name); + seq_show_option(seq, ss->name, NULL); if (root->flags & CGRP_ROOT_NOPREFIX) seq_puts(seq, ",noprefix"); if (root->flags & CGRP_ROOT_XATTR) @@ -1327,13 +1327,14 @@ static int cgroup_show_options(struct seq_file *seq, spin_lock(&release_agent_path_lock); if (strlen(root->release_agent_path)) - seq_printf(seq, ",release_agent=%s", root->release_agent_path); + seq_show_option(seq, "release_agent", + root->release_agent_path); spin_unlock(&release_agent_path_lock); if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) seq_puts(seq, ",clone_children"); if (strlen(root->name)) - seq_printf(seq, ",name=%s", root->name); + seq_show_option(seq, "name", root->name); return 0; } diff --git a/kernel/kernel/fork.c b/kernel/kernel/fork.c index dd407cea0..1b0e656f6 100644 --- a/kernel/kernel/fork.c +++ b/kernel/kernel/fork.c @@ -1888,13 +1888,21 @@ static int check_unshare_flags(unsigned long unshare_flags) CLONE_NEWUSER|CLONE_NEWPID)) return -EINVAL; /* - * Not implemented, but pretend it works if there is nothing to - * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND - * needs to unshare vm. + * Not implemented, but pretend it works if there is nothing + * to unshare. Note that unsharing the address space or the + * signal handlers also need to unshare the signal queues (aka + * CLONE_THREAD). 
*/ if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { - /* FIXME: get_task_mm() increments ->mm_users */ - if (atomic_read(&current->mm->mm_users) > 1) + if (!thread_group_empty(current)) + return -EINVAL; + } + if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { + if (atomic_read(&current->sighand->count) > 1) + return -EINVAL; + } + if (unshare_flags & CLONE_VM) { + if (!current_is_single_threaded()) return -EINVAL; } @@ -1963,16 +1971,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) if (unshare_flags & CLONE_NEWUSER) unshare_flags |= CLONE_THREAD | CLONE_FS; /* - * If unsharing a thread from a thread group, must also unshare vm. - */ - if (unshare_flags & CLONE_THREAD) - unshare_flags |= CLONE_VM; - /* * If unsharing vm, must also unshare signal handlers. */ if (unshare_flags & CLONE_VM) unshare_flags |= CLONE_SIGHAND; /* + * If unsharing a signal handlers, must also unshare the signal queues. + */ + if (unshare_flags & CLONE_SIGHAND) + unshare_flags |= CLONE_THREAD; + /* * If unsharing namespace, must also unshare filesystem information. */ if (unshare_flags & CLONE_NEWNS) diff --git a/kernel/kernel/sched/core.c b/kernel/kernel/sched/core.c index 392aad4ec..799b75b27 100644 --- a/kernel/kernel/sched/core.c +++ b/kernel/kernel/sched/core.c @@ -5672,6 +5672,14 @@ static int sched_cpu_active(struct notifier_block *nfb, case CPU_STARTING: set_cpu_rq_start_time(); return NOTIFY_OK; + case CPU_ONLINE: + /* + * At this point a starting CPU has marked itself as online via + * set_cpu_online(). But it might not yet have marked itself + * as active, which is essential from here on. + * + * Thus, fall-through and help the starting CPU along. + */ case CPU_DOWN_FAILED: set_cpu_active((long)hcpu, true); return NOTIFY_OK; diff --git a/kernel/kernel/sched/work-simple.c b/kernel/kernel/sched/work-simple.c index c996f755d..e57a05225 100644 --- a/kernel/kernel/sched/work-simple.c +++ b/kernel/kernel/sched/work-simple.c @@ -10,6 +10,7 @@ #include <linux/kthread.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/export.h> #define SWORK_EVENT_PENDING (1 << 0) diff --git a/kernel/lib/decompress_bunzip2.c b/kernel/lib/decompress_bunzip2.c index 6dd0335ea..0234361b2 100644 --- a/kernel/lib/decompress_bunzip2.c +++ b/kernel/lib/decompress_bunzip2.c @@ -743,12 +743,12 @@ exit_0: } #ifdef PREBOOT -STATIC int INIT decompress(unsigned char *buf, long len, +STATIC int INIT __decompress(unsigned char *buf, long len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *outbuf, + unsigned char *outbuf, long olen, long *pos, - void(*error)(char *x)) + void (*error)(char *x)) { return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error); } diff --git a/kernel/lib/decompress_inflate.c b/kernel/lib/decompress_inflate.c index d4c789163..555c06bf2 100644 --- a/kernel/lib/decompress_inflate.c +++ b/kernel/lib/decompress_inflate.c @@ -1,4 +1,5 @@ #ifdef STATIC +#define PREBOOT /* Pre-boot environment: included */ /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots @@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len) } /* Included from initramfs et al code */ -STATIC int INIT gunzip(unsigned char *buf, long len, +STATIC int INIT __gunzip(unsigned char *buf, long len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *out_buf, + unsigned char *out_buf, long out_len, long *pos, void(*error)(char *x)) { u8 *zbuf; struct z_stream_s *strm; int rc; - size_t out_len; rc = -1; if 
(flush) { out_len = 0x8000; /* 32 K */ out_buf = malloc(out_len); } else { - out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ + if (!out_len) + out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ } if (!out_buf) { error("Out of memory while allocating output buffer"); @@ -181,4 +182,24 @@ gunzip_nomem1: return rc; /* returns Z_OK (0) if successful */ } -#define decompress gunzip +#ifndef PREBOOT +STATIC int INIT gunzip(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, + long *pos, + void (*error)(char *x)) +{ + return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error); +} +#else +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *pos, + void (*error)(char *x)) +{ + return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error); +} +#endif diff --git a/kernel/lib/decompress_unlz4.c b/kernel/lib/decompress_unlz4.c index 40f66ebe5..036fc882c 100644 --- a/kernel/lib/decompress_unlz4.c +++ b/kernel/lib/decompress_unlz4.c @@ -196,12 +196,12 @@ exit_0: } #ifdef PREBOOT -STATIC int INIT decompress(unsigned char *buf, long in_len, +STATIC int INIT __decompress(unsigned char *buf, long in_len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *output, + unsigned char *output, long out_len, long *posp, - void(*error)(char *x) + void (*error)(char *x) ) { return unlz4(buf, in_len - 4, fill, flush, output, posp, error); diff --git a/kernel/lib/decompress_unlzma.c b/kernel/lib/decompress_unlzma.c index 0be83af62..decb64629 100644 --- a/kernel/lib/decompress_unlzma.c +++ b/kernel/lib/decompress_unlzma.c @@ -667,13 +667,12 @@ exit_0: } #ifdef PREBOOT -STATIC int INIT decompress(unsigned char *buf, long in_len, +STATIC int INIT __decompress(unsigned char *buf, long in_len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *output, + unsigned char *output, long out_len, long *posp, - void(*error)(char *x) - ) + void (*error)(char *x)) { return unlzma(buf, in_len - 4, fill, flush, output, posp, error); } diff --git a/kernel/lib/decompress_unlzo.c b/kernel/lib/decompress_unlzo.c index b94a31bdd..f4c158e3a 100644 --- a/kernel/lib/decompress_unlzo.c +++ b/kernel/lib/decompress_unlzo.c @@ -31,6 +31,7 @@ */ #ifdef STATIC +#define PREBOOT #include "lzo/lzo1x_decompress_safe.c" #else #include <linux/decompress/unlzo.h> @@ -287,4 +288,14 @@ exit: return ret; } -#define decompress unlzo +#ifdef PREBOOT +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long olen, + long *pos, + void (*error)(char *x)) +{ + return unlzo(buf, len, fill, flush, out_buf, pos, error); +} +#endif diff --git a/kernel/lib/decompress_unxz.c b/kernel/lib/decompress_unxz.c index b07a78340..25d59a95b 100644 --- a/kernel/lib/decompress_unxz.c +++ b/kernel/lib/decompress_unxz.c @@ -394,4 +394,14 @@ error_alloc_state: * This macro is used by architecture-specific files to decompress * the kernel image. 
*/ -#define decompress unxz +#ifdef XZ_PREBOOT +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long olen, + long *pos, + void (*error)(char *x)) +{ + return unxz(buf, len, fill, flush, out_buf, pos, error); +} +#endif diff --git a/kernel/lib/rhashtable.c b/kernel/lib/rhashtable.c index 8609378e6..cf910e48f 100644 --- a/kernel/lib/rhashtable.c +++ b/kernel/lib/rhashtable.c @@ -612,6 +612,8 @@ next: iter->skip = 0; } + iter->p = NULL; + /* Ensure we see any new tables. */ smp_rmb(); @@ -622,8 +624,6 @@ next: return ERR_PTR(-EAGAIN); } - iter->p = NULL; - out: return obj; diff --git a/kernel/localversion-rt b/kernel/localversion-rt index 700c857ef..d79dde624 100644 --- a/kernel/localversion-rt +++ b/kernel/localversion-rt @@ -1 +1 @@ --rt8 +-rt10 diff --git a/kernel/mm/page_alloc.c b/kernel/mm/page_alloc.c index 5b70c9977..41bd90d60 100644 --- a/kernel/mm/page_alloc.c +++ b/kernel/mm/page_alloc.c @@ -1021,12 +1021,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, set_page_owner(page, order, gfp_flags); /* - * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to + * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to * allocate the page. The expectation is that the caller is taking * steps that will free more memory. The caller should avoid the page * being used for !PFMEMALLOC purposes. */ - page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); + if (alloc_flags & ALLOC_NO_WATERMARKS) + set_page_pfmemalloc(page); + else + clear_page_pfmemalloc(page); return 0; } diff --git a/kernel/mm/slab.c b/kernel/mm/slab.c index 7eb38dd1c..3dd2d1ff9 100644 --- a/kernel/mm/slab.c +++ b/kernel/mm/slab.c @@ -1602,7 +1602,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, } /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ - if (unlikely(page->pfmemalloc)) + if (page_is_pfmemalloc(page)) pfmemalloc_active = true; nr_pages = (1 << cachep->gfporder); @@ -1613,7 +1613,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, add_zone_page_state(page_zone(page), NR_SLAB_UNRECLAIMABLE, nr_pages); __SetPageSlab(page); - if (page->pfmemalloc) + if (page_is_pfmemalloc(page)) SetPageSlabPfmemalloc(page); if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { diff --git a/kernel/mm/slub.c b/kernel/mm/slub.c index f657453ad..905e283d7 100644 --- a/kernel/mm/slub.c +++ b/kernel/mm/slub.c @@ -1409,7 +1409,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) order = compound_order(page); page->slab_cache = s; __SetPageSlab(page); - if (page->pfmemalloc) + if (page_is_pfmemalloc(page)) SetPageSlabPfmemalloc(page); start = page_address(page); diff --git a/kernel/mm/vmscan.c b/kernel/mm/vmscan.c index 0d024fc8a..1a17bd7c0 100644 --- a/kernel/mm/vmscan.c +++ b/kernel/mm/vmscan.c @@ -1153,7 +1153,7 @@ cull_mlocked: if (PageSwapCache(page)) try_to_free_swap(page); unlock_page(page); - putback_lru_page(page); + list_add(&page->lru, &ret_pages); continue; activate_locked: diff --git a/kernel/net/bridge/br_forward.c b/kernel/net/bridge/br_forward.c index e97572b5d..0ff6e1bbc 100644 --- a/kernel/net/bridge/br_forward.c +++ b/kernel/net/bridge/br_forward.c @@ -42,6 +42,7 @@ int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) } else { skb_push(skb, ETH_HLEN); br_drop_fake_rtable(skb); + skb_sender_cpu_clear(skb); dev_queue_xmit(skb); } diff 
--git a/kernel/net/bridge/br_mdb.c b/kernel/net/bridge/br_mdb.c index e29ad70b3..d1f910c0d 100644 --- a/kernel/net/bridge/br_mdb.c +++ b/kernel/net/bridge/br_mdb.c @@ -348,7 +348,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, return -ENOMEM; rcu_assign_pointer(*pp, p); - br_mdb_notify(br->dev, port, group, RTM_NEWMDB); return 0; } @@ -371,6 +370,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, if (!p || p->br != br || p->state == BR_STATE_DISABLED) return -EINVAL; + memset(&ip, 0, sizeof(ip)); ip.proto = entry->addr.proto; if (ip.proto == htons(ETH_P_IP)) ip.u.ip4 = entry->addr.u.ip4; @@ -417,6 +417,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) if (!netif_running(br->dev) || br->multicast_disabled) return -EINVAL; + memset(&ip, 0, sizeof(ip)); ip.proto = entry->addr.proto; if (ip.proto == htons(ETH_P_IP)) { if (timer_pending(&br->ip4_other_query.timer)) diff --git a/kernel/net/bridge/br_multicast.c b/kernel/net/bridge/br_multicast.c index ff667e18b..9ba383f5b 100644 --- a/kernel/net/bridge/br_multicast.c +++ b/kernel/net/bridge/br_multicast.c @@ -980,7 +980,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, ih = igmpv3_report_hdr(skb); num = ntohs(ih->ngrec); - len = sizeof(*ih); + len = skb_transport_offset(skb) + sizeof(*ih); for (i = 0; i < num; i++) { len += sizeof(*grec); @@ -1035,7 +1035,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, icmp6h = icmp6_hdr(skb); num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); - len = sizeof(*icmp6h); + len = skb_transport_offset(skb) + sizeof(*icmp6h); for (i = 0; i < num; i++) { __be16 *nsrcs, _nsrcs; diff --git a/kernel/net/bridge/br_netlink.c b/kernel/net/bridge/br_netlink.c index 4b5c23699..a7559ef31 100644 --- a/kernel/net/bridge/br_netlink.c +++ b/kernel/net/bridge/br_netlink.c @@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */ + 0; } @@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ @@ -711,9 +715,17 @@ static int br_port_slave_changelink(struct net_device *brdev, struct nlattr *tb[], struct nlattr *data[]) { + struct net_bridge *br = netdev_priv(brdev); + int ret; + if (!data) return 0; - return br_setport(br_port_get_rtnl(dev), data); + + spin_lock_bh(&br->lock); + ret = br_setport(br_port_get_rtnl(dev), data); + spin_unlock_bh(&br->lock); + + return ret; } static int br_port_fill_slave_info(struct sk_buff *skb, diff --git a/kernel/net/ceph/ceph_common.c b/kernel/net/ceph/ceph_common.c index 79e8f71ae..3f76eb84b 100644 --- a/kernel/net/ceph/ceph_common.c +++ b/kernel/net/ceph/ceph_common.c @@ -495,8 +495,11 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) struct ceph_options *opt = client->options; size_t pos = m->count; - if (opt->name) - seq_printf(m, "name=%s,", opt->name); + if (opt->name) { + seq_puts(m, "name="); + seq_escape(m, opt->name, ", \t\n\\"); + seq_putc(m, ','); + } if 
(opt->key) seq_puts(m, "secret=<hidden>,"); diff --git a/kernel/net/core/datagram.c b/kernel/net/core/datagram.c index b80fb91bb..617088aee 100644 --- a/kernel/net/core/datagram.c +++ b/kernel/net/core/datagram.c @@ -131,6 +131,35 @@ out_noerr: goto out; } +static struct sk_buff *skb_set_peeked(struct sk_buff *skb) +{ + struct sk_buff *nskb; + + if (skb->peeked) + return skb; + + /* We have to unshare an skb before modifying it. */ + if (!skb_shared(skb)) + goto done; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return ERR_PTR(-ENOMEM); + + skb->prev->next = nskb; + skb->next->prev = nskb; + nskb->prev = skb->prev; + nskb->next = skb->next; + + consume_skb(skb); + skb = nskb; + +done: + skb->peeked = 1; + + return skb; +} + /** * __skb_recv_datagram - Receive a datagram skbuff * @sk: socket @@ -165,7 +194,9 @@ out_noerr: struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, int *peeked, int *off, int *err) { + struct sk_buff_head *queue = &sk->sk_receive_queue; struct sk_buff *skb, *last; + unsigned long cpu_flags; long timeo; /* * Caller is allowed not to check sk->sk_err before skb_recv_datagram() @@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ - unsigned long cpu_flags; - struct sk_buff_head *queue = &sk->sk_receive_queue; int _off = *off; last = (struct sk_buff *)queue; @@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, _off -= skb->len; continue; } - skb->peeked = 1; + + skb = skb_set_peeked(skb); + error = PTR_ERR(skb); + if (IS_ERR(skb)) + goto unlock_err; + atomic_inc(&skb->users); } else __skb_unlink(skb, queue); @@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, return NULL; +unlock_err: + spin_unlock_irqrestore(&queue->lock, cpu_flags); no_packet: *err = error; return NULL; @@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev); } - skb->csum_valid = !sum; + if (!skb_shared(skb)) + skb->csum_valid = !sum; return sum; } EXPORT_SYMBOL(__skb_checksum_complete_head); @@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) netdev_rx_csum_fault(skb->dev); } - /* Save full packet checksum */ - skb->csum = csum; - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum_complete_sw = 1; - skb->csum_valid = !sum; + if (!skb_shared(skb)) { + /* Save full packet checksum */ + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum_complete_sw = 1; + skb->csum_valid = !sum; + } return sum; } diff --git a/kernel/net/core/dev.c b/kernel/net/core/dev.c index 349de9dba..f8c23dee5 100644 --- a/kernel/net/core/dev.c +++ b/kernel/net/core/dev.c @@ -673,10 +673,6 @@ int dev_get_iflink(const struct net_device *dev) if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) return dev->netdev_ops->ndo_get_iflink(dev); - /* If dev->rtnl_link_ops is set, it's a virtual interface. 
*/ - if (dev->rtnl_link_ops) - return 0; - return dev->ifindex; } EXPORT_SYMBOL(dev_get_iflink); @@ -3349,6 +3345,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, local_irq_save(flags); rps_lock(sd); + if (!netif_running(skb->dev)) + goto drop; qlen = skb_queue_len(&sd->input_pkt_queue); if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { if (qlen) { @@ -3370,6 +3368,7 @@ enqueue: goto enqueue; } +drop: sd->dropped++; rps_unlock(sd); @@ -3704,8 +3703,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) pt_prev = NULL; - rcu_read_lock(); - another_round: skb->skb_iif = skb->dev->ifindex; @@ -3715,7 +3712,7 @@ another_round: skb->protocol == cpu_to_be16(ETH_P_8021AD)) { skb = skb_vlan_untag(skb); if (unlikely(!skb)) - goto unlock; + goto out; } #ifdef CONFIG_NET_CLS_ACT @@ -3745,7 +3742,7 @@ skip_taps: if (static_key_false(&ingress_needed)) { skb = handle_ing(skb, &pt_prev, &ret, orig_dev); if (!skb) - goto unlock; + goto out; } skb->tc_verd = 0; @@ -3762,7 +3759,7 @@ ncls: if (vlan_do_receive(&skb)) goto another_round; else if (unlikely(!skb)) - goto unlock; + goto out; } rx_handler = rcu_dereference(skb->dev->rx_handler); @@ -3774,7 +3771,7 @@ ncls: switch (rx_handler(&skb)) { case RX_HANDLER_CONSUMED: ret = NET_RX_SUCCESS; - goto unlock; + goto out; case RX_HANDLER_ANOTHER: goto another_round; case RX_HANDLER_EXACT: @@ -3828,8 +3825,7 @@ drop: ret = NET_RX_DROP; } -unlock: - rcu_read_unlock(); +out: return ret; } @@ -3860,29 +3856,30 @@ static int __netif_receive_skb(struct sk_buff *skb) static int netif_receive_skb_internal(struct sk_buff *skb) { + int ret; + net_timestamp_check(netdev_tstamp_prequeue, skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; + rcu_read_lock(); + #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; - int cpu, ret; - - rcu_read_lock(); - - cpu = get_rps_cpu(skb->dev, skb, &rflow); + int cpu = get_rps_cpu(skb->dev, skb, &rflow); if (cpu >= 0) { ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); return ret; } - rcu_read_unlock(); } #endif - return __netif_receive_skb(skb); + ret = __netif_receive_skb(skb); + rcu_read_unlock(); + return ret; } /** @@ -4432,8 +4429,10 @@ static int process_backlog(struct napi_struct *napi, int quota) struct sk_buff *skb; while ((skb = __skb_dequeue(&sd->process_queue))) { + rcu_read_lock(); local_irq_enable(); __netif_receive_skb(skb); + rcu_read_unlock(); local_irq_disable(); input_queue_head_incr(sd); if (++work >= quota) { @@ -6070,6 +6069,7 @@ static void rollback_registered_many(struct list_head *head) unlist_netdevice(dev); dev->reg_state = NETREG_UNREGISTERING; + on_each_cpu(flush_backlog, dev, 1); } synchronize_net(); @@ -6340,7 +6340,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev) struct netdev_queue *tx; size_t sz = count * sizeof(*tx); - BUG_ON(count < 1 || count > 0xffff); + if (count < 1 || count > 0xffff) + return -EINVAL; tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!tx) { @@ -6693,8 +6694,6 @@ void netdev_run_todo(void) dev->reg_state = NETREG_UNREGISTERED; - on_each_cpu(flush_backlog, dev, 1); - netdev_wait_allrefs(dev); /* paranoia */ @@ -7218,7 +7217,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, netif_rx_ni(skb); input_queue_head_incr(oldsd); } - while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx_ni(skb); input_queue_head_incr(oldsd); } diff --git 
a/kernel/net/core/fib_rules.c b/kernel/net/core/fib_rules.c index 9a12668f7..0ad144fb0 100644 --- a/kernel/net/core/fib_rules.c +++ b/kernel/net/core/fib_rules.c @@ -615,15 +615,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, { int idx = 0; struct fib_rule *rule; + int err = 0; rcu_read_lock(); list_for_each_entry_rcu(rule, &ops->rules_list, list) { if (idx < cb->args[1]) goto skip; - if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, RTM_NEWRULE, - NLM_F_MULTI, ops) < 0) + err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWRULE, + NLM_F_MULTI, ops); + if (err) break; skip: idx++; @@ -632,7 +634,7 @@ skip: cb->args[1] = idx; rules_ops_put(ops); - return skb->len; + return err; } static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) @@ -648,7 +650,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) if (ops == NULL) return -EAFNOSUPPORT; - return dump_rules(skb, cb, ops); + dump_rules(skb, cb, ops); + + return skb->len; } rcu_read_lock(); diff --git a/kernel/net/core/pktgen.c b/kernel/net/core/pktgen.c index 508155b28..043ea1867 100644 --- a/kernel/net/core/pktgen.c +++ b/kernel/net/core/pktgen.c @@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg) pktgen_rem_thread(t); /* Wait for kthread_stop */ - while (!kthread_should_stop()) { + for (;;) { set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + break; schedule(); } __set_current_state(TASK_RUNNING); diff --git a/kernel/net/core/request_sock.c b/kernel/net/core/request_sock.c index 87b22c0bc..b42f0e26f 100644 --- a/kernel/net/core/request_sock.c +++ b/kernel/net/core/request_sock.c @@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) spin_lock_bh(&queue->syn_wait_lock); while ((req = lopt->syn_table[i]) != NULL) { lopt->syn_table[i] = req->dl_next; + /* Because of following del_timer_sync(), + * we must release the spinlock here + * or risk a dead lock. 
+ */ + spin_unlock_bh(&queue->syn_wait_lock); atomic_inc(&lopt->qlen_dec); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); reqsk_put(req); + spin_lock_bh(&queue->syn_wait_lock); } spin_unlock_bh(&queue->syn_wait_lock); } diff --git a/kernel/net/core/rtnetlink.c b/kernel/net/core/rtnetlink.c index 8de368240..fe95cb704 100644 --- a/kernel/net/core/rtnetlink.c +++ b/kernel/net/core/rtnetlink.c @@ -1287,10 +1287,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, }; -static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { - [IFLA_VF_INFO] = { .type = NLA_NESTED }, -}; - static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, @@ -1437,96 +1433,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) return 0; } -static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) +static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) { - int rem, err = -EINVAL; - struct nlattr *vf; const struct net_device_ops *ops = dev->netdev_ops; + int err = -EINVAL; - nla_for_each_nested(vf, attr, rem) { - switch (nla_type(vf)) { - case IFLA_VF_MAC: { - struct ifla_vf_mac *ivm; - ivm = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_mac) - err = ops->ndo_set_vf_mac(dev, ivm->vf, - ivm->mac); - break; - } - case IFLA_VF_VLAN: { - struct ifla_vf_vlan *ivv; - ivv = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_vlan) - err = ops->ndo_set_vf_vlan(dev, ivv->vf, - ivv->vlan, - ivv->qos); - break; - } - case IFLA_VF_TX_RATE: { - struct ifla_vf_tx_rate *ivt; - struct ifla_vf_info ivf; - ivt = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_get_vf_config) - err = ops->ndo_get_vf_config(dev, ivt->vf, - &ivf); - if (err) - break; - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_rate) - err = ops->ndo_set_vf_rate(dev, ivt->vf, - ivf.min_tx_rate, - ivt->rate); - break; - } - case IFLA_VF_RATE: { - struct ifla_vf_rate *ivt; - ivt = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_rate) - err = ops->ndo_set_vf_rate(dev, ivt->vf, - ivt->min_tx_rate, - ivt->max_tx_rate); - break; - } - case IFLA_VF_SPOOFCHK: { - struct ifla_vf_spoofchk *ivs; - ivs = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_spoofchk) - err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, - ivs->setting); - break; - } - case IFLA_VF_LINK_STATE: { - struct ifla_vf_link_state *ivl; - ivl = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_link_state) - err = ops->ndo_set_vf_link_state(dev, ivl->vf, - ivl->link_state); - break; - } - case IFLA_VF_RSS_QUERY_EN: { - struct ifla_vf_rss_query_en *ivrssq_en; + if (tb[IFLA_VF_MAC]) { + struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); - ivrssq_en = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_rss_query_en) - err = ops->ndo_set_vf_rss_query_en(dev, - ivrssq_en->vf, - ivrssq_en->setting); - break; - } - default: - err = -EINVAL; - break; - } - if (err) - break; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_mac) + err = ops->ndo_set_vf_mac(dev, ivm->vf, + ivm->mac); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_VLAN]) { + struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_vlan) + err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, + ivv->qos); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_TX_RATE]) { + struct ifla_vf_tx_rate *ivt 
= nla_data(tb[IFLA_VF_TX_RATE]); + struct ifla_vf_info ivf; + + err = -EOPNOTSUPP; + if (ops->ndo_get_vf_config) + err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); + if (err < 0) + return err; + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_rate) + err = ops->ndo_set_vf_rate(dev, ivt->vf, + ivf.min_tx_rate, + ivt->rate); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_RATE]) { + struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_rate) + err = ops->ndo_set_vf_rate(dev, ivt->vf, + ivt->min_tx_rate, + ivt->max_tx_rate); + if (err < 0) + return err; } + + if (tb[IFLA_VF_SPOOFCHK]) { + struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_spoofchk) + err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, + ivs->setting); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_LINK_STATE]) { + struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_link_state) + err = ops->ndo_set_vf_link_state(dev, ivl->vf, + ivl->link_state); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_RSS_QUERY_EN]) { + struct ifla_vf_rss_query_en *ivrssq_en; + + err = -EOPNOTSUPP; + ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); + if (ops->ndo_set_vf_rss_query_en) + err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, + ivrssq_en->setting); + if (err < 0) + return err; + } + return err; } @@ -1722,14 +1720,21 @@ static int do_setlink(const struct sk_buff *skb, } if (tb[IFLA_VFINFO_LIST]) { + struct nlattr *vfinfo[IFLA_VF_MAX + 1]; struct nlattr *attr; int rem; + nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { - if (nla_type(attr) != IFLA_VF_INFO) { + if (nla_type(attr) != IFLA_VF_INFO || + nla_len(attr) < NLA_HDRLEN) { err = -EINVAL; goto errout; } - err = do_setvfinfo(dev, attr); + err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr, + ifla_vf_policy); + if (err < 0) + goto errout; + err = do_setvfinfo(dev, vfinfo); if (err < 0) goto errout; status |= DO_SETLINK_NOTIFY; diff --git a/kernel/net/core/skbuff.c b/kernel/net/core/skbuff.c index 98da59c44..fc09e8f3d 100644 --- a/kernel/net/core/skbuff.c +++ b/kernel/net/core/skbuff.c @@ -341,7 +341,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) if (skb && frag_size) { skb->head_frag = 1; - if (virt_to_head_page(data)->pfmemalloc) + if (page_is_pfmemalloc(virt_to_head_page(data))) skb->pfmemalloc = 1; } return skb; diff --git a/kernel/net/core/sock_diag.c b/kernel/net/core/sock_diag.c index 74dddf84a..556ecf96a 100644 --- a/kernel/net/core/sock_diag.c +++ b/kernel/net/core/sock_diag.c @@ -86,6 +86,9 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, goto out; fprog = filter->prog->orig_prog; + if (!fprog) + goto out; + flen = bpf_classic_proglen(fprog); attr = nla_reserve(skb, attrtype, flen); diff --git a/kernel/net/dsa/slave.c b/kernel/net/dsa/slave.c index 827cda560..57978c5b2 100644 --- a/kernel/net/dsa/slave.c +++ b/kernel/net/dsa/slave.c @@ -732,7 +732,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, return -ENODEV; /* Use already configured phy mode */ - p->phy_interface = p->phy->interface; + if (p->phy_interface == PHY_INTERFACE_MODE_NA) + p->phy_interface = p->phy->interface; phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, p->phy_interface); diff --git a/kernel/net/ipv4/datagram.c b/kernel/net/ipv4/datagram.c index 90c0e8386..574fad9cc 100644 --- a/kernel/net/ipv4/datagram.c +++ b/kernel/net/ipv4/datagram.c @@ -20,7 +20,7 @@ #include 
<net/route.h> #include <net/tcp_states.h> -int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; @@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sk_dst_reset(sk); - lock_sock(sk); - oif = sk->sk_bound_dev_if; saddr = inet->inet_saddr; if (ipv4_is_multicast(usin->sin_addr.s_addr)) { @@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sk_dst_set(sk, &rt->dst); err = 0; out: - release_sock(sk); return err; } +EXPORT_SYMBOL(__ip4_datagram_connect); + +int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip4_datagram_connect(sk, uaddr, addr_len); + release_sock(sk); + return res; +} EXPORT_SYMBOL(ip4_datagram_connect); /* Because UDP xmit path can manipulate sk_dst_cache without holding diff --git a/kernel/net/ipv4/fib_trie.c b/kernel/net/ipv4/fib_trie.c index 09b62e17d..0ca933db1 100644 --- a/kernel/net/ipv4/fib_trie.c +++ b/kernel/net/ipv4/fib_trie.c @@ -1780,8 +1780,6 @@ void fib_table_flush_external(struct fib_table *tb) if (hlist_empty(&n->leaf)) { put_child_root(pn, n->key, NULL); node_free(n); - } else { - leaf_pull_suffix(pn, n); } } } @@ -1852,8 +1850,6 @@ int fib_table_flush(struct fib_table *tb) if (hlist_empty(&n->leaf)) { put_child_root(pn, n->key, NULL); node_free(n); - } else { - leaf_pull_suffix(pn, n); } } @@ -2457,7 +2453,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, key = l->key + 1; iter->pos++; - if (pos-- <= 0) + if (--pos <= 0) break; l = NULL; diff --git a/kernel/net/ipv4/inet_connection_sock.c b/kernel/net/ipv4/inet_connection_sock.c index 8976ca423..b27fc401c 100644 --- a/kernel/net/ipv4/inet_connection_sock.c +++ b/kernel/net/ipv4/inet_connection_sock.c @@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue, } spin_unlock(&queue->syn_wait_lock); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); return found; } diff --git a/kernel/net/ipv4/ip_fragment.c b/kernel/net/ipv4/ip_fragment.c index cc1da6d9c..cae22a1a8 100644 --- a/kernel/net/ipv4/ip_fragment.c +++ b/kernel/net/ipv4/ip_fragment.c @@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) ihl = ip_hdrlen(skb); /* Determine the position of this fragment. */ - end = offset + skb->len - ihl; + end = offset + skb->len - skb_network_offset(skb) - ihl; err = -EINVAL; /* Is this the final fragment? */ @@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) goto err; err = -ENOMEM; - if (!pskb_pull(skb, ihl)) + if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) goto err; err = pskb_trim_rcsum(skb, end - offset); @@ -613,6 +613,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, iph->frag_off = qp->q.max_size ? 
htons(IP_DF) : 0; iph->tot_len = htons(len); iph->tos |= ecn; + + ip_send_check(iph); + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; diff --git a/kernel/net/ipv4/ip_tunnel.c b/kernel/net/ipv4/ip_tunnel.c index 4c2c3ba4b..626d9e56a 100644 --- a/kernel/net/ipv4/ip_tunnel.c +++ b/kernel/net/ipv4/ip_tunnel.c @@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, EXPORT_SYMBOL(ip_tunnel_encap); static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, - struct rtable *rt, __be16 df) + struct rtable *rt, __be16 df, + const struct iphdr *inner_iph) { struct ip_tunnel *tunnel = netdev_priv(dev); int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; @@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, if (skb->protocol == htons(ETH_P_IP)) { if (!skb_is_gso(skb) && - (df & htons(IP_DF)) && mtu < pkt_size) { + (inner_iph->frag_off & htons(IP_DF)) && + mtu < pkt_size) { memset(IPCB(skb), 0, sizeof(*IPCB(skb))); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); return -E2BIG; @@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { + if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { ip_rt_put(rt); goto tx_error; } diff --git a/kernel/net/ipv4/tcp_ipv4.c b/kernel/net/ipv4/tcp_ipv4.c index fc1c658ec..441ca6f38 100644 --- a/kernel/net/ipv4/tcp_ipv4.c +++ b/kernel/net/ipv4/tcp_ipv4.c @@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } diff --git a/kernel/net/ipv4/tcp_output.c b/kernel/net/ipv4/tcp_output.c index a369e8a70..986440b24 100644 --- a/kernel/net/ipv4/tcp_output.c +++ b/kernel/net/ipv4/tcp_output.c @@ -2893,6 +2893,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) skb_reserve(skb, MAX_TCP_HEADER); tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), TCPHDR_ACK | TCPHDR_RST); + skb_mstamp_get(&skb->skb_mstamp); /* Send it off. 
*/ if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); diff --git a/kernel/net/ipv4/udp.c b/kernel/net/ipv4/udp.c index 83aa604f9..1b8c5ba7d 100644 --- a/kernel/net/ipv4/udp.c +++ b/kernel/net/ipv4/udp.c @@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = sk->sk_rx_dst; + dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); - if (dst) - skb_dst_set_noref(skb, dst); + if (dst) { + /* DST_NOCACHE can not be used without taking a reference */ + if (dst->flags & DST_NOCACHE) { + if (likely(atomic_inc_not_zero(&dst->__refcnt))) + skb_dst_set(skb, dst); + } else { + skb_dst_set_noref(skb, dst); + } + } } int udp_rcv(struct sk_buff *skb) diff --git a/kernel/net/ipv6/datagram.c b/kernel/net/ipv6/datagram.c index 62d908e64..b10a88986 100644 --- a/kernel/net/ipv6/datagram.c +++ b/kernel/net/ipv6/datagram.c @@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); } -int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_sock *inet = inet_sk(sk); @@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (usin->sin6_family == AF_INET) { if (__ipv6_only_sock(sk)) return -EAFNOSUPPORT; - err = ip4_datagram_connect(sk, uaddr, addr_len); + err = __ip4_datagram_connect(sk, uaddr, addr_len); goto ipv4_connected; } @@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sin.sin_addr.s_addr = daddr->s6_addr32[3]; sin.sin_port = usin->sin6_port; - err = ip4_datagram_connect(sk, - (struct sockaddr *) &sin, - sizeof(sin)); + err = __ip4_datagram_connect(sk, + (struct sockaddr *) &sin, + sizeof(sin)); ipv4_connected: if (err) @@ -204,6 +204,16 @@ out: fl6_sock_release(flowlabel); return err; } + +int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip6_datagram_connect(sk, uaddr, addr_len); + release_sock(sk); + return res; +} EXPORT_SYMBOL_GPL(ip6_datagram_connect); int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, diff --git a/kernel/net/ipv6/exthdrs_offload.c b/kernel/net/ipv6/exthdrs_offload.c index 447a7fbd1..f5e2ba1c1 100644 --- a/kernel/net/ipv6/exthdrs_offload.c +++ b/kernel/net/ipv6/exthdrs_offload.c @@ -36,6 +36,6 @@ out: return ret; out_rt: - inet_del_offload(&rthdr_offload, IPPROTO_ROUTING); + inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING); goto out; } diff --git a/kernel/net/ipv6/ip6_gre.c b/kernel/net/ipv6/ip6_gre.c index a38d3ac0f..69f4f689f 100644 --- a/kernel/net/ipv6/ip6_gre.c +++ b/kernel/net/ipv6/ip6_gre.c @@ -361,6 +361,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); ip6gre_tunnel_unlink(ign, t); + ip6_tnl_dst_reset(t); dev_put(dev); } diff --git a/kernel/net/ipv6/ip6_input.c b/kernel/net/ipv6/ip6_input.c index f2e464eba..57990c929 100644 --- a/kernel/net/ipv6/ip6_input.c +++ b/kernel/net/ipv6/ip6_input.c @@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb) if (offset < 0) goto out; - if (!ipv6_is_mld(skb, nexthdr, offset)) - goto out; + if (ipv6_is_mld(skb, nexthdr, offset)) + deliver = true; - deliver = true; + goto out; } /* unknown RA - process 
it normally */ } diff --git a/kernel/net/ipv6/ip6_offload.c b/kernel/net/ipv6/ip6_offload.c index e893cd186..08b62047c 100644 --- a/kernel/net/ipv6/ip6_offload.c +++ b/kernel/net/ipv6/ip6_offload.c @@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { static const struct net_offload sit_offload = { .callbacks = { .gso_segment = ipv6_gso_segment, - .gro_receive = ipv6_gro_receive, - .gro_complete = ipv6_gro_complete, }, }; diff --git a/kernel/net/ipv6/ip6mr.c b/kernel/net/ipv6/ip6mr.c index 74ceb73c1..5f36266b1 100644 --- a/kernel/net/ipv6/ip6mr.c +++ b/kernel/net/ipv6/ip6mr.c @@ -550,7 +550,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) if (it->cache == &mrt->mfc6_unres_queue) spin_unlock_bh(&mfc_unres_lock); - else if (it->cache == mrt->mfc6_cache_array) + else if (it->cache == &mrt->mfc6_cache_array[it->ct]) read_unlock(&mrt_lock); } diff --git a/kernel/net/ipv6/route.c b/kernel/net/ipv6/route.c index c73ae5039..f371fefa7 100644 --- a/kernel/net/ipv6/route.c +++ b/kernel/net/ipv6/route.c @@ -1515,7 +1515,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc, return -EINVAL; } -int ip6_route_add(struct fib6_config *cfg) +int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret) { int err; struct net *net = cfg->fc_nlinfo.nl_net; @@ -1523,7 +1523,6 @@ int ip6_route_add(struct fib6_config *cfg) struct net_device *dev = NULL; struct inet6_dev *idev = NULL; struct fib6_table *table; - struct mx6_config mxc = { .mx = NULL, }; int addr_type; if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) @@ -1719,6 +1718,32 @@ install_route: cfg->fc_nlinfo.nl_net = dev_net(dev); + *rt_ret = rt; + + return 0; +out: + if (dev) + dev_put(dev); + if (idev) + in6_dev_put(idev); + if (rt) + dst_free(&rt->dst); + + *rt_ret = NULL; + + return err; +} + +int ip6_route_add(struct fib6_config *cfg) +{ + struct mx6_config mxc = { .mx = NULL, }; + struct rt6_info *rt = NULL; + int err; + + err = ip6_route_info_create(cfg, &rt); + if (err) + goto out; + err = ip6_convert_metrics(&mxc, cfg); if (err) goto out; @@ -1726,14 +1751,12 @@ install_route: err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc); kfree(mxc.mx); + return err; out: - if (dev) - dev_put(dev); - if (idev) - in6_dev_put(idev); if (rt) dst_free(&rt->dst); + return err; } @@ -2496,19 +2519,78 @@ errout: return err; } -static int ip6_route_multipath(struct fib6_config *cfg, int add) +struct rt6_nh { + struct rt6_info *rt6_info; + struct fib6_config r_cfg; + struct mx6_config mxc; + struct list_head next; +}; + +static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) +{ + struct rt6_nh *nh; + + list_for_each_entry(nh, rt6_nh_list, next) { + pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n", + &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, + nh->r_cfg.fc_ifindex); + } +} + +static int ip6_route_info_append(struct list_head *rt6_nh_list, + struct rt6_info *rt, struct fib6_config *r_cfg) +{ + struct rt6_nh *nh; + struct rt6_info *rtnh; + int err = -EEXIST; + + list_for_each_entry(nh, rt6_nh_list, next) { + /* check if rt6_info already exists */ + rtnh = nh->rt6_info; + + if (rtnh->dst.dev == rt->dst.dev && + rtnh->rt6i_idev == rt->rt6i_idev && + ipv6_addr_equal(&rtnh->rt6i_gateway, + &rt->rt6i_gateway)) + return err; + } + + nh = kzalloc(sizeof(*nh), GFP_KERNEL); + if (!nh) + return -ENOMEM; + nh->rt6_info = rt; + err = ip6_convert_metrics(&nh->mxc, r_cfg); + if (err) { + kfree(nh); + return err; + } + 
memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); + list_add_tail(&nh->next, rt6_nh_list); + + return 0; +} + +static int ip6_route_multipath_add(struct fib6_config *cfg) { struct fib6_config r_cfg; struct rtnexthop *rtnh; + struct rt6_info *rt; + struct rt6_nh *err_nh; + struct rt6_nh *nh, *nh_safe; int remaining; int attrlen; - int err = 0, last_err = 0; + int err = 1; + int nhn = 0; + int replace = (cfg->fc_nlinfo.nlh && + (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); + LIST_HEAD(rt6_nh_list); remaining = cfg->fc_mp_len; -beginning: rtnh = (struct rtnexthop *)cfg->fc_mp; - /* Parse a Multipath Entry */ + /* Parse a Multipath Entry and build a list (rt6_nh_list) of + * rt6_info structs per nexthop + */ while (rtnh_ok(rtnh, remaining)) { memcpy(&r_cfg, cfg, sizeof(*cfg)); if (rtnh->rtnh_ifindex) @@ -2524,22 +2606,32 @@ beginning: r_cfg.fc_flags |= RTF_GATEWAY; } } - err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg); + + err = ip6_route_info_create(&r_cfg, &rt); + if (err) + goto cleanup; + + err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); if (err) { - last_err = err; - /* If we are trying to remove a route, do not stop the - * loop when ip6_route_del() fails (because next hop is - * already gone), we should try to remove all next hops. - */ - if (add) { - /* If add fails, we should try to delete all - * next hops that have been already added. - */ - add = 0; - remaining = cfg->fc_mp_len - remaining; - goto beginning; - } + dst_free(&rt->dst); + goto cleanup; + } + + rtnh = rtnh_next(rtnh, &remaining); + } + + err_nh = NULL; + list_for_each_entry(nh, &rt6_nh_list, next) { + err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc); + /* nh->rt6_info is used or freed at this point, reset to NULL*/ + nh->rt6_info = NULL; + if (err) { + if (replace && nhn) + ip6_print_replace_route_err(&rt6_nh_list); + err_nh = nh; + goto add_errout; } + /* Because each route is added like a single route we remove * these flags after the first nexthop: if there is a collision, * we have already failed to add the first nexthop: @@ -2549,6 +2641,63 @@ beginning: */ cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | NLM_F_REPLACE); + nhn++; + } + + goto cleanup; + +add_errout: + /* Delete routes that were already added */ + list_for_each_entry(nh, &rt6_nh_list, next) { + if (err_nh == nh) + break; + ip6_route_del(&nh->r_cfg); + } + +cleanup: + list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { + if (nh->rt6_info) + dst_free(&nh->rt6_info->dst); + if (nh->mxc.mx) + kfree(nh->mxc.mx); + list_del(&nh->next); + kfree(nh); + } + + return err; +} + +static int ip6_route_multipath_del(struct fib6_config *cfg) +{ + struct fib6_config r_cfg; + struct rtnexthop *rtnh; + int remaining; + int attrlen; + int err = 1, last_err = 0; + + remaining = cfg->fc_mp_len; + rtnh = (struct rtnexthop *)cfg->fc_mp; + + /* Parse a Multipath Entry */ + while (rtnh_ok(rtnh, remaining)) { + memcpy(&r_cfg, cfg, sizeof(*cfg)); + if (rtnh->rtnh_ifindex) + r_cfg.fc_ifindex = rtnh->rtnh_ifindex; + + attrlen = rtnh_attrlen(rtnh); + if (attrlen > 0) { + struct nlattr *nla, *attrs = rtnh_attrs(rtnh); + + nla = nla_find(attrs, attrlen, RTA_GATEWAY); + if (nla) { + nla_memcpy(&r_cfg.fc_gateway, nla, 16); + r_cfg.fc_flags |= RTF_GATEWAY; + } + } + err = ip6_route_del(&r_cfg); + if (err) + last_err = err; + rtnh = rtnh_next(rtnh, &remaining); } @@ -2565,7 +2714,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) return err; if (cfg.fc_mp) - return ip6_route_multipath(&cfg, 0); + return 
ip6_route_multipath_del(&cfg); else return ip6_route_del(&cfg); } @@ -2580,7 +2729,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) return err; if (cfg.fc_mp) - return ip6_route_multipath(&cfg, 1); + return ip6_route_multipath_add(&cfg); else return ip6_route_add(&cfg); } diff --git a/kernel/net/ipv6/tcp_ipv6.c b/kernel/net/ipv6/tcp_ipv6.c index 3adffb300..e541d68db 100644 --- a/kernel/net/ipv6/tcp_ipv6.c +++ b/kernel/net/ipv6/tcp_ipv6.c @@ -946,7 +946,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } diff --git a/kernel/net/mac80211/tx.c b/kernel/net/mac80211/tx.c index 667111ee6..5787f15a3 100644 --- a/kernel/net/mac80211/tx.c +++ b/kernel/net/mac80211/tx.c @@ -301,9 +301,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) return TX_CONTINUE; - if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) - return TX_CONTINUE; - if (tx->flags & IEEE80211_TX_PS_BUFFERED) return TX_CONTINUE; diff --git a/kernel/net/netlink/af_netlink.c b/kernel/net/netlink/af_netlink.c index bf6e76643..980121e75 100644 --- a/kernel/net/netlink/af_netlink.c +++ b/kernel/net/netlink/af_netlink.c @@ -123,6 +123,24 @@ static inline u32 netlink_group_mask(u32 group) return group ? 1 << (group - 1) : 0; } +static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, + gfp_t gfp_mask) +{ + unsigned int len = skb_end_offset(skb); + struct sk_buff *new; + + new = alloc_skb(len, gfp_mask); + if (new == NULL) + return NULL; + + NETLINK_CB(new).portid = NETLINK_CB(skb).portid; + NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; + NETLINK_CB(new).creds = NETLINK_CB(skb).creds; + + memcpy(skb_put(new, len), skb->data, len); + return new; +} + int netlink_add_tap(struct netlink_tap *nt) { if (unlikely(nt->dev->type != ARPHRD_NETLINK)) @@ -204,7 +222,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, int ret = -ENOMEM; dev_hold(dev); - nskb = skb_clone(skb, GFP_ATOMIC); + + if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head)) + nskb = netlink_to_full_skb(skb, GFP_ATOMIC); + else + nskb = skb_clone(skb, GFP_ATOMIC); if (nskb) { nskb->dev = dev; nskb->protocol = htons((u16) sk->sk_protocol); @@ -276,11 +298,6 @@ static void netlink_rcv_wake(struct sock *sk) } #ifdef CONFIG_NETLINK_MMAP -static bool netlink_skb_is_mmaped(const struct sk_buff *skb) -{ - return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; -} - static bool netlink_rx_is_mmaped(struct sock *sk) { return nlk_sk(sk)->rx_ring.pg_vec != NULL; @@ -355,25 +372,52 @@ err1: return NULL; } + +static void +__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, + unsigned int order) +{ + struct netlink_sock *nlk = nlk_sk(sk); + struct sk_buff_head *queue; + struct netlink_ring *ring; + + queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; + ring = tx_ring ? 
&nlk->tx_ring : &nlk->rx_ring; + + spin_lock_bh(&queue->lock); + + ring->frame_max = req->nm_frame_nr - 1; + ring->head = 0; + ring->frame_size = req->nm_frame_size; + ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; + + swap(ring->pg_vec_len, req->nm_block_nr); + swap(ring->pg_vec_order, order); + swap(ring->pg_vec, pg_vec); + + __skb_queue_purge(queue); + spin_unlock_bh(&queue->lock); + + WARN_ON(atomic_read(&nlk->mapped)); + + if (pg_vec) + free_pg_vec(pg_vec, order, req->nm_block_nr); +} + static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, - bool closing, bool tx_ring) + bool tx_ring) { struct netlink_sock *nlk = nlk_sk(sk); struct netlink_ring *ring; - struct sk_buff_head *queue; void **pg_vec = NULL; unsigned int order = 0; - int err; ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; - queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; - if (!closing) { - if (atomic_read(&nlk->mapped)) - return -EBUSY; - if (atomic_read(&ring->pending)) - return -EBUSY; - } + if (atomic_read(&nlk->mapped)) + return -EBUSY; + if (atomic_read(&ring->pending)) + return -EBUSY; if (req->nm_block_nr) { if (ring->pg_vec != NULL) @@ -405,31 +449,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, return -EINVAL; } - err = -EBUSY; mutex_lock(&nlk->pg_vec_lock); - if (closing || atomic_read(&nlk->mapped) == 0) { - err = 0; - spin_lock_bh(&queue->lock); - - ring->frame_max = req->nm_frame_nr - 1; - ring->head = 0; - ring->frame_size = req->nm_frame_size; - ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; - - swap(ring->pg_vec_len, req->nm_block_nr); - swap(ring->pg_vec_order, order); - swap(ring->pg_vec, pg_vec); - - __skb_queue_purge(queue); - spin_unlock_bh(&queue->lock); - - WARN_ON(atomic_read(&nlk->mapped)); + if (atomic_read(&nlk->mapped) == 0) { + __netlink_set_ring(sk, req, tx_ring, pg_vec, order); + mutex_unlock(&nlk->pg_vec_lock); + return 0; } + mutex_unlock(&nlk->pg_vec_lock); if (pg_vec) free_pg_vec(pg_vec, order, req->nm_block_nr); - return err; + + return -EBUSY; } static void netlink_mm_open(struct vm_area_struct *vma) @@ -817,7 +849,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) } #else /* CONFIG_NETLINK_MMAP */ -#define netlink_skb_is_mmaped(skb) false #define netlink_rx_is_mmaped(sk) false #define netlink_tx_is_mmaped(sk) false #define netlink_mmap sock_no_mmap @@ -898,10 +929,10 @@ static void netlink_sock_destruct(struct sock *sk) memset(&req, 0, sizeof(req)); if (nlk->rx_ring.pg_vec) - netlink_set_ring(sk, &req, true, false); + __netlink_set_ring(sk, &req, false, NULL, 0); memset(&req, 0, sizeof(req)); if (nlk->tx_ring.pg_vec) - netlink_set_ring(sk, &req, true, true); + __netlink_set_ring(sk, &req, true, NULL, 0); } #endif /* CONFIG_NETLINK_MMAP */ @@ -1065,8 +1096,8 @@ static int netlink_insert(struct sock *sk, u32 portid) lock_sock(sk); - err = -EBUSY; - if (nlk_sk(sk)->portid) + err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; + if (nlk_sk(sk)->bound) goto err; err = -ENOMEM; @@ -1079,12 +1110,20 @@ static int netlink_insert(struct sock *sk, u32 portid) err = __netlink_insert(table, sk); if (err) { + /* In case the hashtable backend returns with -EBUSY + * from here, it must not escape to the caller. + */ + if (unlikely(err == -EBUSY)) + err = -EOVERFLOW; if (err == -EEXIST) err = -EADDRINUSE; - nlk_sk(sk)->portid = 0; sock_put(sk); } + /* We need to ensure that the socket is hashed and visible. 
*/ + smp_wmb(); + nlk_sk(sk)->bound = portid; + err: release_sock(sk); return err; @@ -1464,6 +1503,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; long unsigned int groups = nladdr->nl_groups; + bool bound; if (addr_len < sizeof(struct sockaddr_nl)) return -EINVAL; @@ -1480,9 +1520,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, return err; } - if (nlk->portid) + bound = nlk->bound; + if (bound) { + /* Ensure nlk->portid is up-to-date. */ + smp_rmb(); + if (nladdr->nl_pid != nlk->portid) return -EINVAL; + } if (nlk->netlink_bind && groups) { int group; @@ -1498,7 +1543,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, } } - if (!nlk->portid) { + /* No need for barriers here as we return to user-space without + * using any of the bound attributes. + */ + if (!bound) { err = nladdr->nl_pid ? netlink_insert(sk, nladdr->nl_pid) : netlink_autobind(sock); @@ -1546,7 +1594,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) return -EPERM; - if (!nlk->portid) + /* No need for barriers here as we return to user-space without + * using any of the bound attributes. + */ + if (!nlk->bound) err = netlink_autobind(sock); if (err == 0) { @@ -2197,7 +2248,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, return -EINVAL; if (copy_from_user(&req, optval, sizeof(req))) return -EFAULT; - err = netlink_set_ring(sk, &req, false, + err = netlink_set_ring(sk, &req, optname == NETLINK_TX_RING); break; } @@ -2303,10 +2354,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) dst_group = nlk->dst_group; } - if (!nlk->portid) { + if (!nlk->bound) { err = netlink_autobind(sock); if (err) goto out; + } else { + /* Ensure nlk is hashed and visible. 
*/ + smp_rmb(); } /* It's a really convoluted way for userland to ask for mmaped diff --git a/kernel/net/netlink/af_netlink.h b/kernel/net/netlink/af_netlink.h index 89008405d..14437d9b1 100644 --- a/kernel/net/netlink/af_netlink.h +++ b/kernel/net/netlink/af_netlink.h @@ -35,6 +35,7 @@ struct netlink_sock { unsigned long state; size_t max_recvmsg_len; wait_queue_head_t wait; + bool bound; bool cb_running; struct netlink_callback cb; struct mutex *cb_mutex; @@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk) return container_of(sk, struct netlink_sock, sk); } +static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb) +{ +#ifdef CONFIG_NETLINK_MMAP + return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; +#else + return false; +#endif /* CONFIG_NETLINK_MMAP */ +} + struct netlink_table { struct rhashtable hash; struct hlist_head mc_list; diff --git a/kernel/net/nfc/nci/hci.c b/kernel/net/nfc/nci/hci.c index ed54ec533..b33fed6d1 100644 --- a/kernel/net/nfc/nci/hci.c +++ b/kernel/net/nfc/nci/hci.c @@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd, r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); - if (r == NCI_STATUS_OK) + if (r == NCI_STATUS_OK && skb) *skb = conn_info->rx_skb; return r; diff --git a/kernel/net/openvswitch/datapath.c b/kernel/net/openvswitch/datapath.c index 096c6276e..27e14962b 100644 --- a/kernel/net/openvswitch/datapath.c +++ b/kernel/net/openvswitch/datapath.c @@ -906,7 +906,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) if (error) goto err_kfree_flow; - ovs_flow_mask_key(&new_flow->key, &key, &mask); + ovs_flow_mask_key(&new_flow->key, &key, true, &mask); /* Extract flow identifier. */ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], @@ -1033,7 +1033,7 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, struct sw_flow_key masked_key; int error; - ovs_flow_mask_key(&masked_key, key, mask); + ovs_flow_mask_key(&masked_key, key, true, mask); error = ovs_nla_copy_actions(a, &masked_key, &acts, log); if (error) { OVS_NLERR(log, diff --git a/kernel/net/openvswitch/flow_table.c b/kernel/net/openvswitch/flow_table.c index 4613df8c8..aa349514e 100644 --- a/kernel/net/openvswitch/flow_table.c +++ b/kernel/net/openvswitch/flow_table.c @@ -56,20 +56,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range) } void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, - const struct sw_flow_mask *mask) + bool full, const struct sw_flow_mask *mask) { - const long *m = (const long *)((const u8 *)&mask->key + - mask->range.start); - const long *s = (const long *)((const u8 *)src + - mask->range.start); - long *d = (long *)((u8 *)dst + mask->range.start); + int start = full ? 0 : mask->range.start; + int len = full ? sizeof *dst : range_n_bytes(&mask->range); + const long *m = (const long *)((const u8 *)&mask->key + start); + const long *s = (const long *)((const u8 *)src + start); + long *d = (long *)((u8 *)dst + start); int i; - /* The memory outside of the 'mask->range' are not set since - * further operations on 'dst' only uses contents within - * 'mask->range'. + /* If 'full' is true then all of 'dst' is fully initialized. Otherwise, + * if 'full' is false the memory outside of the 'mask->range' is left + * uninitialized. This can be used as an optimization when further + * operations on 'dst' only use contents within 'mask->range'. 
*/ - for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) + for (i = 0; i < len; i += sizeof(long)) *d++ = *s++ & *m++; } @@ -473,7 +474,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti, u32 hash; struct sw_flow_key masked_key; - ovs_flow_mask_key(&masked_key, unmasked, mask); + ovs_flow_mask_key(&masked_key, unmasked, false, mask); hash = flow_hash(&masked_key, &mask->range); head = find_bucket(ti, hash); hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { diff --git a/kernel/net/openvswitch/flow_table.h b/kernel/net/openvswitch/flow_table.h index 616eda10d..2dd9900f5 100644 --- a/kernel/net/openvswitch/flow_table.h +++ b/kernel/net/openvswitch/flow_table.h @@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *, bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, - const struct sw_flow_mask *mask); + bool full, const struct sw_flow_mask *mask); #endif /* flow_table.h */ diff --git a/kernel/net/packet/af_packet.c b/kernel/net/packet/af_packet.c index ef1eb2050..f9f259247 100644 --- a/kernel/net/packet/af_packet.c +++ b/kernel/net/packet/af_packet.c @@ -2308,7 +2308,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) } tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, addr, hlen); - if (tp_len > dev->mtu + dev->hard_header_len) { + if (likely(tp_len >= 0) && + tp_len > dev->mtu + dev->hard_header_len) { struct ethhdr *ehdr; /* Earlier code assumed this would be a VLAN pkt, * double-check this now that we have the actual @@ -2689,7 +2690,7 @@ static int packet_release(struct socket *sock) static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) { struct packet_sock *po = pkt_sk(sk); - const struct net_device *dev_curr; + struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; @@ -2713,15 +2714,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) po->num = proto; po->prot_hook.type = proto; - - if (po->prot_hook.dev) - dev_put(po->prot_hook.dev); - po->prot_hook.dev = dev; po->ifindex = dev ? 
dev->ifindex : 0; packet_cached_dev_assign(po, dev); } + if (dev_curr) + dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; diff --git a/kernel/net/rds/info.c b/kernel/net/rds/info.c index 9a6b4f661..140a44a5f 100644 --- a/kernel/net/rds/info.c +++ b/kernel/net/rds/info.c @@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, /* check for all kinds of wrapping and the like */ start = (unsigned long)optval; - if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { + if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) { ret = -EINVAL; goto out; } diff --git a/kernel/net/sched/act_api.c b/kernel/net/sched/act_api.c index 3d43e4979..f8d9c2a2c 100644 --- a/kernel/net/sched/act_api.c +++ b/kernel/net/sched/act_api.c @@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a) } EXPORT_SYMBOL(tcf_hash_destroy); -int tcf_hash_release(struct tc_action *a, int bind) +int __tcf_hash_release(struct tc_action *a, bool bind, bool strict) { struct tcf_common *p = a->priv; int ret = 0; @@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind) if (p) { if (bind) p->tcfc_bindcnt--; - else if (p->tcfc_bindcnt > 0) + else if (strict && p->tcfc_bindcnt > 0) return -EPERM; p->tcfc_refcnt--; @@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind) ret = 1; } } + return ret; } -EXPORT_SYMBOL(tcf_hash_release); +EXPORT_SYMBOL(__tcf_hash_release); static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, struct tc_action *a) @@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a) head = &hinfo->htab[tcf_hash(i, hinfo->hmask)]; hlist_for_each_entry_safe(p, n, head, tcfc_head) { a->priv = p; - ret = tcf_hash_release(a, 0); + ret = __tcf_hash_release(a, false, true); if (ret == ACT_P_DELETED) { module_put(a->ops->owner); n_i++; @@ -413,7 +414,7 @@ int tcf_action_destroy(struct list_head *actions, int bind) int ret = 0; list_for_each_entry_safe(a, tmp, actions, list) { - ret = tcf_hash_release(a, bind); + ret = __tcf_hash_release(a, bind, true); if (ret == ACT_P_DELETED) module_put(a->ops->owner); else if (ret < 0) diff --git a/kernel/net/sched/act_bpf.c b/kernel/net/sched/act_bpf.c index dc6a2d324..521ffca91 100644 --- a/kernel/net/sched/act_bpf.c +++ b/kernel/net/sched/act_bpf.c @@ -27,9 +27,10 @@ struct tcf_bpf_cfg { struct bpf_prog *filter; struct sock_filter *bpf_ops; - char *bpf_name; + const char *bpf_name; u32 bpf_fd; u16 bpf_num_ops; + bool is_ebpf; }; static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, @@ -200,6 +201,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg) cfg->bpf_ops = bpf_ops; cfg->bpf_num_ops = bpf_num_ops; cfg->filter = fp; + cfg->is_ebpf = false; return 0; } @@ -234,18 +236,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg) cfg->bpf_fd = bpf_fd; cfg->bpf_name = name; cfg->filter = fp; + cfg->is_ebpf = true; return 0; } +static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg) +{ + if (cfg->is_ebpf) + bpf_prog_put(cfg->filter); + else + bpf_prog_destroy(cfg->filter); + + kfree(cfg->bpf_ops); + kfree(cfg->bpf_name); +} + +static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog, + struct tcf_bpf_cfg *cfg) +{ + cfg->is_ebpf = tcf_bpf_is_ebpf(prog); + cfg->filter = prog->filter; + + cfg->bpf_ops = prog->bpf_ops; + cfg->bpf_name = prog->bpf_name; +} + static int tcf_bpf_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct 
tc_action *act, int replace, int bind) { struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; + struct tcf_bpf_cfg cfg, old; struct tc_act_bpf *parm; struct tcf_bpf *prog; - struct tcf_bpf_cfg cfg; bool is_bpf, is_ebpf; int ret; @@ -294,6 +318,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, prog = to_bpf(act); spin_lock_bh(&prog->tcf_lock); + if (ret != ACT_P_CREATED) + tcf_bpf_prog_fill_cfg(prog, &old); + prog->bpf_ops = cfg.bpf_ops; prog->bpf_name = cfg.bpf_name; @@ -309,29 +336,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, if (ret == ACT_P_CREATED) tcf_hash_insert(act); + else + tcf_bpf_cfg_cleanup(&old); return ret; destroy_fp: - if (is_ebpf) - bpf_prog_put(cfg.filter); - else - bpf_prog_destroy(cfg.filter); - - kfree(cfg.bpf_ops); - kfree(cfg.bpf_name); - + tcf_bpf_cfg_cleanup(&cfg); return ret; } static void tcf_bpf_cleanup(struct tc_action *act, int bind) { - const struct tcf_bpf *prog = act->priv; + struct tcf_bpf_cfg tmp; - if (tcf_bpf_is_ebpf(prog)) - bpf_prog_put(prog->filter); - else - bpf_prog_destroy(prog->filter); + tcf_bpf_prog_fill_cfg(act->priv, &tmp); + tcf_bpf_cfg_cleanup(&tmp); } static struct tc_action_ops act_bpf_ops __read_mostly = { diff --git a/kernel/net/sched/cls_bpf.c b/kernel/net/sched/cls_bpf.c index 91bd9c194..c0b86f2bf 100644 --- a/kernel/net/sched/cls_bpf.c +++ b/kernel/net/sched/cls_bpf.c @@ -364,7 +364,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, goto errout; if (oldprog) { - list_replace_rcu(&prog->link, &oldprog->link); + list_replace_rcu(&oldprog->link, &prog->link); tcf_unbind_filter(tp, &oldprog->res); call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); } else { diff --git a/kernel/net/sched/cls_flow.c b/kernel/net/sched/cls_flow.c index a620c4e28..75df923f5 100644 --- a/kernel/net/sched/cls_flow.c +++ b/kernel/net/sched/cls_flow.c @@ -419,6 +419,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (!fnew) goto err2; + tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); + fold = (struct flow_filter *)*arg; if (fold) { err = -EINVAL; @@ -480,7 +482,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, fnew->mask = ~0U; fnew->tp = tp; get_random_bytes(&fnew->hashrnd, 4); - tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); } fnew->perturb_timer.function = flow_perturbation; @@ -520,7 +521,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (*arg == 0) list_add_tail_rcu(&fnew->list, &head->filters); else - list_replace_rcu(&fnew->list, &fold->list); + list_replace_rcu(&fold->list, &fnew->list); *arg = (unsigned long)fnew; diff --git a/kernel/net/sched/cls_fw.c b/kernel/net/sched/cls_fw.c index 715e01e59..f23a3b68b 100644 --- a/kernel/net/sched/cls_fw.c +++ b/kernel/net/sched/cls_fw.c @@ -33,7 +33,6 @@ struct fw_head { u32 mask; - bool mask_set; struct fw_filter __rcu *ht[HTSIZE]; struct rcu_head rcu; }; @@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, } } } else { - /* old method */ + /* Old method: classify the packet using its skb mark. 
*/ if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id ^ tp->q->handle)))) { res->classid = id; @@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle) static int fw_init(struct tcf_proto *tp) { - struct fw_head *head; - - head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); - if (head == NULL) - return -ENOBUFS; - - head->mask_set = false; - rcu_assign_pointer(tp->root, head); + /* We don't allocate fw_head here, because in the old method + * we don't need it at all. + */ return 0; } @@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, int err; if (!opt) - return handle ? -EINVAL : 0; + return handle ? -EINVAL : 0; /* Succeed if it is old method. */ err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); if (err < 0) @@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, if (!handle) return -EINVAL; - if (!head->mask_set) { - head->mask = 0xFFFFFFFF; + if (!head) { + u32 mask = 0xFFFFFFFF; if (tb[TCA_FW_MASK]) - head->mask = nla_get_u32(tb[TCA_FW_MASK]); - head->mask_set = true; + mask = nla_get_u32(tb[TCA_FW_MASK]); + + head = kzalloc(sizeof(*head), GFP_KERNEL); + if (!head) + return -ENOBUFS; + head->mask = mask; + + rcu_assign_pointer(tp->root, head); } f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); diff --git a/kernel/net/sched/cls_u32.c b/kernel/net/sched/cls_u32.c index cab9e9b43..4fbb67430 100644 --- a/kernel/net/sched/cls_u32.c +++ b/kernel/net/sched/cls_u32.c @@ -490,6 +490,19 @@ static bool u32_destroy(struct tcf_proto *tp, bool force) return false; } } + + if (tp_c->refcnt > 1) + return false; + + if (tp_c->refcnt == 1) { + struct tc_u_hnode *ht; + + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) + if (!ht_empty(ht)) + return false; + } } if (root_ht && --root_ht->refcnt == 0) diff --git a/kernel/net/sched/sch_fq_codel.c b/kernel/net/sched/sch_fq_codel.c index c244c45b7..9291598b5 100644 --- a/kernel/net/sched/sch_fq_codel.c +++ b/kernel/net/sched/sch_fq_codel.c @@ -162,10 +162,10 @@ static unsigned int fq_codel_drop(struct Qdisc *sch) skb = dequeue_head(flow); len = qdisc_pkt_len(skb); q->backlogs[idx] -= len; - kfree_skb(skb); sch->q.qlen--; qdisc_qstats_drop(sch); qdisc_qstats_backlog_dec(sch, skb); + kfree_skb(skb); flow->dropped++; return idx; } diff --git a/kernel/net/sctp/protocol.c b/kernel/net/sctp/protocol.c index 53b7acde9..e13c3c3ea 100644 --- a/kernel/net/sctp/protocol.c +++ b/kernel/net/sctp/protocol.c @@ -1166,7 +1166,7 @@ static void sctp_v4_del_protocol(void) unregister_inetaddr_notifier(&sctp_inetaddr_notifier); } -static int __net_init sctp_net_init(struct net *net) +static int __net_init sctp_defaults_init(struct net *net) { int status; @@ -1259,12 +1259,6 @@ static int __net_init sctp_net_init(struct net *net) sctp_dbg_objcnt_init(net); - /* Initialize the control inode/socket for handling OOTB packets. */ - if ((status = sctp_ctl_sock_init(net))) { - pr_err("Failed to initialize the SCTP control sock\n"); - goto err_ctl_sock_init; - } - /* Initialize the local address list. 
*/ INIT_LIST_HEAD(&net->sctp.local_addr_list); spin_lock_init(&net->sctp.local_addr_lock); @@ -1280,9 +1274,6 @@ static int __net_init sctp_net_init(struct net *net) return 0; -err_ctl_sock_init: - sctp_dbg_objcnt_exit(net); - sctp_proc_exit(net); err_init_proc: cleanup_sctp_mibs(net); err_init_mibs: @@ -1291,15 +1282,12 @@ err_sysctl_register: return status; } -static void __net_exit sctp_net_exit(struct net *net) +static void __net_exit sctp_defaults_exit(struct net *net) { /* Free the local address list */ sctp_free_addr_wq(net); sctp_free_local_addr_list(net); - /* Free the control endpoint. */ - inet_ctl_sock_destroy(net->sctp.ctl_sock); - sctp_dbg_objcnt_exit(net); sctp_proc_exit(net); @@ -1307,9 +1295,32 @@ static void __net_exit sctp_net_exit(struct net *net) sctp_sysctl_net_unregister(net); } -static struct pernet_operations sctp_net_ops = { - .init = sctp_net_init, - .exit = sctp_net_exit, +static struct pernet_operations sctp_defaults_ops = { + .init = sctp_defaults_init, + .exit = sctp_defaults_exit, +}; + +static int __net_init sctp_ctrlsock_init(struct net *net) +{ + int status; + + /* Initialize the control inode/socket for handling OOTB packets. */ + status = sctp_ctl_sock_init(net); + if (status) + pr_err("Failed to initialize the SCTP control sock\n"); + + return status; +} + +static void __net_init sctp_ctrlsock_exit(struct net *net) +{ + /* Free the control endpoint. */ + inet_ctl_sock_destroy(net->sctp.ctl_sock); +} + +static struct pernet_operations sctp_ctrlsock_ops = { + .init = sctp_ctrlsock_init, + .exit = sctp_ctrlsock_exit, }; /* Initialize the universe into something sensible. */ @@ -1442,8 +1453,11 @@ static __init int sctp_init(void) sctp_v4_pf_init(); sctp_v6_pf_init(); - status = sctp_v4_protosw_init(); + status = register_pernet_subsys(&sctp_defaults_ops); + if (status) + goto err_register_defaults; + status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; @@ -1451,9 +1465,9 @@ static __init int sctp_init(void) if (status) goto err_v6_protosw_init; - status = register_pernet_subsys(&sctp_net_ops); + status = register_pernet_subsys(&sctp_ctrlsock_ops); if (status) - goto err_register_pernet_subsys; + goto err_register_ctrlsock; status = sctp_v4_add_protocol(); if (status) @@ -1469,12 +1483,14 @@ out: err_v6_add_protocol: sctp_v4_del_protocol(); err_add_protocol: - unregister_pernet_subsys(&sctp_net_ops); -err_register_pernet_subsys: + unregister_pernet_subsys(&sctp_ctrlsock_ops); +err_register_ctrlsock: sctp_v6_protosw_exit(); err_v6_protosw_init: sctp_v4_protosw_exit(); err_protosw_init: + unregister_pernet_subsys(&sctp_defaults_ops); +err_register_defaults: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); @@ -1507,12 +1523,14 @@ static __exit void sctp_exit(void) sctp_v6_del_protocol(); sctp_v4_del_protocol(); - unregister_pernet_subsys(&sctp_net_ops); + unregister_pernet_subsys(&sctp_ctrlsock_ops); /* Free protosw registrations */ sctp_v6_protosw_exit(); sctp_v4_protosw_exit(); + unregister_pernet_subsys(&sctp_defaults_ops); + /* Unregister with socket layer. 
*/ sctp_v6_pf_exit(); sctp_v4_pf_exit(); diff --git a/kernel/net/sunrpc/xprt.c b/kernel/net/sunrpc/xprt.c index 1d4fe24af..d109d308e 100644 --- a/kernel/net/sunrpc/xprt.c +++ b/kernel/net/sunrpc/xprt.c @@ -611,6 +611,7 @@ static void xprt_autoclose(struct work_struct *work) xprt->ops->close(xprt); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); xprt_release_write(xprt, NULL); + wake_up_bit(&xprt->state, XPRT_LOCKED); } /** @@ -720,6 +721,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) xprt->ops->release_xprt(xprt, NULL); out: spin_unlock_bh(&xprt->transport_lock); + wake_up_bit(&xprt->state, XPRT_LOCKED); } /** @@ -1389,6 +1391,10 @@ out: static void xprt_destroy(struct rpc_xprt *xprt) { dprintk("RPC: destroying transport %p\n", xprt); + + /* Exclude transport connect/disconnect handlers */ + wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); + del_timer_sync(&xprt->timer); rpc_xprt_debugfs_unregister(xprt); diff --git a/kernel/net/sunrpc/xprtsock.c b/kernel/net/sunrpc/xprtsock.c index 66891e32c..5e3ad598d 100644 --- a/kernel/net/sunrpc/xprtsock.c +++ b/kernel/net/sunrpc/xprtsock.c @@ -834,6 +834,7 @@ static void xs_reset_transport(struct sock_xprt *transport) sk->sk_user_data = NULL; xs_restore_old_callbacks(transport, sk); + xprt_clear_connected(xprt); write_unlock_bh(&sk->sk_callback_lock); xs_sock_reset_connection_flags(xprt); @@ -1433,6 +1434,7 @@ out: static void xs_tcp_state_change(struct sock *sk) { struct rpc_xprt *xprt; + struct sock_xprt *transport; read_lock_bh(&sk->sk_callback_lock); if (!(xprt = xprt_from_sock(sk))) @@ -1444,13 +1446,12 @@ static void xs_tcp_state_change(struct sock *sk) sock_flag(sk, SOCK_ZAPPED), sk->sk_shutdown); + transport = container_of(xprt, struct sock_xprt, xprt); trace_rpc_socket_state_change(xprt, sk->sk_socket); switch (sk->sk_state) { case TCP_ESTABLISHED: spin_lock(&xprt->transport_lock); if (!xprt_test_and_set_connected(xprt)) { - struct sock_xprt *transport = container_of(xprt, - struct sock_xprt, xprt); /* Reset TCP record info */ transport->tcp_offset = 0; @@ -1459,6 +1460,8 @@ static void xs_tcp_state_change(struct sock *sk) transport->tcp_flags = TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; xprt->connect_cookie++; + clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); + xprt_clear_connecting(xprt); xprt_wake_pending_tasks(xprt, -EAGAIN); } @@ -1494,6 +1497,9 @@ static void xs_tcp_state_change(struct sock *sk) smp_mb__after_atomic(); break; case TCP_CLOSE: + if (test_and_clear_bit(XPRT_SOCK_CONNECTING, + &transport->sock_state)) + xprt_clear_connecting(xprt); xs_sock_mark_closed(xprt); } out: @@ -2110,6 +2116,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) /* Tell the socket layer to start connecting... 
*/ xprt->stat.connect_count++; xprt->stat.connect_start = jiffies; + set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); switch (ret) { case 0: @@ -2174,7 +2181,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) case -EINPROGRESS: case -EALREADY: xprt_unlock_connect(xprt, transport); - xprt_clear_connecting(xprt); return; case -EINVAL: /* Happens, for instance, if the user specified a link @@ -2216,13 +2222,14 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); - /* Start by resetting any existing state */ - xs_reset_transport(transport); - - if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { + if (transport->sock != NULL) { dprintk("RPC: xs_connect delayed xprt %p for %lu " "seconds\n", xprt, xprt->reestablish_timeout / HZ); + + /* Start by resetting any existing state */ + xs_reset_transport(transport); + queue_delayed_work(rpciod_workqueue, &transport->connect_worker, xprt->reestablish_timeout); diff --git a/kernel/net/tipc/socket.c b/kernel/net/tipc/socket.c index f485600c4..20cc6df07 100644 --- a/kernel/net/tipc/socket.c +++ b/kernel/net/tipc/socket.c @@ -2009,6 +2009,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); if (res) goto exit; + security_sk_clone(sock->sk, new_sock->sk); new_sk = new_sock->sk; new_tsock = tipc_sk(new_sk); diff --git a/kernel/security/selinux/hooks.c b/kernel/security/selinux/hooks.c index 7f8d7f19e..280235cc3 100644 --- a/kernel/security/selinux/hooks.c +++ b/kernel/security/selinux/hooks.c @@ -1095,7 +1095,7 @@ static void selinux_write_opts(struct seq_file *m, seq_puts(m, prefix); if (has_comma) seq_putc(m, '\"'); - seq_puts(m, opts->mnt_opts[i]); + seq_escape(m, opts->mnt_opts[i], "\"\n\\"); if (has_comma) seq_putc(m, '\"'); } diff --git a/kernel/sound/pci/hda/patch_realtek.c b/kernel/sound/pci/hda/patch_realtek.c index 91f692856..6fe862594 100644 --- a/kernel/sound/pci/hda/patch_realtek.c +++ b/kernel/sound/pci/hda/patch_realtek.c @@ -1134,7 +1134,7 @@ static const struct hda_fixup alc880_fixups[] = { /* override all pins as BIOS on old Amilo is broken */ .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { - { 0x14, 0x0121411f }, /* HP */ + { 0x14, 0x0121401f }, /* HP */ { 0x15, 0x99030120 }, /* speaker */ { 0x16, 0x99030130 }, /* bass speaker */ { 0x17, 0x411111f0 }, /* N/A */ @@ -1154,7 +1154,7 @@ static const struct hda_fixup alc880_fixups[] = { /* almost compatible with FUJITSU, but no bass and SPDIF */ .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { - { 0x14, 0x0121411f }, /* HP */ + { 0x14, 0x0121401f }, /* HP */ { 0x15, 0x99030120 }, /* speaker */ { 0x16, 0x411111f0 }, /* N/A */ { 0x17, 0x411111f0 }, /* N/A */ @@ -1363,7 +1363,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = { SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810), SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM), SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE), - SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734), + SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU), SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU), SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734), SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU), @@ -5118,8 +5118,11 @@ 
static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX), + SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX), + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -6454,6 +6457,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13), SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13), + SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13), SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), diff --git a/kernel/sound/soc/codecs/adav80x.c b/kernel/sound/soc/codecs/adav80x.c index 4373ada95..3a91a00fb 100644 --- a/kernel/sound/soc/codecs/adav80x.c +++ b/kernel/sound/soc/codecs/adav80x.c @@ -864,7 +864,6 @@ const struct regmap_config adav80x_regmap_config = { .val_bits = 8, .pad_bits = 1, .reg_bits = 7, - .read_flag_mask = 0x01, .max_register = ADAV80X_PLL_OUTE, diff --git a/kernel/sound/soc/codecs/arizona.c b/kernel/sound/soc/codecs/arizona.c index eff4b4d51..ee91edcf3 100644 --- a/kernel/sound/soc/codecs/arizona.c +++ b/kernel/sound/soc/codecs/arizona.c @@ -1610,17 +1610,6 @@ int arizona_init_dai(struct arizona_priv *priv, int id) } EXPORT_SYMBOL_GPL(arizona_init_dai); -static irqreturn_t arizona_fll_clock_ok(int irq, void *data) -{ - struct arizona_fll *fll = data; - - arizona_fll_dbg(fll, "clock OK\n"); - - complete(&fll->ok); - - return IRQ_HANDLED; -} - static struct { unsigned int min; unsigned int max; @@ -1902,17 +1891,18 @@ static int arizona_is_enabled_fll(struct arizona_fll *fll) static int arizona_enable_fll(struct arizona_fll *fll) { struct arizona *arizona = fll->arizona; - unsigned long time_left; bool use_sync = false; int already_enabled = arizona_is_enabled_fll(fll); struct arizona_fll_cfg cfg; + int i; + unsigned int val; if (already_enabled < 0) return already_enabled; if (already_enabled) { /* Facilitate smooth refclk across the transition */ - regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x7, + regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9, ARIZONA_FLL1_GAIN_MASK, 0); regmap_update_bits_async(fll->arizona->regmap, fll->base + 1, ARIZONA_FLL1_FREERUN, @@ -1964,9 +1954,6 @@ static int arizona_enable_fll(struct arizona_fll *fll) if (!already_enabled) pm_runtime_get(arizona->dev); - /* Clear any pending completions */ - try_wait_for_completion(&fll->ok); - regmap_update_bits_async(arizona->regmap, fll->base + 1, ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA); if (use_sync) @@ -1978,10 +1965,24 @@ static int arizona_enable_fll(struct arizona_fll *fll) 
regmap_update_bits_async(arizona->regmap, fll->base + 1, ARIZONA_FLL1_FREERUN, 0); - time_left = wait_for_completion_timeout(&fll->ok, - msecs_to_jiffies(250)); - if (time_left == 0) + arizona_fll_dbg(fll, "Waiting for FLL lock...\n"); + val = 0; + for (i = 0; i < 15; i++) { + if (i < 5) + usleep_range(200, 400); + else + msleep(20); + + regmap_read(arizona->regmap, + ARIZONA_INTERRUPT_RAW_STATUS_5, + &val); + if (val & (ARIZONA_FLL1_CLOCK_OK_STS << (fll->id - 1))) + break; + } + if (i == 15) arizona_fll_warn(fll, "Timed out waiting for lock\n"); + else + arizona_fll_dbg(fll, "FLL locked (%d polls)\n", i); return 0; } @@ -2066,11 +2067,8 @@ EXPORT_SYMBOL_GPL(arizona_set_fll); int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq, int ok_irq, struct arizona_fll *fll) { - int ret; unsigned int val; - init_completion(&fll->ok); - fll->id = id; fll->base = base; fll->arizona = arizona; @@ -2092,13 +2090,6 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq, snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name), "FLL%d clock OK", id); - ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name, - arizona_fll_clock_ok, fll); - if (ret != 0) { - dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n", - id, ret); - } - regmap_update_bits(arizona->regmap, fll->base + 1, ARIZONA_FLL1_FREERUN, 0); diff --git a/kernel/sound/soc/codecs/arizona.h b/kernel/sound/soc/codecs/arizona.h index 11ff899b0..14e8485b5 100644 --- a/kernel/sound/soc/codecs/arizona.h +++ b/kernel/sound/soc/codecs/arizona.h @@ -233,7 +233,6 @@ struct arizona_fll { int id; unsigned int base; unsigned int vco_mult; - struct completion ok; unsigned int fout; int sync_src; diff --git a/kernel/sound/soc/codecs/rt5640.c b/kernel/sound/soc/codecs/rt5640.c index 178e55d4d..06317f7d9 100644 --- a/kernel/sound/soc/codecs/rt5640.c +++ b/kernel/sound/soc/codecs/rt5640.c @@ -985,6 +985,35 @@ static int rt5640_hp_event(struct snd_soc_dapm_widget *w, return 0; } +static int rt5640_lout_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + hp_amp_power_on(codec); + snd_soc_update_bits(codec, RT5640_PWR_ANLG1, + RT5640_PWR_LM, RT5640_PWR_LM); + snd_soc_update_bits(codec, RT5640_OUTPUT, + RT5640_L_MUTE | RT5640_R_MUTE, 0); + break; + + case SND_SOC_DAPM_PRE_PMD: + snd_soc_update_bits(codec, RT5640_OUTPUT, + RT5640_L_MUTE | RT5640_R_MUTE, + RT5640_L_MUTE | RT5640_R_MUTE); + snd_soc_update_bits(codec, RT5640_PWR_ANLG1, + RT5640_PWR_LM, 0); + break; + + default: + return 0; + } + + return 0; +} + static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -1180,13 +1209,16 @@ static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = { 0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)), SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0, 0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)), - SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0, + SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0, rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)), SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM, 0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5640_hp_event, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0, + rt5640_lout_event, + SND_SOC_DAPM_PRE_PMD | 
SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1, RT5640_PWR_HP_L_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1, @@ -1501,8 +1533,10 @@ static const struct snd_soc_dapm_route rt5640_dapm_routes[] = { {"HP R Playback", "Switch", "HP Amp"}, {"HPOL", NULL, "HP L Playback"}, {"HPOR", NULL, "HP R Playback"}, - {"LOUTL", NULL, "LOUT MIX"}, - {"LOUTR", NULL, "LOUT MIX"}, + + {"LOUT amp", NULL, "LOUT MIX"}, + {"LOUTL", NULL, "LOUT amp"}, + {"LOUTR", NULL, "LOUT amp"}, }; static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = { diff --git a/kernel/sound/soc/samsung/arndale_rt5631.c b/kernel/sound/soc/samsung/arndale_rt5631.c index 8bf2e2c4b..9e371eb3e 100644 --- a/kernel/sound/soc/samsung/arndale_rt5631.c +++ b/kernel/sound/soc/samsung/arndale_rt5631.c @@ -116,15 +116,6 @@ static int arndale_audio_probe(struct platform_device *pdev) return ret; } -static int arndale_audio_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - - return 0; -} - static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = { { .compatible = "samsung,arndale-rt5631", }, { .compatible = "samsung,arndale-alc5631", }, @@ -139,7 +130,6 @@ static struct platform_driver arndale_audio_driver = { .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match), }, .probe = arndale_audio_probe, - .remove = arndale_audio_remove, }; module_platform_driver(arndale_audio_driver); diff --git a/kernel/sound/usb/mixer.c b/kernel/sound/usb/mixer.c index 8b7e391dd..cd8ed2e39 100644 --- a/kernel/sound/usb/mixer.c +++ b/kernel/sound/usb/mixer.c @@ -2522,7 +2522,7 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list) for (c = 0; c < MAX_CHANNELS; c++) { if (!(cval->cmask & (1 << c))) continue; - if (cval->cached & (1 << c)) { + if (cval->cached & (1 << (c + 1))) { err = snd_usb_set_cur_mix_value(cval, c + 1, idx, cval->cache_val[idx]); if (err < 0) |
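
Note on the rds_info_getsockopt() hunk above: the old guard "len + PAGE_SIZE - 1 < len" never fires for a non-negative int len, because PAGE_SIZE is an unsigned long and the whole expression is promoted to unsigned long, where the addition cannot wrap; the replacement bounds len against INT_MAX instead. The stand-alone sketch below (plain userspace C, PAGE_SIZE hard-coded to 4096 purely for illustration, not the kernel code itself) shows the two forms side by side.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* unsigned long, mirroring the kernel type */

/* Old test: len is promoted to unsigned long, the sum cannot wrap, and the
 * "< len" clause is effectively dead for every non-negative len. */
static bool rejected_old(int len)
{
	return len < 0 || len + PAGE_SIZE - 1 < (unsigned long)len;
}

/* New test: bound len so that len + PAGE_SIZE - 1 still fits in an int,
 * without ever evaluating an expression that could overflow. */
static bool rejected_new(int len)
{
	return len < 0 || len > (int)(INT_MAX - PAGE_SIZE + 1);
}

int main(void)
{
	int samples[] = { 0, 4096, INT_MAX - 4096, INT_MAX };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("len=%11d  old rejects=%d  new rejects=%d\n",
		       samples[i], rejected_old(samples[i]),
		       rejected_new(samples[i]));
	return 0;
}

Running it shows the old check accepting len = INT_MAX while the new one rejects it, which is exactly the case the patch is closing.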
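
On the act_api.c change: tcf_hash_release() used to refuse any unbound release while the bind count was still positive; __tcf_hash_release() adds a "strict" flag so callers such as tcf_del_walker() and tcf_action_destroy() can keep that refusal while other paths simply drop their reference. The toy model below (userspace C with invented struct and main, only the decision logic follows the patch) spells out what the new helper does.

#include <stdbool.h>
#include <stdio.h>

#define ACT_P_DELETED 1
#define EPERM 1

struct toy_action {
	int bindcnt;	/* references held by classifiers bound to the action */
	int refcnt;	/* ordinary references */
};

static int toy_hash_release(struct toy_action *p, bool bind, bool strict)
{
	if (bind)
		p->bindcnt--;
	else if (strict && p->bindcnt > 0)
		return -EPERM;	/* strict callers refuse to free a bound action */

	p->refcnt--;
	if (p->bindcnt <= 0 && p->refcnt <= 0) {
		/* kernel: a->ops->cleanup() + tcf_hash_destroy() happen here */
		return ACT_P_DELETED;
	}
	return 0;
}

int main(void)
{
	struct toy_action a = { .bindcnt = 1, .refcnt = 2 };

	/* A strict release of a still-bound action is rejected ... */
	printf("strict release  -> %d\n", toy_hash_release(&a, false, true));
	/* ... a non-strict one just drops the reference count. */
	printf("lenient release -> %d\n", toy_hash_release(&a, false, false));
	printf("bindcnt=%d refcnt=%d\n", a.bindcnt, a.refcnt);
	return 0;
}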
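
The sch_fq_codel.c hunk only moves kfree_skb() after qdisc_qstats_backlog_dec(): the backlog helper reads the packet length out of the skb, so freeing first is a use-after-free. The general rule, sketched below with made-up userspace types rather than the qdisc API, is to finish every access that dereferences an object before releasing it.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a queued packet; only the length matters here. */
struct toy_pkt {
	unsigned int len;
};

struct toy_stats {
	unsigned int backlog;
	unsigned int drops;
};

/* Mirrors the corrected ordering in fq_codel_drop(): account first,
 * free last, because accounting still reads pkt->len. */
static void drop_pkt(struct toy_stats *st, struct toy_pkt *pkt)
{
	st->drops++;
	st->backlog -= pkt->len;	/* must happen before free() */
	free(pkt);			/* pkt is dead from here on */
}

int main(void)
{
	struct toy_stats st = { .backlog = 1500, .drops = 0 };
	struct toy_pkt *pkt = malloc(sizeof(*pkt));

	if (!pkt)
		return 1;
	pkt->len = 1500;
	drop_pkt(&st, pkt);
	printf("backlog=%u drops=%u\n", st.backlog, st.drops);
	return 0;
}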
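
The sctp/protocol.c changes split the single pernet_operations into sctp_defaults_ops and sctp_ctrlsock_ops, so the per-netns control socket is created only after the defaults and protosw state it depends on, and torn down before them; sctp_init() accordingly gains one more rung on its goto unwind ladder. The fragment below is a generic userspace sketch of that register-in-order, unwind-in-reverse pattern; the step names are invented and only the shape matches the patch.

#include <stdio.h>

/* Each step returns 0 on success, non-zero on failure; each has an undo. */
static int setup_defaults(void)  { puts("defaults up");   return 0; }
static void undo_defaults(void)  { puts("defaults down"); }
static int setup_protosw(void)   { puts("protosw up");    return 0; }
static void undo_protosw(void)   { puts("protosw down"); }
static int setup_ctrlsock(void)  { puts("ctrlsock up");   return 0; }
static void undo_ctrlsock(void)  { puts("ctrlsock down"); }

static int toy_init(void)
{
	int status;

	status = setup_defaults();
	if (status)
		goto err_defaults;
	status = setup_protosw();
	if (status)
		goto err_protosw;
	status = setup_ctrlsock();	/* needs defaults + protosw already up */
	if (status)
		goto err_ctrlsock;
	return 0;

	/* Unwind strictly in reverse order of setup. */
err_ctrlsock:
	undo_protosw();
err_protosw:
	undo_defaults();
err_defaults:
	return status;
}

static void toy_exit(void)
{
	undo_ctrlsock();
	undo_protosw();
	undo_defaults();
}

int main(void)
{
	if (toy_init())
		return 1;
	toy_exit();
	return 0;
}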
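
The sound/soc/codecs/arizona.c change drops the clock-OK interrupt and its completion and instead polls ARIZONA_INTERRUPT_RAW_STATUS_5 for the FLL lock bit, sleeping 200-400 us for the first five attempts and 20 ms thereafter, for at most fifteen tries. A userspace sketch of that two-stage poll loop is below; read_status() and the exact timings are illustrative stand-ins, not the regmap call.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Pretend hardware: report "locked" only after a few reads. */
static bool read_status(void)
{
	static int reads;

	return ++reads >= 7;
}

static void sleep_us(long usec)
{
	struct timespec ts = { .tv_sec = usec / 1000000,
			       .tv_nsec = (usec % 1000000) * 1000 };

	nanosleep(&ts, NULL);
}

/* Two-stage poll, shaped like the patched arizona_enable_fll():
 * short sleeps while the lock is expected imminently, longer ones after. */
static bool wait_for_lock(void)
{
	int i;

	for (i = 0; i < 15; i++) {
		if (i < 5)
			sleep_us(300);		/* kernel uses usleep_range(200, 400) */
		else
			sleep_us(20000);	/* kernel uses msleep(20) */

		if (read_status()) {
			printf("locked after %d polls\n", i + 1);
			return true;
		}
	}
	printf("timed out waiting for lock\n");
	return false;
}

int main(void)
{
	return wait_for_lock() ? 0 : 1;
}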