author Don Dugger <donald.d.dugger@intel.com> 2015-10-29 22:15:25 +0000
committer Gerrit Code Review <gerrit@172.30.200.206> 2015-10-29 22:15:25 +0000
commit afc76d554ed517e38d46b6b182a7016406a1323f (patch)
tree 06133cb33a43488837ea67667458e1582f2f40ce /kernel/arch
parent 41f827bfbb10e03c4d228fbcc801dd51fb9983b0 (diff)
parent ec0a2ed6d8a5e555edef907895c041e285fdb495 (diff)
Merge "These changes are a raw update to a vanilla kernel 4.1.10, with the recently announced rt patch patch-4.1.10-rt10.patch. No further changes needed."
Diffstat (limited to 'kernel/arch')
-rw-r--r-- kernel/arch/arm/Kconfig 1
-rw-r--r-- kernel/arch/arm/boot/compressed/decompress.c 2
-rw-r--r-- kernel/arch/arm/boot/dts/exynos3250-rinato.dts 2
-rw-r--r-- kernel/arch/arm/boot/dts/rk3288.dtsi 2
-rw-r--r-- kernel/arch/arm/kvm/arm.c 2
-rw-r--r-- kernel/arch/arm/mach-omap2/clockdomains7xx_data.c 2
-rw-r--r-- kernel/arch/arm/mach-orion5x/include/mach/irqs.h 64
-rw-r--r-- kernel/arch/arm/mach-orion5x/irq.c 4
-rw-r--r-- kernel/arch/arm/mach-rockchip/platsmp.c 37
-rw-r--r-- kernel/arch/arm64/Kconfig 22
-rw-r--r-- kernel/arch/arm64/Makefile 4
-rw-r--r-- kernel/arch/arm64/include/asm/memory.h 8
-rw-r--r-- kernel/arch/arm64/kernel/debug-monitors.c 21
-rw-r--r-- kernel/arch/arm64/kernel/fpsimd.c 1
-rw-r--r-- kernel/arch/arm64/kernel/head.S 5
-rw-r--r-- kernel/arch/arm64/kernel/insn.c 6
-rw-r--r-- kernel/arch/arm64/kernel/module.c 2
-rw-r--r-- kernel/arch/arm64/kernel/signal32.c 47
-rw-r--r-- kernel/arch/arm64/kvm/hyp.S 9
-rw-r--r-- kernel/arch/m32r/boot/compressed/misc.c 3
-rw-r--r-- kernel/arch/mips/boot/compressed/decompress.c 4
-rw-r--r-- kernel/arch/mips/math-emu/cp1emu.c 24
-rw-r--r-- kernel/arch/parisc/kernel/irq.c 8
-rw-r--r-- kernel/arch/parisc/kernel/syscall.S 2
-rw-r--r-- kernel/arch/powerpc/boot/Makefile 3
-rw-r--r-- kernel/arch/powerpc/include/asm/pgtable-ppc64.h 14
-rw-r--r-- kernel/arch/powerpc/include/asm/rtas.h 1
-rw-r--r-- kernel/arch/powerpc/include/asm/switch_to.h 1
-rw-r--r-- kernel/arch/powerpc/kernel/eeh.c 27
-rw-r--r-- kernel/arch/powerpc/kernel/process.c 3
-rw-r--r-- kernel/arch/powerpc/kernel/rtas.c 17
-rw-r--r-- kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c 18
-rw-r--r-- kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S 1
-rw-r--r-- kernel/arch/powerpc/mm/hugepage-hash64.c 3
-rw-r--r-- kernel/arch/powerpc/platforms/pseries/ras.c 3
-rw-r--r-- kernel/arch/powerpc/platforms/pseries/setup.c 5
-rw-r--r-- kernel/arch/s390/boot/compressed/misc.c 2
-rw-r--r-- kernel/arch/s390/kernel/setup.c 15
-rw-r--r-- kernel/arch/sh/boot/compressed/misc.c 2
-rw-r--r-- kernel/arch/unicore32/boot/compressed/misc.c 4
-rw-r--r-- kernel/arch/x86/boot/compressed/misc.c 3
-rw-r--r-- kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c 1
-rw-r--r-- kernel/arch/x86/include/asm/desc.h 15
-rw-r--r-- kernel/arch/x86/include/asm/mmu.h 3
-rw-r--r-- kernel/arch/x86/include/asm/mmu_context.h 54
-rw-r--r-- kernel/arch/x86/kernel/acpi/boot.c 1
-rw-r--r-- kernel/arch/x86/kernel/cpu/common.c 4
-rw-r--r-- kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c 41
-rw-r--r-- kernel/arch/x86/kernel/cpu/perf_event.c 12
-rw-r--r-- kernel/arch/x86/kernel/ldt.c 262
-rw-r--r-- kernel/arch/x86/kernel/process_64.c 4
-rw-r--r-- kernel/arch/x86/kernel/step.c 8
-rw-r--r-- kernel/arch/x86/kvm/mmu.c 45
-rw-r--r-- kernel/arch/x86/kvm/x86.c 2
-rw-r--r-- kernel/arch/x86/math-emu/fpu_entry.c 3
-rw-r--r-- kernel/arch/x86/math-emu/fpu_system.h 21
-rw-r--r-- kernel/arch/x86/math-emu/get_address.c 3
-rw-r--r-- kernel/arch/x86/mm/init_32.c 1
-rw-r--r-- kernel/arch/x86/power/cpu.c 3
-rw-r--r-- kernel/arch/xtensa/include/asm/traps.h 29
-rw-r--r-- kernel/arch/xtensa/kernel/entry.S 7
61 files changed, 561 insertions, 362 deletions
diff --git a/kernel/arch/arm/Kconfig b/kernel/arch/arm/Kconfig
index bfaeed7a4..e16a25917 100644
--- a/kernel/arch/arm/Kconfig
+++ b/kernel/arch/arm/Kconfig
@@ -539,6 +539,7 @@ config ARCH_ORION5X
select MVEBU_MBUS
select PCI
select PLAT_ORION_LEGACY
+ select MULTI_IRQ_HANDLER
help
Support for the following Marvell Orion 5x series SoCs:
Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182),
diff --git a/kernel/arch/arm/boot/compressed/decompress.c b/kernel/arch/arm/boot/compressed/decompress.c
index bd245d349..a0765e7ed 100644
--- a/kernel/arch/arm/boot/compressed/decompress.c
+++ b/kernel/arch/arm/boot/compressed/decompress.c
@@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2);
int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
{
- return decompress(input, len, NULL, NULL, output, NULL, error);
+ return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
}
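
The same decompress() -> __decompress() conversion repeats below for m32r, mips, s390, sh, unicore32 and x86: the new entry point threads an explicit output-length bound through to the decompressor. A sketch of the two signatures as the call sites imply (parameter names are illustrative; the x86 wrapper passes output_len where the other arches pass 0 for "no limit"):

/* Old entry point: no way to bound writes into 'output'. */
int decompress(unsigned char *input, long len,
               long (*fill)(void *, unsigned long),
               long (*flush)(void *, unsigned long),
               unsigned char *output,
               long *posp,
               void (*error)(char *));

/* New entry point: the added out_len lets the decompressor stop
 * before overrunning the output buffer. */
int __decompress(unsigned char *input, long len,
                 long (*fill)(void *, unsigned long),
                 long (*flush)(void *, unsigned long),
                 unsigned char *output, long out_len,
                 long *posp,
                 void (*error)(char *));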
diff --git a/kernel/arch/arm/boot/dts/exynos3250-rinato.dts b/kernel/arch/arm/boot/dts/exynos3250-rinato.dts
index 0b9906880..75aba40c6 100644
--- a/kernel/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/kernel/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -181,7 +181,7 @@
display-timings {
timing-0 {
- clock-frequency = <0>;
+ clock-frequency = <4600000>;
hactive = <320>;
vactive = <320>;
hfront-porch = <1>;
diff --git a/kernel/arch/arm/boot/dts/rk3288.dtsi b/kernel/arch/arm/boot/dts/rk3288.dtsi
index 165968d51..8eca5878a 100644
--- a/kernel/arch/arm/boot/dts/rk3288.dtsi
+++ b/kernel/arch/arm/boot/dts/rk3288.dtsi
@@ -584,7 +584,7 @@
compatible = "rockchip,rk3288-wdt", "snps,dw-wdt";
reg = <0xff800000 0x100>;
clocks = <&cru PCLK_WDT>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
diff --git a/kernel/arch/arm/kvm/arm.c b/kernel/arch/arm/kvm/arm.c
index adb19885a..0c9014d7f 100644
--- a/kernel/arch/arm/kvm/arm.c
+++ b/kernel/arch/arm/kvm/arm.c
@@ -450,7 +450,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the first
* time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
+ if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
diff --git a/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c b/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c
index 57d5df0c1..7581e036b 100644
--- a/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/kernel/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = {
.dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT,
.wkdep_srcs = l4per2_wkup_sleep_deps,
.sleepdep_srcs = l4per2_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain mpu0_7xx_clkdm = {
diff --git a/kernel/arch/arm/mach-orion5x/include/mach/irqs.h b/kernel/arch/arm/mach-orion5x/include/mach/irqs.h
index a6fa9d8f1..2431d9923 100644
--- a/kernel/arch/arm/mach-orion5x/include/mach/irqs.h
+++ b/kernel/arch/arm/mach-orion5x/include/mach/irqs.h
@@ -16,42 +16,42 @@
/*
* Orion Main Interrupt Controller
*/
-#define IRQ_ORION5X_BRIDGE 0
-#define IRQ_ORION5X_DOORBELL_H2C 1
-#define IRQ_ORION5X_DOORBELL_C2H 2
-#define IRQ_ORION5X_UART0 3
-#define IRQ_ORION5X_UART1 4
-#define IRQ_ORION5X_I2C 5
-#define IRQ_ORION5X_GPIO_0_7 6
-#define IRQ_ORION5X_GPIO_8_15 7
-#define IRQ_ORION5X_GPIO_16_23 8
-#define IRQ_ORION5X_GPIO_24_31 9
-#define IRQ_ORION5X_PCIE0_ERR 10
-#define IRQ_ORION5X_PCIE0_INT 11
-#define IRQ_ORION5X_USB1_CTRL 12
-#define IRQ_ORION5X_DEV_BUS_ERR 14
-#define IRQ_ORION5X_PCI_ERR 15
-#define IRQ_ORION5X_USB_BR_ERR 16
-#define IRQ_ORION5X_USB0_CTRL 17
-#define IRQ_ORION5X_ETH_RX 18
-#define IRQ_ORION5X_ETH_TX 19
-#define IRQ_ORION5X_ETH_MISC 20
-#define IRQ_ORION5X_ETH_SUM 21
-#define IRQ_ORION5X_ETH_ERR 22
-#define IRQ_ORION5X_IDMA_ERR 23
-#define IRQ_ORION5X_IDMA_0 24
-#define IRQ_ORION5X_IDMA_1 25
-#define IRQ_ORION5X_IDMA_2 26
-#define IRQ_ORION5X_IDMA_3 27
-#define IRQ_ORION5X_CESA 28
-#define IRQ_ORION5X_SATA 29
-#define IRQ_ORION5X_XOR0 30
-#define IRQ_ORION5X_XOR1 31
+#define IRQ_ORION5X_BRIDGE (1 + 0)
+#define IRQ_ORION5X_DOORBELL_H2C (1 + 1)
+#define IRQ_ORION5X_DOORBELL_C2H (1 + 2)
+#define IRQ_ORION5X_UART0 (1 + 3)
+#define IRQ_ORION5X_UART1 (1 + 4)
+#define IRQ_ORION5X_I2C (1 + 5)
+#define IRQ_ORION5X_GPIO_0_7 (1 + 6)
+#define IRQ_ORION5X_GPIO_8_15 (1 + 7)
+#define IRQ_ORION5X_GPIO_16_23 (1 + 8)
+#define IRQ_ORION5X_GPIO_24_31 (1 + 9)
+#define IRQ_ORION5X_PCIE0_ERR (1 + 10)
+#define IRQ_ORION5X_PCIE0_INT (1 + 11)
+#define IRQ_ORION5X_USB1_CTRL (1 + 12)
+#define IRQ_ORION5X_DEV_BUS_ERR (1 + 14)
+#define IRQ_ORION5X_PCI_ERR (1 + 15)
+#define IRQ_ORION5X_USB_BR_ERR (1 + 16)
+#define IRQ_ORION5X_USB0_CTRL (1 + 17)
+#define IRQ_ORION5X_ETH_RX (1 + 18)
+#define IRQ_ORION5X_ETH_TX (1 + 19)
+#define IRQ_ORION5X_ETH_MISC (1 + 20)
+#define IRQ_ORION5X_ETH_SUM (1 + 21)
+#define IRQ_ORION5X_ETH_ERR (1 + 22)
+#define IRQ_ORION5X_IDMA_ERR (1 + 23)
+#define IRQ_ORION5X_IDMA_0 (1 + 24)
+#define IRQ_ORION5X_IDMA_1 (1 + 25)
+#define IRQ_ORION5X_IDMA_2 (1 + 26)
+#define IRQ_ORION5X_IDMA_3 (1 + 27)
+#define IRQ_ORION5X_CESA (1 + 28)
+#define IRQ_ORION5X_SATA (1 + 29)
+#define IRQ_ORION5X_XOR0 (1 + 30)
+#define IRQ_ORION5X_XOR1 (1 + 31)
/*
* Orion General Purpose Pins
*/
-#define IRQ_ORION5X_GPIO_START 32
+#define IRQ_ORION5X_GPIO_START 33
#define NR_GPIO_IRQS 32
#define NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS)
diff --git a/kernel/arch/arm/mach-orion5x/irq.c b/kernel/arch/arm/mach-orion5x/irq.c
index cd4bac4d7..086ecb87d 100644
--- a/kernel/arch/arm/mach-orion5x/irq.c
+++ b/kernel/arch/arm/mach-orion5x/irq.c
@@ -42,7 +42,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
stat = readl_relaxed(MAIN_IRQ_CAUSE);
stat &= readl_relaxed(MAIN_IRQ_MASK);
if (stat) {
- unsigned int hwirq = __fls(stat);
+ unsigned int hwirq = 1 + __fls(stat);
handle_IRQ(hwirq, regs);
return;
}
@@ -51,7 +51,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
void __init orion5x_init_irq(void)
{
- orion_irq_init(0, MAIN_IRQ_MASK);
+ orion_irq_init(1, MAIN_IRQ_MASK);
#ifdef CONFIG_MULTI_IRQ_HANDLER
set_handle_irq(orion5x_legacy_handle_irq);
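
The Orion5x change shifts every hardware IRQ up by one so that hwirq 0, which the generic IRQ layer treats as "no interrupt", is never a valid source; the handler compensates with 1 + __fls(stat), the controller base moves from 0 to 1, and GPIO_START moves from 32 to 33 to keep NR_IRQS consistent. A standalone, runnable check of the arithmetic (a userspace stand-in for the kernel's __fls()):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for __fls(): index of the highest set bit
 * (undefined for 0, as in the kernel). */
static unsigned int fls_index(uint32_t stat)
{
        return 31 - __builtin_clz(stat);
}

int main(void)
{
        uint32_t stat = (1u << 3) | (1u << 17);  /* UART0 + USB0 cause bits */

        /* Old scheme: hwirq == cause bit number, so bit 0 mapped to IRQ 0. */
        printf("old hwirq: %u\n", fls_index(stat));       /* 17 */
        /* New scheme: everything shifted by 1; IRQ 0 stays invalid. */
        printf("new hwirq: %u\n", 1 + fls_index(stat));   /* 18 */
        return 0;
}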
diff --git a/kernel/arch/arm/mach-rockchip/platsmp.c b/kernel/arch/arm/mach-rockchip/platsmp.c
index 2e6ab67e2..611a5f96d 100644
--- a/kernel/arch/arm/mach-rockchip/platsmp.c
+++ b/kernel/arch/arm/mach-rockchip/platsmp.c
@@ -72,29 +72,22 @@ static struct reset_control *rockchip_get_core_reset(int cpu)
static int pmu_set_power_domain(int pd, bool on)
{
u32 val = (on) ? 0 : BIT(pd);
+ struct reset_control *rstc = rockchip_get_core_reset(pd);
int ret;
+ if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
+ pr_err("%s: could not get reset control for core %d\n",
+ __func__, pd);
+ return PTR_ERR(rstc);
+ }
+
/*
* We need to soft reset the cpu when we turn off the cpu power domain,
* or else the active processors might be stalled when the individual
* processor is powered down.
*/
- if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
- struct reset_control *rstc = rockchip_get_core_reset(pd);
-
- if (IS_ERR(rstc)) {
- pr_err("%s: could not get reset control for core %d\n",
- __func__, pd);
- return PTR_ERR(rstc);
- }
-
- if (on)
- reset_control_deassert(rstc);
- else
- reset_control_assert(rstc);
-
- reset_control_put(rstc);
- }
+ if (!IS_ERR(rstc) && !on)
+ reset_control_assert(rstc);
ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
if (ret < 0) {
@@ -112,6 +105,12 @@ static int pmu_set_power_domain(int pd, bool on)
}
}
+ if (!IS_ERR(rstc)) {
+ if (on)
+ reset_control_deassert(rstc);
+ reset_control_put(rstc);
+ }
+
return 0;
}
@@ -147,8 +146,12 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
* the mailbox:
* sram_base_addr + 4: 0xdeadbeaf
* sram_base_addr + 8: start address for pc
+ * CPU 0 needs to wait until the CPUs other than CPU 0 have entered
+ * the WFE state. The wait time depends on many factors
+ * (e.g. CPU frequency, bootrom frequency, SRAM frequency, ...).
* */
- udelay(10);
+ mdelay(1); /* give the CPUs other than CPU 0 time to start up */
+
writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
writel(0xDEADBEAF, sram_base_addr + 4);
dsb_sev();
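
The platsmp.c rework changes the ordering so the core's reset line is asserted before its power domain is switched off and deasserted only after power is back on, and a missing reset control is tolerated only on Cortex-A9 (which has no per-core reset here). A condensed outline of the resulting control flow in pmu_set_power_domain(), as the diff above implies:

/* Illustrative outline, not a literal copy of the function:
 *
 *   rstc = rockchip_get_core_reset(pd);
 *   if (IS_ERR(rstc) && !cortex_a9)
 *           return PTR_ERR(rstc);           // reset control is mandatory
 *
 *   if (!IS_ERR(rstc) && !on)
 *           reset_control_assert(rstc);     // hold core in reset BEFORE power-off
 *
 *   regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), on ? 0 : BIT(pd));
 *   ... poll until the domain status matches ...
 *
 *   if (!IS_ERR(rstc)) {
 *           if (on)
 *                   reset_control_deassert(rstc);  // release AFTER power-on
 *           reset_control_put(rstc);
 *   }
 */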
diff --git a/kernel/arch/arm64/Kconfig b/kernel/arch/arm64/Kconfig
index d555ed3a0..09a41259b 100644
--- a/kernel/arch/arm64/Kconfig
+++ b/kernel/arch/arm64/Kconfig
@@ -103,6 +103,10 @@ config NO_IOPORT_MAP
config STACKTRACE_SUPPORT
def_bool y
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0xdead000000000000
+
config LOCKDEP_SUPPORT
def_bool y
@@ -411,6 +415,22 @@ config ARM64_ERRATUM_845719
If unsure, say Y.
+config ARM64_ERRATUM_843419
+ bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+ depends on MODULES
+ default y
+ help
+ This option builds kernel modules using the large memory model in
+ order to avoid the use of the ADRP instruction, which can cause
+ a subsequent memory access to use an incorrect address on Cortex-A53
+ parts up to r0p4.
+
+ Note that the kernel itself must be linked with a version of ld
+ which fixes potentially affected ADRP instructions through the
+ use of veneers.
+
+ If unsure, say Y.
+
endmenu
@@ -581,7 +601,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
- depends on ARM64 && OF
+ depends on ARM64 && OF && !PREEMPT_RT_FULL
select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
diff --git a/kernel/arch/arm64/Makefile b/kernel/arch/arm64/Makefile
index 4d2a92599..81151663e 100644
--- a/kernel/arch/arm64/Makefile
+++ b/kernel/arch/arm64/Makefile
@@ -30,6 +30,10 @@ endif
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+CFLAGS_MODULE += -mcmodel=large
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/kernel/arch/arm64/include/asm/memory.h b/kernel/arch/arm64/include/asm/memory.h
index f800d45ea..44a59c20e 100644
--- a/kernel/arch/arm64/include/asm/memory.h
+++ b/kernel/arch/arm64/include/asm/memory.h
@@ -114,6 +114,14 @@ extern phys_addr_t memstart_addr;
#define PHYS_OFFSET ({ memstart_addr; })
/*
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
+ */
+#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
+
+/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
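
MAX_MEMBLOCK_ADDR works because PAGE_OFFSET, read as a signed 64-bit value, is the negated size of the linear mapping, so memstart_addr - PAGE_OFFSET - 1 wraps around to memstart_addr + size - 1. A runnable check of the arithmetic, assuming the 4.1-era arm64 VA_BITS=39 layout (PAGE_OFFSET = 0xffffffc000000000; the RAM base is an illustrative value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t page_offset = 0xffffffc000000000ULL; /* VA_BITS=39 layout */
        uint64_t memstart    = 0x80000000ULL;         /* example RAM base  */

        /* Negating PAGE_OFFSET as a signed quantity yields the linear
         * mapping size: 256 GiB for this configuration. */
        uint64_t linear_size = (uint64_t)(-(int64_t)page_offset);

        uint64_t max_addr = memstart - page_offset - 1; /* wraps mod 2^64 */

        printf("linear map size: %llu GiB\n",
               (unsigned long long)(linear_size >> 30));
        printf("MAX_MEMBLOCK_ADDR = %#llx (equals memstart + size - 1: %d)\n",
               (unsigned long long)max_addr,
               max_addr == memstart + linear_size - 1);
        return 0;
}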
diff --git a/kernel/arch/arm64/kernel/debug-monitors.c b/kernel/arch/arm64/kernel/debug-monitors.c
index b056369fd..70654d843 100644
--- a/kernel/arch/arm64/kernel/debug-monitors.c
+++ b/kernel/arch/arm64/kernel/debug-monitors.c
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_add(&hook->node, &break_hook);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_add_rcu(&hook->node, &break_hook);
+ spin_unlock(&break_hook_lock);
}
void unregister_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_del(&hook->node);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_del_rcu(&hook->node);
+ spin_unlock(&break_hook_lock);
+ synchronize_rcu();
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
struct break_hook *hook;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
- read_lock(&break_hook_lock);
- list_for_each_entry(hook, &break_hook, node)
+ rcu_read_lock();
+ list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn;
- read_unlock(&break_hook_lock);
+ rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
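
The debug-monitors change swaps a reader/writer lock for the standard RCU-protected list pattern, since call_break_hook() runs from the debug exception path where taking even a read lock is undesirable: readers now never block, and writers serialize among themselves with a plain spinlock and wait out readers with synchronize_rcu() before an entry can be freed. The same shape in isolation (a minimal kernel-style sketch; hook_list, my_hook, match and fn are illustrative names):

static LIST_HEAD(hook_list);
static DEFINE_SPINLOCK(hook_lock);      /* serializes writers only */

void add_hook(struct my_hook *h)
{
        spin_lock(&hook_lock);
        list_add_rcu(&h->node, &hook_list);
        spin_unlock(&hook_lock);
}

void del_hook(struct my_hook *h)
{
        spin_lock(&hook_lock);
        list_del_rcu(&h->node);
        spin_unlock(&hook_lock);
        synchronize_rcu();              /* wait out readers before freeing */
}

int run_hooks(int arg)
{
        struct my_hook *h;
        int ret = -1;

        rcu_read_lock();                /* readers never block writers */
        list_for_each_entry_rcu(h, &hook_list, node)
                if (h->match(arg))
                        ret = h->fn(arg);
        rcu_read_unlock();
        return ret;
}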
diff --git a/kernel/arch/arm64/kernel/fpsimd.c b/kernel/arch/arm64/kernel/fpsimd.c
index 3dca15634..c31e59fe2 100644
--- a/kernel/arch/arm64/kernel/fpsimd.c
+++ b/kernel/arch/arm64/kernel/fpsimd.c
@@ -157,6 +157,7 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+ fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
diff --git a/kernel/arch/arm64/kernel/head.S b/kernel/arch/arm64/kernel/head.S
index 19f915e8f..36aa31ff2 100644
--- a/kernel/arch/arm64/kernel/head.S
+++ b/kernel/arch/arm64/kernel/head.S
@@ -565,6 +565,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif
+ /* EL2 debug */
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+ msr mdcr_el2, x0 // all PMU counters from EL1
+
/* Stage-2 translation */
msr vttbr_el2, xzr
diff --git a/kernel/arch/arm64/kernel/insn.c b/kernel/arch/arm64/kernel/insn.c
index 924902083..30eb88e5b 100644
--- a/kernel/arch/arm64/kernel/insn.c
+++ b/kernel/arch/arm64/kernel/insn.c
@@ -77,7 +77,7 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
}
}
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
@@ -124,13 +124,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
unsigned long flags = 0;
int ret;
- spin_lock_irqsave(&patch_lock, flags);
+ raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
patch_unmap(FIX_TEXT_POKE0);
- spin_unlock_irqrestore(&patch_lock, flags);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;
}
diff --git a/kernel/arch/arm64/kernel/module.c b/kernel/arch/arm64/kernel/module.c
index 67bf4107f..876eb8df5 100644
--- a/kernel/arch/arm64/kernel/module.c
+++ b/kernel/arch/arm64/kernel/module.c
@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
AARCH64_INSN_IMM_ADR);
break;
+#ifndef CONFIG_ARM64_ERRATUM_843419
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
AARCH64_INSN_IMM_ADR);
break;
+#endif
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
diff --git a/kernel/arch/arm64/kernel/signal32.c b/kernel/arch/arm64/kernel/signal32.c
index c0cff3410..c58aee062 100644
--- a/kernel/arch/arm64/kernel/signal32.c
+++ b/kernel/arch/arm64/kernel/signal32.c
@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
/*
* VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
*/
+union __fpsimd_vreg {
+ __uint128_t raw;
+ struct {
+#ifdef __AARCH64EB__
+ u64 hi;
+ u64 lo;
+#else
+ u64 lo;
+ u64 hi;
+#endif
+ };
+};
+
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc;
- int err = 0;
+ int i, err = 0;
/*
* Save the hardware registers to the fpsimd_state structure.
@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
/*
* Now copy the FP registers. Since the registers are packed,
* we can copy the prefix we want (V0-V15) as it is.
- * FIXME: Won't work if big endian.
*/
- err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
- sizeof(frame->ufp.fpregs));
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg = {
+ .raw = fpsimd->vregs[i >> 1],
+ };
+
+ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ }
/* Create an AArch32 fpscr from the fpsr and the fpcr. */
fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr;
- int err = 0;
+ int i, err = 0;
__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
- /*
- * Copy the FP registers into the start of the fpsimd_state.
- * FIXME: Won't work if big endian.
- */
- err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
- sizeof(frame->ufp.fpregs));
+ /* Copy the FP registers into the start of the fpsimd_state. */
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg;
+
+ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ fpsimd.vregs[i >> 1] = vreg.raw;
+ }
/* Extract the fpsr and the fpcr from the fpscr */
__get_user_error(fpscr, &frame->ufp.fpscr, err);
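
The __fpsimd_vreg union works because a 128-bit Q register stored as __uint128_t places its low and high 64-bit halves at memory offsets that depend on host endianness; declaring the struct members in endian-dependent order makes vreg.lo and vreg.hi always name the architectural low and high halves. A runnable illustration of the idea (the __AARCH64EB__ branch in the diff simply flips the member order, as sketched here with the generic GCC byte-order macros):

#include <stdio.h>
#include <stdint.h>

union vreg {
        __uint128_t raw;
        struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                uint64_t hi, lo;        /* big-endian: high half first */
#else
                uint64_t lo, hi;        /* little-endian: low half first */
#endif
        };
};

int main(void)
{
        union vreg v;

        v.raw = ((__uint128_t)0x1111222233334444ULL << 64)
                | 0x5555666677778888ULL;

        printf("lo=%#llx hi=%#llx\n",
               (unsigned long long)v.lo, (unsigned long long)v.hi);
        /* Reassembling the halves round-trips the Q value. */
        printf("round-trip ok: %d\n",
               v.raw == (((__uint128_t)v.hi << 64) | v.lo));
        return 0;
}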
diff --git a/kernel/arch/arm64/kvm/hyp.S b/kernel/arch/arm64/kvm/hyp.S
index 5befd010e..64f9e60b3 100644
--- a/kernel/arch/arm64/kvm/hyp.S
+++ b/kernel/arch/arm64/kvm/hyp.S
@@ -844,8 +844,6 @@
mrs x3, cntv_ctl_el0
and x3, x3, #3
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
- bic x3, x3, #1 // Clear Enable
- msr cntv_ctl_el0, x3
isb
@@ -853,6 +851,9 @@
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
1:
+ // Disable the virtual timer
+ msr cntv_ctl_el0, xzr
+
// Allow physical timer/counter access for the host
mrs x2, cnthctl_el2
orr x2, x2, #3
@@ -947,13 +948,15 @@ ENTRY(__kvm_vcpu_run)
// Guest context
add x2, x0, #VCPU_CONTEXT
+ // We must restore the 32-bit state before the sysregs, thanks
+ // to Cortex-A57 erratum #852523.
+ restore_guest_32bit_state
bl __restore_sysregs
bl __restore_fpsimd
skip_debug_state x3, 1f
bl __restore_debug
1:
- restore_guest_32bit_state
restore_guest_regs
// That's it, no more messing around.
diff --git a/kernel/arch/m32r/boot/compressed/misc.c b/kernel/arch/m32r/boot/compressed/misc.c
index 28a09529f..3a7692745 100644
--- a/kernel/arch/m32r/boot/compressed/misc.c
+++ b/kernel/arch/m32r/boot/compressed/misc.c
@@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data,
free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
puts("\nDecompressing Linux... ");
- decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output_data, 0,
+ NULL, error);
puts("done.\nBooting the kernel.\n");
}
diff --git a/kernel/arch/mips/boot/compressed/decompress.c b/kernel/arch/mips/boot/compressed/decompress.c
index 54831069a..080cd53ba 100644
--- a/kernel/arch/mips/boot/compressed/decompress.c
+++ b/kernel/arch/mips/boot/compressed/decompress.c
@@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start)
puts("\n");
/* Decompress the kernel with according algorithm */
- decompress((char *)zimage_start, zimage_size, 0, 0,
- (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
+ __decompress((char *)zimage_start, zimage_size, 0, 0,
+ (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
/* FIXME: should we flush cache here? */
puts("Now, booting the kernel...\n");
diff --git a/kernel/arch/mips/math-emu/cp1emu.c b/kernel/arch/mips/math-emu/cp1emu.c
index 6983fcd48..2b95e34fa 100644
--- a/kernel/arch/mips/math-emu/cp1emu.c
+++ b/kernel/arch/mips/math-emu/cp1emu.c
@@ -1137,7 +1137,7 @@ emul:
break;
case mfhc_op:
- if (!cpu_has_mips_r2)
+ if (!cpu_has_mips_r2_r6)
goto sigill;
/* copregister rd -> gpr[rt] */
@@ -1148,7 +1148,7 @@ emul:
break;
case mthc_op:
- if (!cpu_has_mips_r2)
+ if (!cpu_has_mips_r2_r6)
goto sigill;
/* copregister rd <- gpr[rt] */
@@ -1181,6 +1181,24 @@ emul:
}
break;
+ case bc1eqz_op:
+ case bc1nez_op:
+ if (!cpu_has_mips_r6 || delay_slot(xcp))
+ return SIGILL;
+
+ cond = likely = 0;
+ switch (MIPSInst_RS(ir)) {
+ case bc1eqz_op:
+ if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
+ cond = 1;
+ break;
+ case bc1nez_op:
+ if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
+ cond = 1;
+ break;
+ }
+ goto branch_common;
+
case bc_op:
if (delay_slot(xcp))
return SIGILL;
@@ -1207,7 +1225,7 @@ emul:
case bct_op:
break;
}
-
+branch_common:
set_delay_slot(xcp);
if (cond) {
/*
diff --git a/kernel/arch/parisc/kernel/irq.c b/kernel/arch/parisc/kernel/irq.c
index f3191db6e..c0eab24f6 100644
--- a/kernel/arch/parisc/kernel/irq.c
+++ b/kernel/arch/parisc/kernel/irq.c
@@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
struct pt_regs *old_regs;
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
-#ifdef CONFIG_SMP
struct irq_desc *desc;
+#ifdef CONFIG_SMP
cpumask_t dest;
#endif
@@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
irq = eirr_to_irq(eirr_val);
-#ifdef CONFIG_SMP
+ /* Filter out spurious interrupts, mostly from serial port at bootup */
desc = irq_to_desc(irq);
+ if (unlikely(!desc->action))
+ goto set_out;
+
+#ifdef CONFIG_SMP
cpumask_copy(&dest, desc->irq_data.affinity);
if (irqd_is_per_cpu(&desc->irq_data) &&
!cpumask_test_cpu(smp_processor_id(), &dest)) {
diff --git a/kernel/arch/parisc/kernel/syscall.S b/kernel/arch/parisc/kernel/syscall.S
index 7ef22e338..0b8d26d3b 100644
--- a/kernel/arch/parisc/kernel/syscall.S
+++ b/kernel/arch/parisc/kernel/syscall.S
@@ -821,7 +821,7 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
19: ldd,ma 0(%sr3,%r26), %r29
- sub,= %r29, %r25, %r0
+ sub,*= %r29, %r25, %r0
b,n cas2_end
20: std,ma %r24, 0(%sr3,%r26)
copy %r0, %r28
diff --git a/kernel/arch/powerpc/boot/Makefile b/kernel/arch/powerpc/boot/Makefile
index 73eddda53..4eec430d8 100644
--- a/kernel/arch/powerpc/boot/Makefile
+++ b/kernel/arch/powerpc/boot/Makefile
@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
endif
ifdef CONFIG_CPU_BIG_ENDIAN
BOOTCFLAGS += -mbig-endian
+else
+BOOTCFLAGS += -mlittle-endian
+BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif
BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
diff --git a/kernel/arch/powerpc/include/asm/pgtable-ppc64.h b/kernel/arch/powerpc/include/asm/pgtable-ppc64.h
index 43e6ad424..88d27e325 100644
--- a/kernel/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/kernel/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -135,7 +135,19 @@
#define pte_iterate_hashed_end() } while(0)
#ifdef CONFIG_PPC_HAS_HASH_64K
-#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte) \
+ ({ \
+ unsigned int psize; \
+ if (is_kernel_addr(addr)) \
+ psize = MMU_PAGE_4K; \
+ else \
+ psize = get_slice_psize(mm, addr); \
+ psize; \
+ })
#else
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
#endif
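
The replacement macro uses a GNU C statement expression so it can branch on the address while still yielding a value: kernel linear-mapping addresses always report the 4K hash page size here, everything else consults the slice map. The construct in miniature (runnable with gcc; the helpers are stand-ins for the kernel's MMU_PAGE_4K and get_slice_psize()):

#include <stdio.h>

#define KERNEL_START 0xc000000000000000UL
#define is_kernel_addr(ea) ((unsigned long)(ea) >= KERNEL_START)

/* Stand-ins for the kernel definitions (illustrative). */
#define MMU_PAGE_4K 0
static int get_slice_psize(unsigned long addr) { (void)addr; return 1; }

/* A ({ ... }) statement expression evaluates to its last expression,
 * so the macro can contain an if/else yet still be used as a value. */
#define pte_pagesize_index(addr)                        \
        ({                                              \
                unsigned int psize;                     \
                if (is_kernel_addr(addr))               \
                        psize = MMU_PAGE_4K;            \
                else                                    \
                        psize = get_slice_psize(addr);  \
                psize;                                  \
        })

int main(void)
{
        printf("kernel va -> %u\n", pte_pagesize_index(0xc000000000001000UL));
        printf("user va   -> %u\n", pte_pagesize_index(0x10000000UL));
        return 0;
}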
diff --git a/kernel/arch/powerpc/include/asm/rtas.h b/kernel/arch/powerpc/include/asm/rtas.h
index 7a4ede16b..b77ef369c 100644
--- a/kernel/arch/powerpc/include/asm/rtas.h
+++ b/kernel/arch/powerpc/include/asm/rtas.h
@@ -343,6 +343,7 @@ extern void rtas_power_off(void);
extern void rtas_halt(void);
extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
extern int rtas_get_power_level(int powerdomain, int *level);
extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
extern bool rtas_indicator_present(int token, int *maxindex);
diff --git a/kernel/arch/powerpc/include/asm/switch_to.h b/kernel/arch/powerpc/include/asm/switch_to.h
index 58abeda64..15cca17cb 100644
--- a/kernel/arch/powerpc/include/asm/switch_to.h
+++ b/kernel/arch/powerpc/include/asm/switch_to.h
@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
extern void enable_kernel_fp(void);
extern void enable_kernel_altivec(void);
+extern void enable_kernel_vsx(void);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
diff --git a/kernel/arch/powerpc/kernel/eeh.c b/kernel/arch/powerpc/kernel/eeh.c
index 9ee61d156..cb565ad0a 100644
--- a/kernel/arch/powerpc/kernel/eeh.c
+++ b/kernel/arch/powerpc/kernel/eeh.c
@@ -310,11 +310,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
if (!(pe->type & EEH_PE_PHB)) {
if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+
+ /*
+ * The config space of some PCI devices can't be accessed
+ * when their PEs are in frozen state. Otherwise, fenced
+ * PHB might be seen. Those PEs are identified with flag
+ * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
+ * is set automatically when the PE is put to EEH_PE_ISOLATED.
+ *
+ * Restoring BARs possibly triggers PCI config access in
+ * (OPAL) firmware and then causes fenced PHB. If the
+ * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
+ * pointless to restore BARs and dump config space.
+ */
eeh_ops->configure_bridge(pe);
- eeh_pe_restore_bars(pe);
+ if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
+ eeh_pe_restore_bars(pe);
- pci_regs_buf[0] = 0;
- eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+ pci_regs_buf[0] = 0;
+ eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+ }
}
eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
@@ -1118,9 +1133,6 @@ void eeh_add_device_late(struct pci_dev *dev)
return;
}
- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
- eeh_ops->probe(pdn, NULL);
-
/*
* The EEH cache might not be removed correctly because of
* unbalanced kref to the device during unplug time, which
@@ -1144,6 +1156,9 @@ void eeh_add_device_late(struct pci_dev *dev)
dev->dev.archdata.edev = NULL;
}
+ if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+ eeh_ops->probe(pdn, NULL);
+
edev->pdev = dev;
dev->dev.archdata.edev = edev;
diff --git a/kernel/arch/powerpc/kernel/process.c b/kernel/arch/powerpc/kernel/process.c
index febb50dd5..0596373cd 100644
--- a/kernel/arch/powerpc/kernel/process.c
+++ b/kernel/arch/powerpc/kernel/process.c
@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
-#if 0
-/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
WARN_ON(preemptible());
@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
-#endif
void giveup_vsx(struct task_struct *tsk)
{
diff --git a/kernel/arch/powerpc/kernel/rtas.c b/kernel/arch/powerpc/kernel/rtas.c
index 7a488c108..caffb10e7 100644
--- a/kernel/arch/powerpc/kernel/rtas.c
+++ b/kernel/arch/powerpc/kernel/rtas.c
@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
}
EXPORT_SYMBOL(rtas_get_sensor);
+int rtas_get_sensor_fast(int sensor, int index, int *state)
+{
+ int token = rtas_token("get-sensor-state");
+ int rc;
+
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
+ rc = rtas_call(token, 2, 2, state, sensor, index);
+ WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+ rc <= RTAS_EXTENDED_DELAY_MAX));
+
+ if (rc < 0)
+ return rtas_error_rc(rc);
+ return rc;
+}
+
bool rtas_indicator_present(int token, int *maxindex)
{
int proplen, count, i;
diff --git a/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index b027a8973..c6d601cc9 100644
--- a/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -421,14 +421,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
v = pte & ~HPTE_V_HVLOCK;
if (v & HPTE_V_VALID) {
- u64 pte1;
-
- pte1 = be64_to_cpu(hpte[1]);
hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
- rb = compute_tlbie_rb(v, pte1, pte_index);
+ rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
- /* Read PTE low word after tlbie to get final R/C values */
- remove_revmap_chain(kvm, pte_index, rev, v, pte1);
+ /*
+ * The reference (R) and change (C) bits in a HPT
+ * entry can be set by hardware at any time up until
+ * the HPTE is invalidated and the TLB invalidation
+ * sequence has completed. This means that when
+ * removing a HPTE, we need to re-read the HPTE after
+ * the invalidation sequence has completed in order to
+ * obtain reliable values of R and C.
+ */
+ remove_revmap_chain(kvm, pte_index, rev, v,
+ be64_to_cpu(hpte[1]));
}
r = rev->guest_rpte & ~HPTE_GR_RESERVED;
note_hpte_modification(kvm, rev);
diff --git a/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 4d70df26c..3b2d2c5b6 100644
--- a/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1127,6 +1127,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
beq 4f
b guest_exit_cont
3:
diff --git a/kernel/arch/powerpc/mm/hugepage-hash64.c b/kernel/arch/powerpc/mm/hugepage-hash64.c
index 43dafb9d6..4d87122cf 100644
--- a/kernel/arch/powerpc/mm/hugepage-hash64.c
+++ b/kernel/arch/powerpc/mm/hugepage-hash64.c
@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
BUG_ON(index >= 4096);
vpn = hpt_vpn(ea, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
hpte_slot_array = get_hpte_slot_array(pmdp);
if (psize == MMU_PAGE_4K) {
/*
@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
valid = hpte_valid(hpte_slot_array, index);
if (valid) {
/* update the hpte bits */
+ hash = hpt_hash(vpn, shift, ssize);
hidx = hpte_hash_index(hpte_slot_array, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
if (!valid) {
unsigned long hpte_group;
+ hash = hpt_hash(vpn, shift, ssize);
/* insert new entry */
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
new_pmd |= _PAGE_HASHPTE;
diff --git a/kernel/arch/powerpc/platforms/pseries/ras.c b/kernel/arch/powerpc/platforms/pseries/ras.c
index 02e4a1745..3b6647e57 100644
--- a/kernel/arch/powerpc/platforms/pseries/ras.c
+++ b/kernel/arch/powerpc/platforms/pseries/ras.c
@@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
int state;
int critical;
- status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
+ status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
+ &state);
if (state > 3)
critical = 1; /* Time Critical */
diff --git a/kernel/arch/powerpc/platforms/pseries/setup.c b/kernel/arch/powerpc/platforms/pseries/setup.c
index df6a70419..e6e8b241d 100644
--- a/kernel/arch/powerpc/platforms/pseries/setup.c
+++ b/kernel/arch/powerpc/platforms/pseries/setup.c
@@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
eeh_dev_init(PCI_DN(np), pci->phb);
}
break;
+ case OF_RECONFIG_DETACH_NODE:
+ pci = PCI_DN(np);
+ if (pci)
+ list_del(&pci->list);
+ break;
default:
err = NOTIFY_DONE;
break;
diff --git a/kernel/arch/s390/boot/compressed/misc.c b/kernel/arch/s390/boot/compressed/misc.c
index 42506b371..4da604ebf 100644
--- a/kernel/arch/s390/boot/compressed/misc.c
+++ b/kernel/arch/s390/boot/compressed/misc.c
@@ -167,7 +167,7 @@ unsigned long decompress_kernel(void)
#endif
puts("Uncompressing Linux... ");
- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
puts("Ok, booting the kernel.\n");
return (unsigned long) output;
}
diff --git a/kernel/arch/s390/kernel/setup.c b/kernel/arch/s390/kernel/setup.c
index 7262fe438..1942f22e6 100644
--- a/kernel/arch/s390/kernel/setup.c
+++ b/kernel/arch/s390/kernel/setup.c
@@ -683,7 +683,7 @@ static void __init setup_memory(void)
/*
* Setup hardware capabilities.
*/
-static void __init setup_hwcaps(void)
+static int __init setup_hwcaps(void)
{
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
struct cpuid cpu_id;
@@ -749,9 +749,11 @@ static void __init setup_hwcaps(void)
elf_hwcap |= HWCAP_S390_TE;
/*
- * Vector extension HWCAP_S390_VXRS is bit 11.
+ * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
+ * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
+ * instead of facility bit 129.
*/
- if (test_facility(129))
+ if (MACHINE_HAS_VX)
elf_hwcap |= HWCAP_S390_VXRS;
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
@@ -788,7 +790,9 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z13");
break;
}
+ return 0;
}
+arch_initcall(setup_hwcaps);
/*
* Add system information as device randomness
@@ -871,11 +875,6 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
/*
- * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
- */
- setup_hwcaps();
-
- /*
* Create kernel page tables and switch to virtual addressing.
*/
paging_init();
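
Moving setup_hwcaps() out of setup_arch() and registering it as an arch_initcall means it now runs later in boot, once initcalls are processed, which is why its return type changes from void to int. A minimal sketch of the initcall idiom (kernel-style, with an illustrative function name):

/* Initcall functions must return int (0 on success); the section
 * machinery behind arch_initcall() collects them so the init code
 * can run each level in order during boot. */
static int __init my_feature_setup(void)
{
        /* probe hardware, fill in capability flags, ... */
        return 0;
}
arch_initcall(my_feature_setup);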
diff --git a/kernel/arch/sh/boot/compressed/misc.c b/kernel/arch/sh/boot/compressed/misc.c
index 95470a472..208a9753a 100644
--- a/kernel/arch/sh/boot/compressed/misc.c
+++ b/kernel/arch/sh/boot/compressed/misc.c
@@ -132,7 +132,7 @@ void decompress_kernel(void)
puts("Uncompressing Linux... ");
cache_control(CACHE_ENABLE);
- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
cache_control(CACHE_DISABLE);
puts("Ok, booting the kernel.\n");
}
diff --git a/kernel/arch/unicore32/boot/compressed/misc.c b/kernel/arch/unicore32/boot/compressed/misc.c
index 176d5bda3..5c65dfee2 100644
--- a/kernel/arch/unicore32/boot/compressed/misc.c
+++ b/kernel/arch/unicore32/boot/compressed/misc.c
@@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start,
output_ptr = get_unaligned_le32(tmp);
arch_decomp_puts("Uncompressing Linux...");
- decompress(input_data, input_data_end - input_data, NULL, NULL,
- output_data, NULL, error);
+ __decompress(input_data, input_data_end - input_data, NULL, NULL,
+ output_data, 0, NULL, error);
arch_decomp_puts(" done, booting the kernel.\n");
return output_ptr;
}
diff --git a/kernel/arch/x86/boot/compressed/misc.c b/kernel/arch/x86/boot/compressed/misc.c
index a107b935e..e28437e0f 100644
--- a/kernel/arch/x86/boot/compressed/misc.c
+++ b/kernel/arch/x86/boot/compressed/misc.c
@@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
#endif
debug_putstr("\nDecompressing Linux... ");
- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output, output_len,
+ NULL, error);
parse_elf(output);
/*
* 32-bit always performs relocations. 64-bit relocations are only
diff --git a/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c b/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 2079baf06..daf8d2b9a 100644
--- a/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/kernel/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -294,6 +294,7 @@ static struct ahash_alg ghash_async_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
+ .cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
diff --git a/kernel/arch/x86/include/asm/desc.h b/kernel/arch/x86/include/asm/desc.h
index a0bf89fd2..4e10d73cf 100644
--- a/kernel/arch/x86/include/asm/desc.h
+++ b/kernel/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
set_ldt(NULL, 0);
}
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
- set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- preempt_disable();
- load_LDT_nolock(pc);
- preempt_enable();
-}
-
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/kernel/arch/x86/include/asm/mmu.h b/kernel/arch/x86/include/asm/mmu.h
index 09b9620a7..364d27481 100644
--- a/kernel/arch/x86/include/asm/mmu.h
+++ b/kernel/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
* we put the segment information here.
*/
typedef struct {
- void *ldt;
- int size;
+ struct ldt_struct *ldt;
#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/kernel/arch/x86/include/asm/mmu_context.h b/kernel/arch/x86/include/asm/mmu_context.h
index e997f70f8..80d67dd80 100644
--- a/kernel/arch/x86/include/asm/mmu_context.h
+++ b/kernel/arch/x86/include/asm/mmu_context.h
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+ /*
+ * Xen requires page-aligned LDTs with special permissions. This is
+ * needed to prevent us from installing evil descriptors such as
+ * call gates. On native, we could merge the ldt_struct and LDT
+ * allocations, but it's not worth trying to optimize.
+ */
+ struct desc_struct *entries;
+ int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+ struct ldt_struct *ldt;
+
+ /* lockless_dereference synchronizes with smp_store_release */
+ ldt = lockless_dereference(mm->context.ldt);
+
+ /*
+ * Any change to mm->context.ldt is followed by an IPI to all
+ * CPUs with the mm active. The LDT will not be freed until
+ * after the IPI is handled by all such CPUs. This means that,
+ * if the ldt_struct changes before we return, the values we see
+ * will be safe, and the new values will be loaded before we run
+ * any user code.
+ *
+ * NB: don't try to convert this to use RCU without extreme care.
+ * We would still need IRQs off, because we don't want to change
+ * the local LDT after an IPI loaded a newer value than the one
+ * that we can see.
+ */
+
+ if (unlikely(ldt))
+ set_ldt(ldt->entries, ldt->size);
+ else
+ clear_LDT();
+
+ DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
+/*
* Used for LDT copy/destruction.
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
- * never free an LDT while the mm still exists. That
- * means that next->context.ldt != prev->context.ldt,
- * because mms never share an LDT.
+ * never set context.ldt to NULL while the mm still
+ * exists. That means that next->context.ldt !=
+ * prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
}
#endif
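
load_mm_ldt() pairs lockless_dereference() on the reader side with smp_store_release() on the writer side (see install_ldt() in the ldt.c diff below): the release store publishes a fully initialized ldt_struct, so a reader can never observe the pointer without also observing its contents. A runnable userspace analogue using C11 atomics (a sketch only; consume ordering is strengthened to acquire, as most compilers do anyway):

#include <stdatomic.h>

struct ldt_struct { void *entries; int size; };

static _Atomic(struct ldt_struct *) current_ldt;

/* Writer: initialize fully, then publish with release semantics. */
static void install(struct ldt_struct *ldt)
{
        atomic_store_explicit(&current_ldt, ldt, memory_order_release);
}

/* Reader: an acquire load guarantees that if we see the pointer,
 * we also see the entries/size it was initialized with. */
static int read_size(void)
{
        struct ldt_struct *ldt =
                atomic_load_explicit(&current_ldt, memory_order_acquire);
        return ldt ? ldt->size : 0;
}

int main(void)
{
        static struct ldt_struct ldt = { 0, 16 };
        install(&ldt);
        return read_size() == 16 ? 0 : 1;
}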
diff --git a/kernel/arch/x86/kernel/acpi/boot.c b/kernel/arch/x86/kernel/acpi/boot.c
index dbe76a14c..07bea8022 100644
--- a/kernel/arch/x86/kernel/acpi/boot.c
+++ b/kernel/arch/x86/kernel/acpi/boot.c
@@ -489,6 +489,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
* stash over-ride to indicate we've been here
diff --git a/kernel/arch/x86/kernel/cpu/common.c b/kernel/arch/x86/kernel/cpu/common.c
index a62cf04da..205e0f3df 100644
--- a/kernel/arch/x86/kernel/cpu/common.c
+++ b/kernel/arch/x86/kernel/cpu/common.c
@@ -1434,7 +1434,7 @@ void cpu_init(void)
load_sp0(t, &current->thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
clear_all_debug_regs();
dbg_restore_debug_regs();
@@ -1483,7 +1483,7 @@ void cpu_init(void)
load_sp0(t, thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
diff --git a/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c b/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c
index b4a41cf03..e166d833c 100644
--- a/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/kernel/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -116,6 +116,27 @@ void mce_intel_hcpu_update(unsigned long cpu)
per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}
+static void cmci_toggle_interrupt_mode(bool on)
+{
+ unsigned long flags, *owned;
+ int bank;
+ u64 val;
+
+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ owned = this_cpu_ptr(mce_banks_owned);
+ for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+
+ if (on)
+ val |= MCI_CTL2_CMCI_EN;
+ else
+ val &= ~MCI_CTL2_CMCI_EN;
+
+ wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ }
+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
@@ -145,7 +166,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
*/
if (!atomic_read(&cmci_storm_on_cpus)) {
__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
- cmci_reenable();
+ cmci_toggle_interrupt_mode(true);
cmci_recheck();
}
return CMCI_POLL_INTERVAL;
@@ -156,22 +177,6 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
}
}
-static void cmci_storm_disable_banks(void)
-{
- unsigned long flags, *owned;
- int bank;
- u64 val;
-
- raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- owned = this_cpu_ptr(mce_banks_owned);
- for_each_set_bit(bank, owned, MAX_NR_BANKS) {
- rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
- val &= ~MCI_CTL2_CMCI_EN;
- wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
- }
- raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
-}
-
static bool cmci_storm_detect(void)
{
unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
@@ -193,7 +198,7 @@ static bool cmci_storm_detect(void)
if (cnt <= CMCI_STORM_THRESHOLD)
return false;
- cmci_storm_disable_banks();
+ cmci_toggle_interrupt_mode(false);
__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
r = atomic_add_return(1, &cmci_storm_on_cpus);
mce_timer_kick(CMCI_STORM_INTERVAL);
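
cmci_toggle_interrupt_mode() folds the old storm-disable path and the cmci_reenable() call into one helper that flips MCI_CTL2_CMCI_EN (bit 30 of each owned bank's MSR_IA32_MCx_CTL2) in a read-modify-write cycle. The shape of that cycle in isolation (runnable, with a fake MSR variable standing in for rdmsrl()/wrmsrl()):

#include <stdio.h>
#include <stdint.h>

#define MCI_CTL2_CMCI_EN (1ULL << 30)

/* Stand-in MSR storage; the kernel reads and writes the real
 * register with rdmsrl()/wrmsrl() instead. */
static uint64_t fake_msr = 0x41ULL;

static void toggle_cmci(int on)
{
        uint64_t val = fake_msr;        /* rdmsrl(MSR_IA32_MCx_CTL2, val) */
        if (on)
                val |= MCI_CTL2_CMCI_EN;
        else
                val &= ~MCI_CTL2_CMCI_EN;
        fake_msr = val;                 /* wrmsrl(MSR_IA32_MCx_CTL2, val) */
}

int main(void)
{
        toggle_cmci(0);
        printf("storm (polling):    %#llx\n", (unsigned long long)fake_msr);
        toggle_cmci(1);
        printf("calm (interrupts):  %#llx\n", (unsigned long long)fake_msr);
        return 0;
}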
diff --git a/kernel/arch/x86/kernel/cpu/perf_event.c b/kernel/arch/x86/kernel/cpu/perf_event.c
index aa4e3a74e..4cc98a4e8 100644
--- a/kernel/arch/x86/kernel/cpu/perf_event.c
+++ b/kernel/arch/x86/kernel/cpu/perf_event.c
@@ -2170,21 +2170,25 @@ static unsigned long get_segment_base(unsigned int segment)
int idx = segment >> 3;
if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ struct ldt_struct *ldt;
+
if (idx > LDT_ENTRIES)
return 0;
- if (idx > current->active_mm->context.size)
+ /* IRQs are off, so this synchronizes with smp_store_release */
+ ldt = lockless_dereference(current->active_mm->context.ldt);
+ if (!ldt || idx > ldt->size)
return 0;
- desc = current->active_mm->context.ldt;
+ desc = &ldt->entries[idx];
} else {
if (idx > GDT_ENTRIES)
return 0;
- desc = raw_cpu_ptr(gdt_page.gdt);
+ desc = raw_cpu_ptr(gdt_page.gdt) + idx;
}
- return get_desc_base(desc + idx);
+ return get_desc_base(desc);
}
#ifdef CONFIG_COMPAT
diff --git a/kernel/arch/x86/kernel/ldt.c b/kernel/arch/x86/kernel/ldt.c
index c37886d75..2bcc0525f 100644
--- a/kernel/arch/x86/kernel/ldt.c
+++ b/kernel/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -20,82 +21,82 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
- if (current->active_mm == current_mm)
- load_LDT(&current->active_mm->context);
+ mm_context_t *pc;
+
+ if (current->active_mm != current_mm)
+ return;
+
+ pc = &current->active_mm->context;
+ set_ldt(pc->ldt->entries, pc->ldt->size);
}
-#endif
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
{
- void *oldldt, *newldt;
- int oldsize;
-
- if (mincount <= pc->size)
- return 0;
- oldsize = pc->size;
- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+ struct ldt_struct *new_ldt;
+ int alloc_size;
+
+ if (size > LDT_ENTRIES)
+ return NULL;
+
+ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+ if (!new_ldt)
+ return NULL;
+
+ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+ alloc_size = size * LDT_ENTRY_SIZE;
+
+ /*
+ * Xen is very picky: it requires a page-aligned LDT that has no
+ * trailing nonzero bytes in any page that contains LDT descriptors.
+ * Keep it simple: zero the whole allocation and never allocate less
+ * than PAGE_SIZE.
+ */
+ if (alloc_size > PAGE_SIZE)
+ new_ldt->entries = vzalloc(alloc_size);
else
- newldt = (void *)__get_free_page(GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
+ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
- oldldt = pc->ldt;
- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
- (mincount - oldsize) * LDT_ENTRY_SIZE);
+ if (!new_ldt->entries) {
+ kfree(new_ldt);
+ return NULL;
+ }
- paravirt_alloc_ldt(newldt, mincount);
+ new_ldt->size = size;
+ return new_ldt;
+}
-#ifdef CONFIG_X86_64
- /* CHECKME: Do we really need this ? */
- wmb();
-#endif
- pc->ldt = newldt;
- wmb();
- pc->size = mincount;
- wmb();
-
- if (reload) {
-#ifdef CONFIG_SMP
- preempt_disable();
- load_LDT(pc);
- if (!cpumask_equal(mm_cpumask(current->mm),
- cpumask_of(smp_processor_id())))
- smp_call_function(flush_ldt, current->mm, 1);
- preempt_enable();
-#else
- load_LDT(pc);
-#endif
- }
- if (oldsize) {
- paravirt_free_ldt(oldldt, oldsize);
- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- put_page(virt_to_page(oldldt));
- }
- return 0;
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+ paravirt_alloc_ldt(ldt->entries, ldt->size);
}
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+ struct ldt_struct *ldt)
{
- int err = alloc_ldt(new, old->size, 0);
- int i;
+ /* Synchronizes with lockless_dereference in load_mm_ldt. */
+ smp_store_release(&current_mm->context.ldt, ldt);
+
+ /* Activate the LDT for all CPUs using current_mm. */
+ on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}
- if (err < 0)
- return err;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+ if (likely(!ldt))
+ return;
- for (i = 0; i < old->size; i++)
- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
- return 0;
+ paravirt_free_ldt(ldt->entries, ldt->size);
+ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(ldt->entries);
+ else
+ kfree(ldt->entries);
+ kfree(ldt);
}
/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+ struct ldt_struct *new_ldt;
struct mm_struct *old_mm;
int retval = 0;
mutex_init(&mm->context.lock);
- mm->context.size = 0;
old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
- mutex_lock(&old_mm->context.lock);
- retval = copy_ldt(&mm->context, &old_mm->context);
- mutex_unlock(&old_mm->context.lock);
+ if (!old_mm) {
+ mm->context.ldt = NULL;
+ return 0;
}
+
+ mutex_lock(&old_mm->context.lock);
+ if (!old_mm->context.ldt) {
+ mm->context.ldt = NULL;
+ goto out_unlock;
+ }
+
+ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+ if (!new_ldt) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+ new_ldt->size * LDT_ENTRY_SIZE);
+ finalize_ldt_struct(new_ldt);
+
+ mm->context.ldt = new_ldt;
+
+out_unlock:
+ mutex_unlock(&old_mm->context.lock);
return retval;
}
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
- if (mm->context.size) {
-#ifdef CONFIG_X86_32
- /* CHECKME: Can this ever happen ? */
- if (mm == current->active_mm)
- clear_LDT();
-#endif
- paravirt_free_ldt(mm->context.ldt, mm->context.size);
- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- put_page(virt_to_page(mm->context.ldt));
- mm->context.size = 0;
- }
+ free_ldt_struct(mm->context.ldt);
+ mm->context.ldt = NULL;
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
- int err;
+ int retval;
unsigned long size;
struct mm_struct *mm = current->mm;
- if (!mm->context.size)
- return 0;
+ mutex_lock(&mm->context.lock);
+
+ if (!mm->context.ldt) {
+ retval = 0;
+ goto out_unlock;
+ }
+
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
- mutex_lock(&mm->context.lock);
- size = mm->context.size * LDT_ENTRY_SIZE;
+ size = mm->context.ldt->size * LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
- mutex_unlock(&mm->context.lock);
- if (err < 0)
- goto error_return;
+ if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+ retval = -EFAULT;
+ goto out_unlock;
+ }
+
if (size != bytecount) {
- /* zero-fill the rest */
- if (clear_user(ptr + size, bytecount - size) != 0) {
- err = -EFAULT;
- goto error_return;
+ /* Zero-fill the rest and pretend we read bytecount bytes. */
+ if (clear_user(ptr + size, bytecount - size)) {
+ retval = -EFAULT;
+ goto out_unlock;
}
}
- return bytecount;
-error_return:
- return err;
+ retval = bytecount;
+
+out_unlock:
+ mutex_unlock(&mm->context.lock);
+ return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
struct desc_struct ldt;
int error;
struct user_desc ldt_info;
+ int oldsize, newsize;
+ struct ldt_struct *new_ldt, *old_ldt;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
goto out;
}
- mutex_lock(&mm->context.lock);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context,
- ldt_info.entry_number + 1, 1);
- if (error < 0)
- goto out_unlock;
- }
-
- /* Allow LDTs to be cleared by the user. */
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
- if (oldmode || LDT_empty(&ldt_info)) {
- memset(&ldt, 0, sizeof(ldt));
- goto install;
+ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+ LDT_empty(&ldt_info)) {
+ /* The user wants to clear the entry. */
+ memset(&ldt, 0, sizeof(ldt));
+ } else {
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ error = -EINVAL;
+ goto out;
}
+
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
}
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
- error = -EINVAL;
+ mutex_lock(&mm->context.lock);
+
+ old_ldt = mm->context.ldt;
+ oldsize = old_ldt ? old_ldt->size : 0;
+ newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+ error = -ENOMEM;
+ new_ldt = alloc_ldt_struct(newsize);
+ if (!new_ldt)
goto out_unlock;
- }
- fill_ldt(&ldt, &ldt_info);
- if (oldmode)
- ldt.avl = 0;
+ if (old_ldt)
+ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+ new_ldt->entries[ldt_info.entry_number] = ldt;
+ finalize_ldt_struct(new_ldt);
- /* Install the new entry ... */
-install:
- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+ install_ldt(mm, new_ldt);
+ free_ldt_struct(old_ldt);
error = 0;
out_unlock:
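
The rewritten modify_ldt path never mutates a live LDT: every write allocates a fresh ldt_struct, copies the old entries, patches the one slot, publishes the result, and only then frees the predecessor. The lifecycle, condensed from the diff above (illustrative, not a literal copy):

/* Condensed write_ldt() lifecycle:
 *
 *   new_ldt = alloc_ldt_struct(newsize);        // zeroed, page-aligned
 *   memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
 *   new_ldt->entries[n] = ldt;                  // the one modified slot
 *   finalize_ldt_struct(new_ldt);               // paravirt_alloc_ldt(); immutable now
 *   install_ldt(mm, new_ldt);                   // smp_store_release + IPI reload
 *   free_ldt_struct(old_ldt);                   // safe: every CPU has reloaded
 */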
diff --git a/kernel/arch/x86/kernel/process_64.c b/kernel/arch/x86/kernel/process_64.c
index ddfdbf74f..5e0bf57d9 100644
--- a/kernel/arch/x86/kernel/process_64.c
+++ b/kernel/arch/x86/kernel/process_64.c
@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all)
void release_thread(struct task_struct *dead_task)
{
if (dead_task->mm) {
- if (dead_task->mm->context.size) {
+ if (dead_task->mm->context.ldt) {
pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
dead_task->comm,
dead_task->mm->context.ldt,
- dead_task->mm->context.size);
+ dead_task->mm->context.ldt->size);
BUG();
}
}
diff --git a/kernel/arch/x86/kernel/step.c b/kernel/arch/x86/kernel/step.c
index 9b4d51d0c..0ccb53a9f 100644
--- a/kernel/arch/x86/kernel/step.c
+++ b/kernel/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
+#include <asm/mmu_context.h>
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
struct desc_struct *desc;
unsigned long base;
- seg &= ~7UL;
+ seg >>= 3;
mutex_lock(&child->mm->context.lock);
- if (unlikely((seg >> 3) >= child->mm->context.size))
+ if (unlikely(!child->mm->context.ldt ||
+ seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
- desc = child->mm->context.ldt + seg;
+ desc = &child->mm->context.ldt->entries[seg];
base = get_desc_base(desc);
/* 16-bit code segment? */
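Both the old masking (seg &= ~7UL, with the index recovered later via >> 3)
and the new up-front shift extract the descriptor-table index from an x86
segment selector, whose low three bits carry no index information:

    /* x86 segment selector layout (architectural, not patch-specific): */
    unsigned int index = seg >> 3;        /* bits 15..3: GDT/LDT index  */
    unsigned int ti    = (seg >> 2) & 1;  /* bit 2: 1 = LDT, 0 = GDT    */
    unsigned int rpl   = seg & 3;         /* bits 1..0: requested priv. */

Shifting first lets the bounds check and the array index use the same
value, which is exactly what the ldt->size comparison above relies on.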
diff --git a/kernel/arch/x86/kvm/mmu.c b/kernel/arch/x86/kvm/mmu.c
index b73337634..554e877e0 100644
--- a/kernel/arch/x86/kvm/mmu.c
+++ b/kernel/arch/x86/kvm/mmu.c
@@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep)
{
return ACCESS_ONCE(*sptep);
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- /* It is valid if the spte is zapped. */
- return spte == 0ull;
-}
#else
union split_spte {
struct {
@@ -478,23 +472,6 @@ retry:
return spte.spte;
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- union split_spte sspte = (union split_spte)spte;
- u32 high_mmio_mask = shadow_mmio_mask >> 32;
-
- /* It is valid if the spte is zapped. */
- if (spte == 0ull)
- return true;
-
- /* It is valid if the spte is being zapped. */
- if (sspte.spte_low == 0ull &&
- (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
- return true;
-
- return false;
-}
#endif
static bool spte_is_locklessly_modifiable(u64 spte)
@@ -3343,21 +3320,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
return vcpu_match_mmio_gva(vcpu, addr);
}
-
-/*
- * On direct hosts, the last spte only allows two states
- * for mmio page fault:
- * - It is the mmio spte
- * - It is zapped or it is being zapped.
- *
- * This function completely checks the spte when the last spte
- * is not the mmio spte.
- */
-static bool check_direct_spte_mmio_pf(u64 spte)
-{
- return __check_direct_spte_mmio_pf(spte);
-}
-
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
struct kvm_shadow_walk_iterator iterator;
@@ -3400,13 +3362,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
}
/*
- * It's ok if the gva is remapped by other cpus on shadow guest,
- * it's a BUG if the gfn is not a mmio page.
- */
- if (direct && !check_direct_spte_mmio_pf(spte))
- return RET_MMIO_PF_BUG;
-
- /*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
diff --git a/kernel/arch/x86/kvm/x86.c b/kernel/arch/x86/kvm/x86.c
index 6cceb2cb2..37d79a026 100644
--- a/kernel/arch/x86/kvm/x86.c
+++ b/kernel/arch/x86/kvm/x86.c
@@ -2192,7 +2192,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
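The open-coded vendor callback gives way to a named helper,
adjust_tsc_offset_guest(). Plausibly the helper is just a thin wrapper that
pins down the boolean third argument; a sketch under that assumption (the
real body is defined elsewhere in x86.c):

    /* Assumed shape of the helper: the adjustment is in guest TSC units. */
    static void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
    {
            kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
    }

Naming the unit at the call site is the win: a bare "true" says nothing
about what the adjustment scales.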
diff --git a/kernel/arch/x86/math-emu/fpu_entry.c b/kernel/arch/x86/math-emu/fpu_entry.c
index 9b8681241..274a52b11 100644
--- a/kernel/arch/x86/math-emu/fpu_entry.c
+++ b/kernel/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
-#include <asm/desc.h>
#include <asm/user.h>
#include <asm/i387.h>
@@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info)
math_abort(FPU_info, SIGILL);
}
- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
diff --git a/kernel/arch/x86/math-emu/fpu_system.h b/kernel/arch/x86/math-emu/fpu_system.h
index 2c614410a..d342fce49 100644
--- a/kernel/arch/x86/math-emu/fpu_system.h
+++ b/kernel/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+ static struct desc_struct zero_desc;
+ struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ seg >>= 3;
+ mutex_lock(&current->mm->context.lock);
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ ret = current->mm->context.ldt->entries[seg];
+ mutex_unlock(&current->mm->context.lock);
+#endif
+ return ret;
+}
+
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
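FPU_get_ldt_descriptor() returns the zero descriptor for any selector that
is out of range or arrives when no LDT is installed, so callers need no
separate error path. A usage sketch mirroring the fpu_entry.c hunk above:

    /* The zero descriptor fails SEG_D_SIZE(), so a bogus selector drops
     * into the 16-bit path instead of chasing a stale LDT pointer. */
    struct desc_struct code_descriptor = FPU_get_ldt_descriptor(FPU_CS);

    if (SEG_D_SIZE(code_descriptor)) {
            /* segmented 32-bit protected mode */
    } else {
            /* 16-bit protected mode */
    }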
diff --git a/kernel/arch/x86/math-emu/get_address.c b/kernel/arch/x86/math-emu/get_address.c
index 6ef5e9938..8300db71c 100644
--- a/kernel/arch/x86/math-emu/get_address.c
+++ b/kernel/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
#include <linux/stddef.h>
#include <asm/uaccess.h>
-#include <asm/desc.h>
#include "fpu_system.h"
#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
addr->selector = PM_REG_(segment);
}
- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+ descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address
diff --git a/kernel/arch/x86/mm/init_32.c b/kernel/arch/x86/mm/init_32.c
index c8140e128..c23ab1ee3 100644
--- a/kernel/arch/x86/mm/init_32.c
+++ b/kernel/arch/x86/mm/init_32.c
@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end)
vaddr = start;
pgd_idx = pgd_index(vaddr);
+ pmd_idx = pmd_index(vaddr);
for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
diff --git a/kernel/arch/x86/power/cpu.c b/kernel/arch/x86/power/cpu.c
index 757678fb2..bf9384488 100644
--- a/kernel/arch/x86/power/cpu.c
+++ b/kernel/arch/x86/power/cpu.c
@@ -23,6 +23,7 @@
#include <asm/debugreg.h>
#include <asm/fpu-internal.h> /* pcntxt_mask */
#include <asm/cpu.h>
+#include <asm/mmu_context.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -154,7 +155,7 @@ static void fix_processor_context(void)
syscall_init(); /* This sets MSR_*STAR and related */
#endif
load_TR_desc(); /* This does ltr */
- load_LDT(&current->active_mm->context); /* This does lldt */
+ load_mm_ldt(current->active_mm); /* This does lldt */
}
/**
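load_mm_ldt() replaces the unconditional load_LDT() on the resume path. A
sketch of what such a helper has to do, assuming the ldt_struct fields from
the earlier hunks (the real definition is in asm/mmu_context.h per the
diffstat, and may use a stronger lockless-read primitive than shown here):

    /* Sketch: read the pointer once, then either load or clear LDTR. */
    static inline void load_mm_ldt(struct mm_struct *mm)
    {
            struct ldt_struct *ldt = ACCESS_ONCE(mm->context.ldt);

            if (unlikely(ldt))
                    set_ldt(ldt->entries, ldt->size); /* lldt on the table */
            else
                    clear_LDT();            /* null selector into LDTR */
    }

Reading the pointer exactly once matters: another CPU may swap in a new
table concurrently, and entries/size must come from the same snapshot.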
diff --git a/kernel/arch/xtensa/include/asm/traps.h b/kernel/arch/xtensa/include/asm/traps.h
index 677bfcf4e..28f33a8b7 100644
--- a/kernel/arch/xtensa/include/asm/traps.h
+++ b/kernel/arch/xtensa/include/asm/traps.h
@@ -25,30 +25,39 @@ static inline void spill_registers(void)
{
#if XCHAL_NUM_AREGS > 16
__asm__ __volatile__ (
- " call12 1f\n"
+ " call8 1f\n"
" _j 2f\n"
" retw\n"
" .align 4\n"
"1:\n"
+#if XCHAL_NUM_AREGS == 32
+ " _entry a1, 32\n"
+ " addi a8, a0, 3\n"
+ " _entry a1, 16\n"
+ " mov a12, a12\n"
+ " retw\n"
+#else
" _entry a1, 48\n"
- " addi a12, a0, 3\n"
-#if XCHAL_NUM_AREGS > 32
- " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+ " call12 1f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
" _entry a1, 48\n"
" mov a12, a0\n"
" .endr\n"
-#endif
- " _entry a1, 48\n"
+ " _entry a1, 16\n"
#if XCHAL_NUM_AREGS % 12 == 0
- " mov a8, a8\n"
-#elif XCHAL_NUM_AREGS % 12 == 4
" mov a12, a12\n"
-#elif XCHAL_NUM_AREGS % 12 == 8
+#elif XCHAL_NUM_AREGS % 12 == 4
" mov a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a8, a8\n"
#endif
" retw\n"
+#endif
"2:\n"
- : : : "a12", "a13", "memory");
+ : : : "a8", "a9", "memory");
#else
__asm__ __volatile__ (
" mov a12, a12\n"
diff --git a/kernel/arch/xtensa/kernel/entry.S b/kernel/arch/xtensa/kernel/entry.S
index 82bbfa5a0..a2a902140 100644
--- a/kernel/arch/xtensa/kernel/entry.S
+++ b/kernel/arch/xtensa/kernel/entry.S
@@ -568,12 +568,13 @@ user_exception_exit:
* (if we have restored WSBITS-1 frames).
*/
+2:
#if XCHAL_HAVE_THREADPTR
l32i a3, a1, PT_THREADPTR
wur a3, threadptr
#endif
-2: j common_exception_exit
+ j common_exception_exit
/* This is the kernel exception exit.
* We avoided to do a MOVSP when we entered the exception, but we
@@ -1820,7 +1821,7 @@ ENDPROC(system_call)
mov a12, a0
.endr
#endif
- _entry a1, 48
+ _entry a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
mov a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
@@ -1844,7 +1845,7 @@ ENDPROC(system_call)
ENTRY(_switch_to)
- entry a1, 16
+ entry a1, 48
mov a11, a3 # and 'next' (a3)