author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/mips/kernel
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, one patch collided: "Force tick interrupt and get rid of softirq magic" (I70131fb85). The collision was dropped because its logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/mips/kernel')
-rw-r--r--  kernel/arch/mips/kernel/Makefile | 5
-rw-r--r--  kernel/arch/mips/kernel/asm-offsets.c | 14
-rw-r--r--  kernel/arch/mips/kernel/branch.c | 4
-rw-r--r--  kernel/arch/mips/kernel/cevt-bcm1480.c | 44
-rw-r--r--  kernel/arch/mips/kernel/cevt-ds1287.c | 37
-rw-r--r--  kernel/arch/mips/kernel/cevt-gt641xx.c | 57
-rw-r--r--  kernel/arch/mips/kernel/cevt-r4k.c | 18
-rw-r--r--  kernel/arch/mips/kernel/cevt-sb1250.c | 45
-rw-r--r--  kernel/arch/mips/kernel/cevt-txx9.c | 81
-rw-r--r--  kernel/arch/mips/kernel/cps-vec-ns16550.S | 202
-rw-r--r--  kernel/arch/mips/kernel/cps-vec.S | 146
-rw-r--r--  kernel/arch/mips/kernel/cpu-probe.c | 82
-rw-r--r--  kernel/arch/mips/kernel/csrc-r4k.c | 44
-rw-r--r--  kernel/arch/mips/kernel/genex.S | 2
-rw-r--r--  kernel/arch/mips/kernel/head.S | 16
-rw-r--r--  kernel/arch/mips/kernel/i8259.c | 345
-rw-r--r--  kernel/arch/mips/kernel/idle.c | 16
-rw-r--r--  kernel/arch/mips/kernel/irq.c | 52
-rw-r--r--  kernel/arch/mips/kernel/irq_cpu.c | 169
-rw-r--r--  kernel/arch/mips/kernel/jump_label.c | 2
-rw-r--r--  kernel/arch/mips/kernel/kgdb.c | 4
-rw-r--r--  kernel/arch/mips/kernel/mips-cm.c | 309
-rw-r--r--  kernel/arch/mips/kernel/mips-cpc.c | 17
-rw-r--r--  kernel/arch/mips/kernel/mips-r2-to-r6-emul.c | 2
-rw-r--r--  kernel/arch/mips/kernel/mips_ksyms.c | 2
-rw-r--r--  kernel/arch/mips/kernel/octeon_switch.S | 26
-rw-r--r--  kernel/arch/mips/kernel/perf_event_mipsxx.c | 6
-rw-r--r--  kernel/arch/mips/kernel/pm-cps.c | 2
-rw-r--r--  kernel/arch/mips/kernel/prom.c | 3
-rw-r--r--  kernel/arch/mips/kernel/ptrace.c | 88
-rw-r--r--  kernel/arch/mips/kernel/r2300_switch.S | 28
-rw-r--r--  kernel/arch/mips/kernel/r4k_fpu.S | 436
-rw-r--r--  kernel/arch/mips/kernel/r4k_switch.S | 41
-rw-r--r--  kernel/arch/mips/kernel/scall32-o32.S | 79
-rw-r--r--  kernel/arch/mips/kernel/scall64-64.S | 41
-rw-r--r--  kernel/arch/mips/kernel/scall64-n32.S | 22
-rw-r--r--  kernel/arch/mips/kernel/scall64-o32.S | 57
-rw-r--r--  kernel/arch/mips/kernel/segment.c | 2
-rw-r--r--  kernel/arch/mips/kernel/setup.c | 36
-rw-r--r--  kernel/arch/mips/kernel/signal-common.h | 9
-rw-r--r--  kernel/arch/mips/kernel/signal.c | 445
-rw-r--r--  kernel/arch/mips/kernel/signal32.c | 212
-rw-r--r--  kernel/arch/mips/kernel/signal_n32.c | 11
-rw-r--r--  kernel/arch/mips/kernel/smp-bmips.c | 4
-rw-r--r--  kernel/arch/mips/kernel/smp-cps.c | 39
-rw-r--r--  kernel/arch/mips/kernel/smp-gic.c | 2
-rw-r--r--  kernel/arch/mips/kernel/smp.c | 13
-rw-r--r--  kernel/arch/mips/kernel/spinlock_test.c | 4
-rw-r--r--  kernel/arch/mips/kernel/spram.c | 1
-rw-r--r--  kernel/arch/mips/kernel/stacktrace.c | 27
-rw-r--r--  kernel/arch/mips/kernel/sysrq.c | 65
-rw-r--r--  kernel/arch/mips/kernel/traps.c | 101
-rw-r--r--  kernel/arch/mips/kernel/unaligned.c | 76
-rw-r--r--  kernel/arch/mips/kernel/uprobes.c | 341
-rw-r--r--  kernel/arch/mips/kernel/vdso.c | 207
-rw-r--r--  kernel/arch/mips/kernel/vmlinux.lds.S | 23
-rw-r--r--  kernel/arch/mips/kernel/vpe.c | 7
57 files changed, 2536 insertions(+), 1633 deletions(-)
diff --git a/kernel/arch/mips/kernel/Makefile b/kernel/arch/mips/kernel/Makefile
index d3d2ff2d7..68e2b7db9 100644
--- a/kernel/arch/mips/kernel/Makefile
+++ b/kernel/arch/mips/kernel/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o
obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
obj-$(CONFIG_MIPS_SPRAM) += spram.o
@@ -61,8 +62,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
-obj-$(CONFIG_I8259) += i8259.o
-obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
@@ -77,6 +76,7 @@ obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
@@ -100,6 +100,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_MIPS_CM) += mips-cm.o
obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
diff --git a/kernel/arch/mips/kernel/asm-offsets.c b/kernel/arch/mips/kernel/asm-offsets.c
index beabe19ff..154e2039e 100644
--- a/kernel/arch/mips/kernel/asm-offsets.c
+++ b/kernel/arch/mips/kernel/asm-offsets.c
@@ -1,5 +1,5 @@
/*
- * offset.c: Calculate pt_regs and task_struct offsets.
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
@@ -128,6 +128,7 @@ void output_thread_defines(void)
thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
+ OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
BLANK();
}
@@ -245,17 +246,6 @@ void output_sc_defines(void)
}
#endif
-#ifdef CONFIG_MIPS32_COMPAT
-void output_sc32_defines(void)
-{
- COMMENT("Linux 32-bit sigcontext offsets.");
- OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
- OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
- OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
- BLANK();
-}
-#endif
-
void output_signal_defined(void)
{
COMMENT("Linux signal numbers.");
diff --git a/kernel/arch/mips/kernel/branch.c b/kernel/arch/mips/kernel/branch.c
index c0c5e5972..d8f9b357b 100644
--- a/kernel/arch/mips/kernel/branch.c
+++ b/kernel/arch/mips/kernel/branch.c
@@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
case blezl_op: /* not really i_format */
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case blez_op:
/*
@@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
case bgtzl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case bgtz_op:
/*
diff --git a/kernel/arch/mips/kernel/cevt-bcm1480.c b/kernel/arch/mips/kernel/cevt-bcm1480.c
index 797645718..940ac00e9 100644
--- a/kernel/arch/mips/kernel/cevt-bcm1480.c
+++ b/kernel/arch/mips/kernel/cevt-bcm1480.c
@@ -40,8 +40,8 @@
* The general purpose timer ticks at 1MHz independent of
* the rest of the system
*/
-static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -49,24 +49,22 @@ static void sibyte_set_mode(enum clock_event_mode mode,
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- __raw_writeq(0, cfg);
- __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
- __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
- cfg);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* Stop the timer until we actually program a shot */
- case CLOCK_EVT_MODE_SHUTDOWN:
- __raw_writeq(0, cfg);
- break;
-
- case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
- case CLOCK_EVT_MODE_RESUME:
- ;
- }
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+ return 0;
+}
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+ return 0;
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -91,7 +89,7 @@ static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
void __iomem *cfg;
unsigned long tmode;
- if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ if (clockevent_state_periodic(cd))
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
@@ -130,7 +128,9 @@ void sb1480_clockevent_init(void)
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
- cd->set_mode = sibyte_set_mode;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
clockevents_register_device(cd);
bcm1480_mask_irq(cpu, irq);
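
The conversion above is the pattern repeated across every cevt-* driver in this patch: the multiplexed set_mode(enum clock_event_mode) callback gives way to one callback per target state, each returning 0 on success. A minimal user-space model of the new shape, assuming nothing beyond the standard C library; the struct and handlers here are toy stand-ins, not the kernel's <linux/clockchips.h> definitions:

#include <stdio.h>

struct clock_event_device {
	const char *name;
	/* New-style hooks: one per state; the int return reports failure. */
	int (*set_state_periodic)(struct clock_event_device *evt);
	int (*set_state_oneshot)(struct clock_event_device *evt);
	int (*set_state_shutdown)(struct clock_event_device *evt);
};

static int demo_set_periodic(struct clock_event_device *evt)
{
	printf("%s: program continuous mode\n", evt->name);
	return 0;
}

static int demo_shutdown(struct clock_event_device *evt)
{
	printf("%s: stop timer\n", evt->name);
	return 0;
}

int main(void)
{
	struct clock_event_device cd = {
		.name               = "demo",
		.set_state_periodic = demo_set_periodic,
		.set_state_shutdown = demo_shutdown,
		/* One handler may serve two states: stop until a shot is set,
		 * exactly as cevt-bcm1480.c wires sibyte_shutdown to both
		 * set_state_shutdown and set_state_oneshot. */
		.set_state_oneshot  = demo_shutdown,
	};

	return cd.set_state_periodic(&cd) || cd.set_state_shutdown(&cd);
}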
diff --git a/kernel/arch/mips/kernel/cevt-ds1287.c b/kernel/arch/mips/kernel/cevt-ds1287.c
index ff1f01b72..77a5ddf53 100644
--- a/kernel/arch/mips/kernel/cevt-ds1287.c
+++ b/kernel/arch/mips/kernel/cevt-ds1287.c
@@ -59,27 +59,32 @@ static int ds1287_set_next_event(unsigned long delta,
return -EINVAL;
}
-static void ds1287_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int ds1287_shutdown(struct clock_event_device *evt)
{
u8 val;
spin_lock(&rtc_lock);
val = CMOS_READ(RTC_REG_B);
+ val &= ~RTC_PIE;
+ CMOS_WRITE(val, RTC_REG_B);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- val |= RTC_PIE;
- break;
- default:
- val &= ~RTC_PIE;
- break;
- }
+ spin_unlock(&rtc_lock);
+ return 0;
+}
+static int ds1287_set_periodic(struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+ val |= RTC_PIE;
CMOS_WRITE(val, RTC_REG_B);
spin_unlock(&rtc_lock);
+ return 0;
}
static void ds1287_event_handler(struct clock_event_device *dev)
@@ -87,11 +92,13 @@ static void ds1287_event_handler(struct clock_event_device *dev)
}
static struct clock_event_device ds1287_clockevent = {
- .name = "ds1287",
- .features = CLOCK_EVT_FEAT_PERIODIC,
- .set_next_event = ds1287_set_next_event,
- .set_mode = ds1287_set_mode,
- .event_handler = ds1287_event_handler,
+ .name = "ds1287",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = ds1287_set_next_event,
+ .set_state_shutdown = ds1287_shutdown,
+ .set_state_periodic = ds1287_set_periodic,
+ .tick_resume = ds1287_shutdown,
+ .event_handler = ds1287_event_handler,
};
static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
diff --git a/kernel/arch/mips/kernel/cevt-gt641xx.c b/kernel/arch/mips/kernel/cevt-gt641xx.c
index f06946075..660400511 100644
--- a/kernel/arch/mips/kernel/cevt-gt641xx.c
+++ b/kernel/arch/mips/kernel/cevt-gt641xx.c
@@ -64,8 +64,7 @@ static int gt641xx_timer0_set_next_event(unsigned long delta,
return 0;
}
-static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
{
u32 ctrl;
@@ -73,21 +72,39 @@ static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- ctrl |= GT_TC_CONTROL_ENTC0_MSK;
- break;
- default:
- break;
- }
+static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
}
static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
@@ -95,12 +112,16 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
}
static struct clock_event_device gt641xx_timer0_clockevent = {
- .name = "gt641xx-timer0",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .irq = GT641XX_TIMER0_IRQ,
- .set_next_event = gt641xx_timer0_set_next_event,
- .set_mode = gt641xx_timer0_set_mode,
- .event_handler = gt641xx_timer0_event_handler,
+ .name = "gt641xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .irq = GT641XX_TIMER0_IRQ,
+ .set_next_event = gt641xx_timer0_set_next_event,
+ .set_state_shutdown = gt641xx_timer0_shutdown,
+ .set_state_periodic = gt641xx_timer0_set_periodic,
+ .set_state_oneshot = gt641xx_timer0_set_oneshot,
+ .tick_resume = gt641xx_timer0_shutdown,
+ .event_handler = gt641xx_timer0_event_handler,
};
static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
diff --git a/kernel/arch/mips/kernel/cevt-r4k.c b/kernel/arch/mips/kernel/cevt-r4k.c
index d70c4d893..8dfe6a6e1 100644
--- a/kernel/arch/mips/kernel/cevt-r4k.c
+++ b/kernel/arch/mips/kernel/cevt-r4k.c
@@ -28,12 +28,6 @@ static int mips_next_event(unsigned long delta,
return res;
}
-void mips_set_clock_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- /* Nothing to do ... */
-}
-
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
@@ -174,6 +168,11 @@ int c0_compare_int_usable(void)
return 1;
}
+unsigned int __weak get_c0_compare_int(void)
+{
+ return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+}
+
int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -189,11 +188,9 @@ int r4k_clockevent_init(void)
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
- * interrupt number of it's liking.
+ * interrupt number of its liking.
*/
- irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
- if (get_c0_compare_int)
- irq = get_c0_compare_int();
+ irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
@@ -212,7 +209,6 @@ int r4k_clockevent_init(void)
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
diff --git a/kernel/arch/mips/kernel/cevt-sb1250.c b/kernel/arch/mips/kernel/cevt-sb1250.c
index 5ea6d6b1d..3d860efd6 100644
--- a/kernel/arch/mips/kernel/cevt-sb1250.c
+++ b/kernel/arch/mips/kernel/cevt-sb1250.c
@@ -38,8 +38,20 @@
* The general purpose timer ticks at 1MHz independent of
* the rest of the system
*/
-static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+
+ return 0;
+}
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -47,24 +59,11 @@ static void sibyte_set_mode(enum clock_event_mode mode,
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- __raw_writeq(0, cfg);
- __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
- __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
- cfg);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* Stop the timer until we actually program a shot */
- case CLOCK_EVT_MODE_SHUTDOWN:
- __raw_writeq(0, cfg);
- break;
-
- case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
- case CLOCK_EVT_MODE_RESUME:
- ;
- }
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+
+ return 0;
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -89,7 +88,7 @@ static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
void __iomem *cfg;
unsigned long tmode;
- if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ if (clockevent_state_periodic(cd))
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
@@ -129,7 +128,9 @@ void sb1250_clockevent_init(void)
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
- cd->set_mode = sibyte_set_mode;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
clockevents_register_device(cd);
sb1250_mask_irq(cpu, irq);
diff --git a/kernel/arch/mips/kernel/cevt-txx9.c b/kernel/arch/mips/kernel/cevt-txx9.c
index 723932441..537eefdf8 100644
--- a/kernel/arch/mips/kernel/cevt-txx9.c
+++ b/kernel/arch/mips/kernel/cevt-txx9.c
@@ -85,36 +85,54 @@ static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
__raw_writel(0, &tmrptr->tisr);
}
-static void txx9tmr_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
- &tmrptr->itmr);
- /* start timer */
- __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
- evt->shift,
- &tmrptr->cpra);
- __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- __raw_writel(0, &tmrptr->itmr);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
- break;
- case CLOCK_EVT_MODE_RESUME:
- __raw_writel(TIMER_CCD, &tmrptr->ccdr);
- __raw_writel(0, &tmrptr->itmr);
- break;
- }
+
+ __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ /* start timer */
+ __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
+ &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_tick_resume(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
}
static int txx9tmr_set_next_event(unsigned long delta,
@@ -133,12 +151,15 @@ static int txx9tmr_set_next_event(unsigned long delta,
static struct txx9_clock_event_device txx9_clock_event_device = {
.cd = {
- .name = "TXx9",
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_mode = txx9tmr_set_mode,
- .set_next_event = txx9tmr_set_next_event,
+ .name = "TXx9",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_state_shutdown = txx9tmr_set_state_shutdown,
+ .set_state_periodic = txx9tmr_set_state_periodic,
+ .set_state_oneshot = txx9tmr_set_state_oneshot,
+ .tick_resume = txx9tmr_tick_resume,
+ .set_next_event = txx9tmr_set_next_event,
},
};
diff --git a/kernel/arch/mips/kernel/cps-vec-ns16550.S b/kernel/arch/mips/kernel/cps-vec-ns16550.S
new file mode 100644
index 000000000..6d246ad05
--- /dev/null
+++ b/kernel/arch/mips/kernel/cps-vec-ns16550.S
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <linux/serial_reg.h>
+
+#define UART_TX_OFS (UART_TX << CONFIG_MIPS_CPS_NS16550_SHIFT)
+#define UART_LSR_OFS (UART_LSR << CONFIG_MIPS_CPS_NS16550_SHIFT)
+
+/**
+ * _mips_cps_putc() - write a character to the UART
+ * @a0: ASCII character to write
+ * @t9: UART base address
+ */
+LEAF(_mips_cps_putc)
+1: lw t0, UART_LSR_OFS(t9)
+ andi t0, t0, UART_LSR_TEMT
+ beqz t0, 1b
+ sb a0, UART_TX_OFS(t9)
+ jr ra
+ END(_mips_cps_putc)
+
+/**
+ * _mips_cps_puts() - write a string to the UART
+ * @a0: pointer to NULL-terminated ASCII string
+ * @t9: UART base address
+ *
+ * Write a null-terminated ASCII string to the UART.
+ */
+NESTED(_mips_cps_puts, 0, ra)
+ move s7, ra
+ move s6, a0
+
+1: lb a0, 0(s6)
+ beqz a0, 2f
+ jal _mips_cps_putc
+ PTR_ADDIU s6, s6, 1
+ b 1b
+
+2: jr s7
+ END(_mips_cps_puts)
+
+/**
+ * _mips_cps_putx4 - write a 4b hex value to the UART
+ * @a0: the 4b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a single hexadecimal character to the UART.
+ */
+NESTED(_mips_cps_putx4, 0, ra)
+ andi a0, a0, 0xf
+ li t0, '0'
+ blt a0, 10, 1f
+ li t0, 'a'
+ addiu a0, a0, -10
+1: addu a0, a0, t0
+ b _mips_cps_putc
+ END(_mips_cps_putx4)
+
+/**
+ * _mips_cps_putx8 - write an 8b hex value to the UART
+ * @a0: the 8b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write an 8 bit value (ie. 2 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx8, 0, ra)
+ move s3, ra
+ move s2, a0
+ srl a0, a0, 4
+ jal _mips_cps_putx4
+ move a0, s2
+ move ra, s3
+ b _mips_cps_putx4
+ END(_mips_cps_putx8)
+
+/**
+ * _mips_cps_putx16 - write a 16b hex value to the UART
+ * @a0: the 16b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 16 bit value (ie. 4 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx16, 0, ra)
+ move s5, ra
+ move s4, a0
+ srl a0, a0, 8
+ jal _mips_cps_putx8
+ move a0, s4
+ move ra, s5
+ b _mips_cps_putx8
+ END(_mips_cps_putx16)
+
+/**
+ * _mips_cps_putx32 - write a 32b hex value to the UART
+ * @a0: the 32b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 32 bit value (ie. 8 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx32, 0, ra)
+ move s7, ra
+ move s6, a0
+ srl a0, a0, 16
+ jal _mips_cps_putx16
+ move a0, s6
+ move ra, s7
+ b _mips_cps_putx16
+ END(_mips_cps_putx32)
+
+#ifdef CONFIG_64BIT
+
+/**
+ * _mips_cps_putx64 - write a 64b hex value to the UART
+ * @a0: the 64b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 64 bit value (ie. 16 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx64, 0, ra)
+ move sp, ra
+ move s8, a0
+ dsrl32 a0, a0, 0
+ jal _mips_cps_putx32
+ move a0, s8
+ move ra, sp
+ b _mips_cps_putx32
+ END(_mips_cps_putx64)
+
+#define _mips_cps_putxlong _mips_cps_putx64
+
+#else /* !CONFIG_64BIT */
+
+#define _mips_cps_putxlong _mips_cps_putx32
+
+#endif /* !CONFIG_64BIT */
+
+/**
+ * mips_cps_bev_dump() - dump relevant exception state to UART
+ * @a0: pointer to NULL-terminated ASCII string naming the exception
+ *
+ * Write information that may be useful in debugging an exception to the
+ * UART configured by CONFIG_MIPS_CPS_NS16550_*. As this BEV exception
+ * will only be run if something goes horribly wrong very early during
+ * the bringup of a core and it is very likely to be unsafe to perform
+ * memory accesses at that point (cache state indeterminate, EVA may not
+ * be configured, coherence may be disabled) let alone have a stack,
+ * this is all written in assembly using only registers & unmapped
+ * uncached access to the UART registers.
+ */
+LEAF(mips_cps_bev_dump)
+ move s0, ra
+ move s1, a0
+
+ li t9, CKSEG1ADDR(CONFIG_MIPS_CPS_NS16550_BASE)
+
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ PTR_LA a0, str_bev
+ jal _mips_cps_puts
+ move a0, s1
+ jal _mips_cps_puts
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+
+#define DUMP_COP0_REG(reg, name, sz, _mfc0) \
+ PTR_LA a0, 8f; \
+ jal _mips_cps_puts; \
+ _mfc0 a0, reg; \
+ jal _mips_cps_putx##sz; \
+ PTR_LA a0, str_newline; \
+ jal _mips_cps_puts; \
+ TEXT(name)
+
+ DUMP_COP0_REG(CP0_CAUSE, "Cause: 0x", 32, mfc0)
+ DUMP_COP0_REG(CP0_STATUS, "Status: 0x", 32, mfc0)
+ DUMP_COP0_REG(CP0_EBASE, "EBase: 0x", long, MFC0)
+ DUMP_COP0_REG(CP0_BADVADDR, "BadVAddr: 0x", long, MFC0)
+ DUMP_COP0_REG(CP0_BADINSTR, "BadInstr: 0x", 32, mfc0)
+
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ jr s0
+ END(mips_cps_bev_dump)
+
+.pushsection .data
+str_bev: .asciiz "BEV Exception: "
+str_newline: .asciiz "\r\n"
+.popsection
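
For readers following the assembly, _mips_cps_putc above is the classic ns16550 polled-transmit loop: spin on the line status register until the transmitter is empty, then store the byte to the TX register. A hedged C sketch of the same protocol follows; the base address and register shift are placeholder assumptions standing in for CONFIG_MIPS_CPS_NS16550_BASE and CONFIG_MIPS_CPS_NS16550_SHIFT, while the register offsets and the TEMT bit match <linux/serial_reg.h>:

#include <stdint.h>

#define UART_BASE      0xb80003f8u /* assumed platform-specific KSEG1 address */
#define UART_REG_SHIFT 0           /* assumed byte-wide register spacing */

#define UART_TX       0    /* transmit holding register */
#define UART_LSR      5    /* line status register */
#define UART_LSR_TEMT 0x40 /* transmitter empty */

static volatile uint8_t *uart_reg(unsigned int off)
{
	return (volatile uint8_t *)(uintptr_t)(UART_BASE + (off << UART_REG_SHIFT));
}

static void uart_putc(char c)
{
	/* Wait until the holding and shift registers are both empty. */
	while (!(*uart_reg(UART_LSR) & UART_LSR_TEMT))
		;
	*uart_reg(UART_TX) = (uint8_t)c;
}

static void uart_puts(const char *s)
{
	/* Mirrors _mips_cps_puts: emit bytes until the terminating NUL. */
	while (*s)
		uart_putc(*s++);
}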
diff --git a/kernel/arch/mips/kernel/cps-vec.S b/kernel/arch/mips/kernel/cps-vec.S
index 55b759a00..ac81edd44 100644
--- a/kernel/arch/mips/kernel/cps-vec.S
+++ b/kernel/arch/mips/kernel/cps-vec.S
@@ -25,20 +25,39 @@
.set noreorder
+#ifdef CONFIG_64BIT
+# define STATUS_BITDEPS ST0_KX
+#else
+# define STATUS_BITDEPS 0
+#endif
+
+#ifdef CONFIG_MIPS_CPS_NS16550
+
+#define DUMP_EXCEP(name) \
+ PTR_LA a0, 8f; \
+ jal mips_cps_bev_dump; \
+ nop; \
+ TEXT(name)
+
+#else /* !CONFIG_MIPS_CPS_NS16550 */
+
+#define DUMP_EXCEP(name)
+
+#endif /* !CONFIG_MIPS_CPS_NS16550 */
+
/*
* Set dest to non-zero if the core supports the MT ASE, else zero. If
* MT is not supported then branch to nomt.
*/
.macro has_mt dest, nomt
- mfc0 \dest, CP0_CONFIG
- bgez \dest, \nomt
- mfc0 \dest, CP0_CONFIG, 1
+ mfc0 \dest, CP0_CONFIG, 1
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 2
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 3
andi \dest, \dest, MIPS_CONF3_MT
beqz \dest, \nomt
+ nop
.endm
.section .text.cps-vec
@@ -46,11 +65,9 @@
LEAF(mips_cps_core_entry)
/*
- * These first 12 bytes will be patched by cps_smp_setup to load the
- * base address of the CM GCRs into register v1 and the CCA to use into
- * register s0.
+ * These first 4 bytes will be patched by cps_smp_setup to load the
+ * CCA to use into register s0.
*/
- .quad 0
.word 0
/* Check whether we're here due to an NMI */
@@ -60,7 +77,7 @@ LEAF(mips_cps_core_entry)
nop
/* This is an NMI */
- la k0, nmi_handler
+ PTR_LA k0, nmi_handler
jr k0
nop
@@ -70,7 +87,7 @@ not_nmi:
mtc0 t0, CP0_CAUSE
/* Setup Status */
- li t0, ST0_CU1 | ST0_CU0
+ li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
mtc0 t0, CP0_STATUS
/*
@@ -107,10 +124,10 @@ not_nmi:
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- add a1, a0, t1
+ li a0, CKSEG0
+ PTR_ADD a1, a0, t1
1: cache Index_Store_Tag_I, 0(a0)
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
bne a0, a1, 1b
nop
icache_done:
@@ -134,12 +151,12 @@ icache_done:
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- addu a1, a0, t1
- subu a1, a1, t0
+ li a0, CKSEG0
+ PTR_ADDU a1, a0, t1
+ PTR_SUBU a1, a1, t0
1: cache Index_Store_Tag_D, 0(a0)
bne a0, a1, 1b
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
dcache_done:
/* Set Kseg0 CCA to that in s0 */
@@ -150,13 +167,19 @@ dcache_done:
mtc0 t0, CP0_CONFIG
ehb
+ /* Calculate an uncached address for the CM GCRs */
+ MFC0 v1, CP0_CMGCRBASE
+ PTR_SLL v1, v1, 4
+ PTR_LI t0, UNCAC_BASE
+ PTR_ADDU v1, v1, t0
+
/* Enter the coherent domain */
li t0, 0xff
sw t0, GCR_CL_COHERENCE_OFS(v1)
ehb
/* Jump to kseg0 */
- la t0, 1f
+ PTR_LA t0, 1f
jr t0
nop
@@ -178,58 +201,62 @@ dcache_done:
nop
/* Off we go! */
- lw t1, VPEBOOTCFG_PC(v0)
- lw gp, VPEBOOTCFG_GP(v0)
- lw sp, VPEBOOTCFG_SP(v0)
+ PTR_L t1, VPEBOOTCFG_PC(v0)
+ PTR_L gp, VPEBOOTCFG_GP(v0)
+ PTR_L sp, VPEBOOTCFG_SP(v0)
jr t1
nop
END(mips_cps_core_entry)
.org 0x200
LEAF(excep_tlbfill)
+ DUMP_EXCEP("TLB Fill")
b .
nop
END(excep_tlbfill)
.org 0x280
LEAF(excep_xtlbfill)
+ DUMP_EXCEP("XTLB Fill")
b .
nop
END(excep_xtlbfill)
.org 0x300
LEAF(excep_cache)
+ DUMP_EXCEP("Cache")
b .
nop
END(excep_cache)
.org 0x380
LEAF(excep_genex)
+ DUMP_EXCEP("General")
b .
nop
END(excep_genex)
.org 0x400
LEAF(excep_intex)
+ DUMP_EXCEP("Interrupt")
b .
nop
END(excep_intex)
.org 0x480
LEAF(excep_ejtag)
- la k0, ejtag_debug_handler
+ DUMP_EXCEP("EJTAG")
+ PTR_LA k0, ejtag_debug_handler
jr k0
nop
END(excep_ejtag)
LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
/* Check that the core implements the MT ASE */
has_mt t0, 3f
- nop
.set push
- .set mips32r2
.set mt
/* Only allow 1 TC per VPE to execute... */
@@ -237,7 +264,7 @@ LEAF(mips_cps_core_init)
/* ...and for the moment only 1 VPE */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
@@ -250,25 +277,25 @@ LEAF(mips_cps_core_init)
mfc0 t0, CP0_MVPCONF0
srl t0, t0, MVPCONF0_PVPE_SHIFT
andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
- addiu t7, t0, 1
+ addiu ta3, t0, 1
/* If there's only 1, we're done */
beqz t0, 2f
nop
/* Loop through each VPE within this core */
- li t5, 1
+ li ta1, 1
1: /* Operate on the appropriate TC */
- mtc0 t5, CP0_VPECONTROL
+ mtc0 ta1, CP0_VPECONTROL
ehb
/* Bind TC to VPE (1:1 TC:VPE mapping) */
- mttc0 t5, CP0_TCBIND
+ mttc0 ta1, CP0_TCBIND
/* Set exclusive TC, non-active, master */
li t0, VPECONF0_MVP
- sll t1, t5, VPECONF0_XTC_SHIFT
+ sll t1, ta1, VPECONF0_XTC_SHIFT
or t0, t0, t1
mttc0 t0, CP0_VPECONF0
@@ -280,8 +307,8 @@ LEAF(mips_cps_core_init)
mttc0 t0, CP0_TCHALT
/* Next VPE */
- addiu t5, t5, 1
- slt t0, t5, t7
+ addiu ta1, ta1, 1
+ slt t0, ta1, ta3
bnez t0, 1b
nop
@@ -298,20 +325,21 @@ LEAF(mips_cps_core_init)
LEAF(mips_cps_boot_vpes)
/* Retrieve CM base address */
- la t0, mips_cm_base
- lw t0, 0(t0)
+ PTR_LA t0, mips_cm_base
+ PTR_L t0, 0(t0)
/* Calculate a pointer to this cores struct core_boot_config */
lw t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
- la t1, mips_cps_core_bootcfg
- lw t1, 0(t1)
- addu t0, t0, t1
+ PTR_LA t1, mips_cps_core_bootcfg
+ PTR_L t1, 0(t1)
+ PTR_ADDU t0, t0, t1
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
- has_mt t6, 1f
- li t9, 0
+ li t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
+ has_mt ta2, 1f
/* Find the number of VPEs present in the core */
mfc0 t1, CP0_MVPCONF0
@@ -330,28 +358,28 @@ LEAF(mips_cps_boot_vpes)
/* Retrieve the VPE ID from EBase.CPUNum */
mfc0 t9, $15, 1
and t9, t9, t1
+#endif
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
mul v0, t9, t1
- lw t7, COREBOOTCFG_VPECONFIG(t0)
- addu v0, v0, t7
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
+ PTR_ADDU v0, v0, ta3
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
/* If the core doesn't support MT then return */
- bnez t6, 1f
+ bnez ta2, 1f
nop
jr ra
nop
.set push
- .set mips32r2
.set mt
1: /* Enter VPE configuration state */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
1: mfc0 t1, CP0_MVPCONTROL
@@ -360,12 +388,12 @@ LEAF(mips_cps_boot_vpes)
ehb
/* Loop through each VPE */
- lw t6, COREBOOTCFG_VPEMASK(t0)
- move t8, t6
- li t5, 0
+ PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
+ move t8, ta2
+ li ta1, 0
/* Check whether the VPE should be running. If not, skip it */
-1: andi t0, t6, 1
+1: andi t0, ta2, 1
beqz t0, 2f
nop
@@ -373,7 +401,7 @@ LEAF(mips_cps_boot_vpes)
mfc0 t0, CP0_VPECONTROL
ori t0, t0, VPECONTROL_TARGTC
xori t0, t0, VPECONTROL_TARGTC
- or t0, t0, t5
+ or t0, t0, ta1
mtc0 t0, CP0_VPECONTROL
ehb
@@ -384,8 +412,8 @@ LEAF(mips_cps_boot_vpes)
/* Calculate a pointer to the VPEs struct vpe_boot_config */
li t0, VPEBOOTCFG_SIZE
- mul t0, t0, t5
- addu t0, t0, t7
+ mul t0, t0, ta1
+ addu t0, t0, ta3
/* Set the TC restart PC */
lw t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +451,9 @@ LEAF(mips_cps_boot_vpes)
mttc0 t0, CP0_VPECONF0
/* Next VPE */
-2: srl t6, t6, 1
- addiu t5, t5, 1
- bnez t6, 1b
+2: srl ta2, ta2, 1
+ addiu ta1, ta1, 1
+ bnez ta2, 1b
nop
/* Leave VPE configuration state */
@@ -445,13 +473,13 @@ LEAF(mips_cps_boot_vpes)
/* This VPE should be offline, halt the TC */
li t0, TCHALT_H
mtc0 t0, CP0_TCHALT
- la t0, 1f
+ PTR_LA t0, 1f
1: jr.hb t0
nop
2: .set pop
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
/* Return */
jr ra
@@ -466,10 +494,10 @@ LEAF(mips_cps_boot_vpes)
.set noat
lw $1, TI_CPU(gp)
sll $1, $1, LONGLOG
- la \dest, __per_cpu_offset
+ PTR_LA \dest, __per_cpu_offset
addu $1, $1, \dest
lw $1, 0($1)
- la \dest, cps_cpu_state
+ PTR_LA \dest, cps_cpu_state
addu \dest, \dest, $1
.set pop
.endm
diff --git a/kernel/arch/mips/kernel/cpu-probe.c b/kernel/arch/mips/kernel/cpu-probe.c
index 209e5b76c..6b9064499 100644
--- a/kernel/arch/mips/kernel/cpu-probe.c
+++ b/kernel/arch/mips/kernel/cpu-probe.c
@@ -32,6 +32,9 @@
#include <asm/spram.h>
#include <asm/uaccess.h>
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+
/*
* Get the FPU Implementation/Revision.
*/
@@ -188,7 +191,7 @@ __setup("nohtw", htw_disable);
static int mips_ftlb_disabled;
static int mips_has_ftlb_configured;
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
static int __init ftlb_disable(char *s)
{
@@ -202,7 +205,10 @@ static int __init ftlb_disable(char *s)
return 1;
/* Disable it in the boot cpu */
- set_ftlb_enable(&cpu_data[0], 0);
+ if (set_ftlb_enable(&cpu_data[0], 0)) {
+ pr_warn("Can't turn FTLB off\n");
+ return 1;
+ }
back_to_back_c0_hazard();
@@ -364,45 +370,58 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
return 3;
}
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
{
- unsigned int config6;
+ unsigned int config;
/* It's implementation dependent how the FTLB can be enabled */
switch (c->cputype) {
case CPU_PROAPTIV:
case CPU_P5600:
/* proAptiv & related cores use Config6 to enable the FTLB */
- config6 = read_c0_config6();
+ config = read_c0_config6();
/* Clear the old probability value */
- config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+ config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
if (enable)
/* Enable FTLB */
- write_c0_config6(config6 |
+ write_c0_config6(config |
(calculate_ftlb_probability(c)
<< MIPS_CONF6_FTLBP_SHIFT)
| MIPS_CONF6_FTLBEN);
else
/* Disable FTLB */
- write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
- back_to_back_c0_hazard();
+ write_c0_config6(config & ~MIPS_CONF6_FTLBEN);
+ break;
+ case CPU_I6400:
+ /* I6400 & related cores use Config7 to configure FTLB */
+ config = read_c0_config7();
+ /* Clear the old probability value */
+ config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
+ write_c0_config7(config | (calculate_ftlb_probability(c)
+ << MIPS_CONF7_FTLBP_SHIFT));
break;
+ default:
+ return 1;
}
+
+ return 0;
}
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
{
unsigned int config0;
- int isa;
+ int isa, mt;
config0 = read_c0_config();
/*
* Look for Standard TLB or Dual VTLB and FTLB
*/
- if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
- (((config0 & MIPS_CONF_MT) >> 7) == 4))
+ mt = config0 & MIPS_CONF_MT;
+ if (mt == MIPS_CONF_MT_TLB)
c->options |= MIPS_CPU_TLB;
+ else if (mt == MIPS_CONF_MT_FTLB)
+ c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
isa = (config0 & MIPS_CONF_AT) >> 13;
switch (isa) {
@@ -517,13 +536,14 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_SEGMENTS;
if (config3 & MIPS_CONF3_MSA)
c->ases |= MIPS_ASE_MSA;
- /* Only tested on 32-bit cores */
- if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
+ if (config3 & MIPS_CONF3_PW) {
c->htw_seq = 0;
c->options |= MIPS_CPU_HTW;
}
if (config3 & MIPS_CONF3_CDMM)
c->options |= MIPS_CPU_CDMM;
+ if (config3 & MIPS_CONF3_SP)
+ c->options |= MIPS_CPU_SP;
return config3 & MIPS_CONF_M;
}
@@ -540,7 +560,19 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
if (cpu_has_tlb) {
if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
c->options |= MIPS_CPU_TLBINV;
- mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+
+ /*
+ * R6 has dropped the MMUExtDef field from config4.
+ * On R6 the fields always describe the FTLB, and only if it is
+ * present according to Config.MT.
+ */
+ if (!cpu_has_mips_r6)
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ else if (cpu_has_ftlb)
+ mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
+ else
+ mmuextdef = 0;
+
switch (mmuextdef) {
case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
@@ -945,7 +977,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
@@ -960,7 +992,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST;
c->tlbsize = 64;
break;
case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
@@ -1121,6 +1153,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_P5600;
__cpu_name[cpu] = "MIPS P5600";
break;
+ case PRID_IMP_I6400:
+ c->cputype = CPU_I6400;
+ __cpu_name[cpu] = "MIPS I6400";
+ break;
case PRID_IMP_M5150:
c->cputype = CPU_M5150;
__cpu_name[cpu] = "MIPS M5150";
@@ -1443,7 +1479,9 @@ void cpu_probe(void)
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
- case PRID_COMP_INGENIC:
+ case PRID_COMP_INGENIC_D0:
+ case PRID_COMP_INGENIC_D1:
+ case PRID_COMP_INGENIC_E1:
cpu_probe_ingenic(c, cpu);
break;
case PRID_COMP_NETLOGIC:
@@ -1478,6 +1516,10 @@ void cpu_probe(void)
else
cpu_set_nofpu_opts(c);
+ if (cpu_has_bp_ghist)
+ write_c0_r10k_diag(read_c0_r10k_diag() |
+ R10K_DIAG_E_GHIST);
+
if (cpu_has_mips_r2_r6) {
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
/* R2 has Performance Counter Interrupt indicator */
@@ -1486,10 +1528,14 @@ void cpu_probe(void)
else
c->srsets = 1;
+ if (cpu_has_mips_r6)
+ elf_hwcap |= HWCAP_MIPS_R6;
+
if (cpu_has_msa) {
c->msa_id = cpu_get_msa_id();
WARN(c->msa_id & MSA_IR_WRPF,
"Vector register partitioning unimplemented!");
+ elf_hwcap |= HWCAP_MIPS_MSA;
}
cpu_probe_vmbits(c);
diff --git a/kernel/arch/mips/kernel/csrc-r4k.c b/kernel/arch/mips/kernel/csrc-r4k.c
index e5ed7ada1..1f910563f 100644
--- a/kernel/arch/mips/kernel/csrc-r4k.c
+++ b/kernel/arch/mips/kernel/csrc-r4k.c
@@ -28,6 +28,43 @@ static u64 notrace r4k_read_sched_clock(void)
return read_c0_count();
}
+static inline unsigned int rdhwr_count(void)
+{
+ unsigned int count;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set mips32r2\n"
+ " rdhwr %0, $2\n"
+ " .set pop\n"
+ : "=r" (count));
+
+ return count;
+}
+
+static bool rdhwr_count_usable(void)
+{
+ unsigned int prev, curr, i;
+
+ /*
+ * Older QEMUs have a broken implementation of RDHWR for the CP0 count
+ * which always returns a constant value. Try to identify this and don't
+ * use it in the VDSO if it is broken. This workaround can be removed
+ * once the fix has been in QEMU stable for a reasonable amount of time.
+ */
+ for (i = 0, prev = rdhwr_count(); i < 100; i++) {
+ curr = rdhwr_count();
+
+ if (curr != prev)
+ return true;
+
+ prev = curr;
+ }
+
+ pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
+ return false;
+}
+
int __init init_r4k_clocksource(void)
{
if (!cpu_has_counter || !mips_hpt_frequency)
@@ -36,6 +73,13 @@ int __init init_r4k_clocksource(void)
/* Calculate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
+ /*
+ * R2 onwards makes the count accessible to user mode so it can be used
+ * by the VDSO (HWREna is configured by configure_hwrena()).
+ */
+ if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
+ clocksource_mips.archdata.vdso_clock_mode = VDSO_CLOCK_R4K;
+
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
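
The rdhwr_count_usable() check above boils down to "trust the counter only if it ever advances". The same probe, restated as a portable user-space sketch: clock() is a stand-in for the RDHWR-readable CP0 count, the 100-sample limit mirrors the kernel loop, and everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool counter_advances(clock_t (*read_counter)(void))
{
	clock_t prev = read_counter();

	for (int i = 0; i < 100; i++) {
		clock_t curr = read_counter();

		if (curr != prev)
			return true; /* the counter moved: usable */
		prev = curr;
	}
	return false; /* constant across all samples: treat as broken */
}

int main(void)
{
	printf("counter usable: %s\n", counter_advances(clock) ? "yes" : "no");
	return 0;
}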
diff --git a/kernel/arch/mips/kernel/genex.S b/kernel/arch/mips/kernel/genex.S
index af42e7003..baa7b6fc0 100644
--- a/kernel/arch/mips/kernel/genex.S
+++ b/kernel/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
- __BUILD_clear_\clear
+ __build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp
diff --git a/kernel/arch/mips/kernel/head.S b/kernel/arch/mips/kernel/head.S
index 95afd663c..4e4cc5b9a 100644
--- a/kernel/arch/mips/kernel/head.S
+++ b/kernel/arch/mips/kernel/head.S
@@ -94,6 +94,22 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
+#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+ PTR_LA t0, __appended_dtb
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ li t1, 0xd00dfeed
+#else
+ li t1, 0xedfe0dd0
+#endif
+ lw t2, (t0)
+ bne t1, t2, not_found
+ nop
+
+ move a1, t0
+ PTR_LI a0, -2
+not_found:
+#endif
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
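
The new head.S block above accepts an appended device tree only when its first word is the flattened-device-tree magic, 0xd00dfeed, which is stored big-endian — hence the byte-swapped constant on little-endian builds. A C rendering of that check; the magic values come from the patch, while the function and buffer are illustrative:

#include <stddef.h>
#include <stdint.h>

#define FDT_MAGIC_BE 0xd00dfeedu /* magic as stored in the blob */
#define FDT_MAGIC_LE 0xedfe0dd0u /* the same bytes read on little-endian */

static const void *find_appended_dtb(const void *appended_dtb)
{
	uint32_t word = *(const uint32_t *)appended_dtb;

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint32_t expect = FDT_MAGIC_BE;
#else
	uint32_t expect = FDT_MAGIC_LE;
#endif
	/* Match head.S: hand the blob on only if the magic checks out. */
	return word == expect ? appended_dtb : NULL;
}

int main(void)
{
	uint32_t no_dtb = 0; /* image with nothing appended */

	return find_appended_dtb(&no_dtb) ? 0 : 1;
}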
diff --git a/kernel/arch/mips/kernel/i8259.c b/kernel/arch/mips/kernel/i8259.c
deleted file mode 100644
index a74ec3ae5..000000000
--- a/kernel/arch/mips/kernel/i8259.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Code to handle x86 style IRQs plus some generic interrupt stuff.
- *
- * Copyright (C) 1992 Linus Torvalds
- * Copyright (C) 1994 - 2000 Ralf Baechle
- */
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/syscore_ops.h>
-#include <linux/irq.h>
-
-#include <asm/i8259.h>
-#include <asm/io.h>
-
-/*
- * This is the 'legacy' 8259A Programmable Interrupt Controller,
- * present in the majority of PC/AT boxes.
- * plus some generic x86 specific things if generic specifics makes
- * any sense at all.
- * this file should become arch/i386/kernel/irq.c when the old irq.c
- * moves to arch independent land
- */
-
-static int i8259A_auto_eoi = -1;
-DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void disable_8259A_irq(struct irq_data *d);
-static void enable_8259A_irq(struct irq_data *d);
-static void mask_and_ack_8259A(struct irq_data *d);
-static void init_8259A(int auto_eoi);
-
-static struct irq_chip i8259A_chip = {
- .name = "XT-PIC",
- .irq_mask = disable_8259A_irq,
- .irq_disable = disable_8259A_irq,
- .irq_unmask = enable_8259A_irq,
- .irq_mask_ack = mask_and_ack_8259A,
-};
-
-/*
- * 8259A PIC functions to handle ISA devices:
- */
-
-/*
- * This contains the irq mask for both 8259A irq controllers,
- */
-static unsigned int cached_irq_mask = 0xffff;
-
-#define cached_master_mask (cached_irq_mask)
-#define cached_slave_mask (cached_irq_mask >> 8)
-
-static void disable_8259A_irq(struct irq_data *d)
-{
- unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
- unsigned long flags;
-
- mask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- cached_irq_mask |= mask;
- if (irq & 8)
- outb(cached_slave_mask, PIC_SLAVE_IMR);
- else
- outb(cached_master_mask, PIC_MASTER_IMR);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-static void enable_8259A_irq(struct irq_data *d)
-{
- unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
- unsigned long flags;
-
- mask = ~(1 << irq);
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- cached_irq_mask &= mask;
- if (irq & 8)
- outb(cached_slave_mask, PIC_SLAVE_IMR);
- else
- outb(cached_master_mask, PIC_MASTER_IMR);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-int i8259A_irq_pending(unsigned int irq)
-{
- unsigned int mask;
- unsigned long flags;
- int ret;
-
- irq -= I8259A_IRQ_BASE;
- mask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- if (irq < 8)
- ret = inb(PIC_MASTER_CMD) & mask;
- else
- ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
- return ret;
-}
-
-void make_8259A_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
- irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
- enable_irq(irq);
-}
-
-/*
- * This function assumes to be called rarely. Switching between
- * 8259A registers is slow.
- * This has to be protected by the irq controller spinlock
- * before being called.
- */
-static inline int i8259A_irq_real(unsigned int irq)
-{
- int value;
- int irqmask = 1 << irq;
-
- if (irq < 8) {
- outb(0x0B, PIC_MASTER_CMD); /* ISR register */
- value = inb(PIC_MASTER_CMD) & irqmask;
- outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
- return value;
- }
- outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
- value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
- outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
- return value;
-}
-
-/*
- * Careful! The 8259A is a fragile beast, it pretty
- * much _has_ to be done exactly like this (mask it
- * first, _then_ send the EOI, and the order of EOI
- * to the two 8259s is important!
- */
-static void mask_and_ack_8259A(struct irq_data *d)
-{
- unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
- unsigned long flags;
-
- irqmask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- /*
- * Lightweight spurious IRQ detection. We do not want
- * to overdo spurious IRQ handling - it's usually a sign
- * of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecessarily.
- *
- * Note that IRQ7 and IRQ15 (the two spurious IRQs
- * usually resulting from the 8259A-1|2 PICs) occur
- * even if the IRQ is masked in the 8259A. Thus we
- * can check spurious 8259A IRQs without doing the
- * quite slow i8259A_irq_real() call for every IRQ.
- * This does not cover 100% of spurious interrupts,
- * but should be enough to warn the user that there
- * is something bad going on ...
- */
- if (cached_irq_mask & irqmask)
- goto spurious_8259A_irq;
- cached_irq_mask |= irqmask;
-
-handle_real_irq:
- if (irq & 8) {
- inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
- outb(cached_slave_mask, PIC_SLAVE_IMR);
- outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
- outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
- } else {
- inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
- outb(cached_master_mask, PIC_MASTER_IMR);
- outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
- }
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
- return;
-
-spurious_8259A_irq:
- /*
- * this is the slow path - should happen rarely.
- */
- if (i8259A_irq_real(irq))
- /*
- * oops, the IRQ _is_ in service according to the
- * 8259A - not spurious, go handle it.
- */
- goto handle_real_irq;
-
- {
- static int spurious_irq_mask;
- /*
- * At this point we can be sure the IRQ is spurious,
- * lets ACK and report it. [once per IRQ]
- */
- if (!(spurious_irq_mask & irqmask)) {
- printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
- atomic_inc(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
- * simpler for us.
- */
- goto handle_real_irq;
- }
-}
-
-static void i8259A_resume(void)
-{
- if (i8259A_auto_eoi >= 0)
- init_8259A(i8259A_auto_eoi);
-}
-
-static void i8259A_shutdown(void)
-{
- /* Put the i8259A into a quiescent state that
- * the kernel initialization code can get it
- * out of.
- */
- if (i8259A_auto_eoi >= 0) {
- outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
- }
-}
-
-static struct syscore_ops i8259_syscore_ops = {
- .resume = i8259A_resume,
- .shutdown = i8259A_shutdown,
-};
-
-static int __init i8259A_init_sysfs(void)
-{
- register_syscore_ops(&i8259_syscore_ops);
- return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
-static void init_8259A(int auto_eoi)
-{
- unsigned long flags;
-
- i8259A_auto_eoi = auto_eoi;
-
- raw_spin_lock_irqsave(&i8259A_lock, flags);
-
- outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
-
- /*
- * outb_p - this has to work on a wide range of PC hardware.
- */
- outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
- outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
- outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
- if (auto_eoi) /* master does Auto EOI */
- outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
- else /* master expects normal EOI */
- outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
-
- outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
- outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
- outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
- outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
- if (auto_eoi)
- /*
- * In AEOI mode we just have to mask the interrupt
- * when acking.
- */
- i8259A_chip.irq_mask_ack = disable_8259A_irq;
- else
- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
-
- udelay(100); /* wait for 8259A to initialize */
-
- outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
- outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
-
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
- .handler = no_action,
- .name = "cascade",
- .flags = IRQF_NO_THREAD,
-};
-
-static struct resource pic1_io_resource = {
- .name = "pic1",
- .start = PIC_MASTER_CMD,
- .end = PIC_MASTER_IMR,
- .flags = IORESOURCE_BUSY
-};
-
-static struct resource pic2_io_resource = {
- .name = "pic2",
- .start = PIC_SLAVE_CMD,
- .end = PIC_SLAVE_IMR,
- .flags = IORESOURCE_BUSY
-};
-
-static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
- irq_set_probe(virq);
- return 0;
-}
-
-static struct irq_domain_ops i8259A_ops = {
- .map = i8259A_irq_domain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-/*
- * On systems with i8259-style interrupt controllers we assume for
- * driver compatibility reasons interrupts 0 - 15 to be the i8259
- * interrupts even if the hardware uses a different interrupt numbering.
- */
-void __init init_i8259_irqs(void)
-{
- struct irq_domain *domain;
-
- insert_resource(&ioport_resource, &pic1_io_resource);
- insert_resource(&ioport_resource, &pic2_io_resource);
-
- init_8259A(0);
-
- domain = irq_domain_add_legacy(NULL, 16, I8259A_IRQ_BASE, 0,
- &i8259A_ops, NULL);
- if (!domain)
- panic("Failed to add i8259 IRQ domain");
-
- setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
-}
diff --git a/kernel/arch/mips/kernel/idle.c b/kernel/arch/mips/kernel/idle.c
index e4f62b787..46794d64c 100644
--- a/kernel/arch/mips/kernel/idle.c
+++ b/kernel/arch/mips/kernel/idle.c
@@ -134,6 +134,16 @@ void __init check_wait(void)
return;
}
+ /*
+ * MIPSr6 specifies that masked interrupts should unblock an executing
+ * wait instruction, and thus that it is safe for us to use
+ * r4k_wait_irqoff. Yippee!
+ */
+ if (cpu_has_mips_r6) {
+ cpu_wait = r4k_wait_irqoff;
+ return;
+ }
+
switch (current_cpu_type()) {
case CPU_R3081:
case CPU_R3081E:
@@ -155,12 +165,12 @@ void __init check_wait(void)
case CPU_4KEC:
case CPU_4KSC:
case CPU_5KC:
+ case CPU_5KE:
case CPU_25KF:
case CPU_PR4450:
case CPU_BMIPS3300:
case CPU_BMIPS4350:
case CPU_BMIPS4380:
- case CPU_BMIPS5000:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
@@ -171,7 +181,9 @@ void __init check_wait(void)
case CPU_XLP:
cpu_wait = r4k_wait;
break;
-
+ case CPU_BMIPS5000:
+ cpu_wait = r4k_wait_irqoff;
+ break;
case CPU_RM7000:
cpu_wait = rm7k_wait_irqoff;
break;
diff --git a/kernel/arch/mips/kernel/irq.c b/kernel/arch/mips/kernel/irq.c
index 3c8a18a00..8eb5af805 100644
--- a/kernel/arch/mips/kernel/irq.c
+++ b/kernel/arch/mips/kernel/irq.c
@@ -25,48 +25,6 @@
#include <linux/atomic.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_KGDB
-int kgdb_early_setup;
-#endif
-
-static DECLARE_BITMAP(irq_map, NR_IRQS);
-
-int allocate_irqno(void)
-{
- int irq;
-
-again:
- irq = find_first_zero_bit(irq_map, NR_IRQS);
-
- if (irq >= NR_IRQS)
- return -ENOSPC;
-
- if (test_and_set_bit(irq, irq_map))
- goto again;
-
- return irq;
-}
-
-/*
- * Allocate the 16 legacy interrupts for i8259 devices. This happens early
- * in the kernel initialization so treating allocation failure as BUG() is
- * ok.
- */
-void __init alloc_legacy_irqno(void)
-{
- int i;
-
- for (i = 0; i <= 16; i++)
- BUG_ON(test_and_set_bit(i, irq_map));
-}
-
-void free_irqno(unsigned int irq)
-{
- smp_mb__before_atomic();
- clear_bit(irq, irq_map);
- smp_mb__after_atomic();
-}
-
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -93,20 +51,10 @@ void __init init_IRQ(void)
{
int i;
-#ifdef CONFIG_KGDB
- if (kgdb_early_setup)
- return;
-#endif
-
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
arch_init_irq();
-
-#ifdef CONFIG_KGDB
- if (!kgdb_early_setup)
- kgdb_early_setup = 1;
-#endif
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW
diff --git a/kernel/arch/mips/kernel/irq_cpu.c b/kernel/arch/mips/kernel/irq_cpu.c
deleted file mode 100644
index 6eb7a3f51..000000000
--- a/kernel/arch/mips/kernel/irq_cpu.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright 2001 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
- *
- * This file define the irq handler for MIPS CPU interrupts.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/*
- * Almost all MIPS CPUs define 8 interrupt sources. They are typically
- * level triggered (i.e., cannot be cleared from CPU; must be cleared from
- * device). The first two are software interrupts which we don't really
- * use or support. The last one is usually the CPU timer interrupt if
- * counter register is present or, for CPUs with an external FPU, by
- * convention it's the FPU exception interrupt.
- *
- * Don't even think about using this on SMP. You have been warned.
- *
- * This file exports one global function:
- * void mips_cpu_irq_init(void);
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/mipsmtregs.h>
-#include <asm/setup.h>
-
-static inline void unmask_mips_irq(struct irq_data *d)
-{
- set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_enable_hazard();
-}
-
-static inline void mask_mips_irq(struct irq_data *d)
-{
- clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_disable_hazard();
-}
-
-static struct irq_chip mips_cpu_irq_controller = {
- .name = "MIPS",
- .irq_ack = mask_mips_irq,
- .irq_mask = mask_mips_irq,
- .irq_mask_ack = mask_mips_irq,
- .irq_unmask = unmask_mips_irq,
- .irq_eoi = unmask_mips_irq,
- .irq_disable = mask_mips_irq,
- .irq_enable = unmask_mips_irq,
-};
-
-/*
- * Basically the same as above but taking care of all the MT stuff
- */
-
-static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
-{
- unsigned int vpflags = dvpe();
-
- clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- evpe(vpflags);
- unmask_mips_irq(d);
- return 0;
-}
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for mips_cpu_irq_end.
- */
-static void mips_mt_cpu_irq_ack(struct irq_data *d)
-{
- unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- evpe(vpflags);
- mask_mips_irq(d);
-}
-
-static struct irq_chip mips_mt_cpu_irq_controller = {
- .name = "MIPS",
- .irq_startup = mips_mt_cpu_irq_startup,
- .irq_ack = mips_mt_cpu_irq_ack,
- .irq_mask = mask_mips_irq,
- .irq_mask_ack = mips_mt_cpu_irq_ack,
- .irq_unmask = unmask_mips_irq,
- .irq_eoi = unmask_mips_irq,
- .irq_disable = mask_mips_irq,
- .irq_enable = unmask_mips_irq,
-};
-
-asmlinkage void __weak plat_irq_dispatch(void)
-{
- unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
- int irq;
-
- if (!pending) {
- spurious_interrupt();
- return;
- }
-
- pending >>= CAUSEB_IP;
- while (pending) {
- irq = fls(pending) - 1;
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
- pending &= ~BIT(irq);
- }
-}
-
-static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hw)
-{
- static struct irq_chip *chip;
-
- if (hw < 2 && cpu_has_mipsmt) {
- /* Software interrupts are used for MT/CMT IPI */
- chip = &mips_mt_cpu_irq_controller;
- } else {
- chip = &mips_cpu_irq_controller;
- }
-
- if (cpu_has_vint)
- set_vi_handler(hw, plat_irq_dispatch);
-
- irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
- .map = mips_cpu_intc_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static void __init __mips_cpu_irq_init(struct device_node *of_node)
-{
- struct irq_domain *domain;
-
- /* Mask interrupts. */
- clear_c0_status(ST0_IM);
- clear_c0_cause(CAUSEF_IP);
-
- domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
- &mips_cpu_intc_irq_domain_ops, NULL);
- if (!domain)
- panic("Failed to add irqdomain for MIPS CPU");
-}
-
-void __init mips_cpu_irq_init(void)
-{
- __mips_cpu_irq_init(NULL);
-}
-
-int __init mips_cpu_irq_of_init(struct device_node *of_node,
- struct device_node *parent)
-{
- __mips_cpu_irq_init(of_node);
- return 0;
-}
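Annotation: this file is not dropped outright; it moved to drivers/irqchip/irq-mips-cpu.c in v4.2. The dispatch loop it carried services the highest pending IP bit first (fls over Cause & Status.IM). A self-contained userspace sketch of the same priority scheme, with handle_irq() as a hypothetical stand-in for do_IRQ():

    #include <stdio.h>

    /* fls() equivalent for userspace: 1-based index of the highest set bit. */
    static int fls_ulong(unsigned long x)
    {
    	return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
    }

    static void handle_irq(int irq)		/* stand-in for do_IRQ() */
    {
    	printf("servicing CPU IRQ %d\n", irq);
    }

    static void dispatch(unsigned long pending)
    {
    	while (pending) {
    		int irq = fls_ulong(pending) - 1;

    		handle_irq(irq);
    		pending &= ~(1UL << irq);
    	}
    }

    int main(void)
    {
    	dispatch(0x85);	/* bits 7, 2, 0 -> serviced highest first */
    	return 0;
    }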
diff --git a/kernel/arch/mips/kernel/jump_label.c b/kernel/arch/mips/kernel/jump_label.c
index dda800e9e..3e586daa3 100644
--- a/kernel/arch/mips/kernel/jump_label.c
+++ b/kernel/arch/mips/kernel/jump_label.c
@@ -51,7 +51,7 @@ void arch_jump_label_transform(struct jump_entry *e,
/* Target must have the right alignment and ISA must be preserved. */
BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
insn.j_format.target = e->target >> J_RANGE_SHIFT;
} else {
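Annotation: JUMP_LABEL_ENABLE was renamed JUMP_LABEL_JMP upstream in v4.3; the patching itself is unchanged, enable writes a j/j32, disable writes a nop. A hedged sketch of the J-format encoding being built here, using local constants rather than the kernel's headers:

    #include <stdint.h>
    #include <stdio.h>

    #define J_OP		0x02u	/* MIPS32 'j' major opcode */
    #define J_RANGE_SHIFT	2	/* target stored as a word index */

    static uint32_t encode_j(uint32_t target)
    {
    	/* opcode in bits 31:26, 26-bit word index in bits 25:0 */
    	return (J_OP << 26) | ((target >> J_RANGE_SHIFT) & 0x03ffffffu);
    }

    int main(void)
    {
    	printf("j 0x%08x -> 0x%08x\n", 0x80100000u, encode_j(0x80100000u));
    	return 0;
    }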
diff --git a/kernel/arch/mips/kernel/kgdb.c b/kernel/arch/mips/kernel/kgdb.c
index 7afcc2f22..de63d36af 100644
--- a/kernel/arch/mips/kernel/kgdb.c
+++ b/kernel/arch/mips/kernel/kgdb.c
@@ -378,10 +378,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
struct kgdb_arch arch_kgdb_ops;
-/*
- * We use kgdb_early_setup so that functions we need to call now don't
- * cause trouble when called again later.
- */
int kgdb_arch_init(void)
{
union mips_instruction insn = {
diff --git a/kernel/arch/mips/kernel/mips-cm.c b/kernel/arch/mips/kernel/mips-cm.c
index 85bbe9b96..1448c1f43 100644
--- a/kernel/arch/mips/kernel/mips-cm.c
+++ b/kernel/arch/mips/kernel/mips-cm.c
@@ -9,17 +9,142 @@
*/
#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
#include <asm/mips-cm.h>
#include <asm/mipsregs.h>
void __iomem *mips_cm_base;
void __iomem *mips_cm_l2sync_base;
+int mips_cm_is64;
+
+static char *cm2_tr[8] = {
+ "mem", "gcr", "gic", "mmio",
+ "0x04", "cpc", "0x06", "0x07"
+};
+
+/* CM3 Tag ECC transaction type */
+static char *cm3_tr[16] = {
+ [0x0] = "ReqNoData",
+ [0x1] = "0x1",
+ [0x2] = "ReqWData",
+ [0x3] = "0x3",
+ [0x4] = "IReqNoResp",
+ [0x5] = "IReqWResp",
+ [0x6] = "IReqNoRespDat",
+ [0x7] = "IReqWRespDat",
+ [0x8] = "RespNoData",
+ [0x9] = "RespDataFol",
+ [0xa] = "RespWData",
+ [0xb] = "RespDataOnly",
+ [0xc] = "IRespNoData",
+ [0xd] = "IRespDataFol",
+ [0xe] = "IRespWData",
+ [0xf] = "IRespDataOnly"
+};
+
+static char *cm2_cmd[32] = {
+ [0x00] = "0x00",
+ [0x01] = "Legacy Write",
+ [0x02] = "Legacy Read",
+ [0x03] = "0x03",
+ [0x04] = "0x04",
+ [0x05] = "0x05",
+ [0x06] = "0x06",
+ [0x07] = "0x07",
+ [0x08] = "Coherent Read Own",
+ [0x09] = "Coherent Read Share",
+ [0x0a] = "Coherent Read Discard",
+ [0x0b] = "Coherent Ready Share Always",
+ [0x0c] = "Coherent Upgrade",
+ [0x0d] = "Coherent Writeback",
+ [0x0e] = "0x0e",
+ [0x0f] = "0x0f",
+ [0x10] = "Coherent Copyback",
+ [0x11] = "Coherent Copyback Invalidate",
+ [0x12] = "Coherent Invalidate",
+ [0x13] = "Coherent Write Invalidate",
+ [0x14] = "Coherent Completion Sync",
+ [0x15] = "0x15",
+ [0x16] = "0x16",
+ [0x17] = "0x17",
+ [0x18] = "0x18",
+ [0x19] = "0x19",
+ [0x1a] = "0x1a",
+ [0x1b] = "0x1b",
+ [0x1c] = "0x1c",
+ [0x1d] = "0x1d",
+ [0x1e] = "0x1e",
+ [0x1f] = "0x1f"
+};
+
+/* CM3 Tag ECC command type */
+static char *cm3_cmd[16] = {
+ [0x0] = "Legacy Read",
+ [0x1] = "Legacy Write",
+ [0x2] = "Coherent Read Own",
+ [0x3] = "Coherent Read Share",
+ [0x4] = "Coherent Read Discard",
+ [0x5] = "Coherent Evicted",
+ [0x6] = "Coherent Upgrade",
+ [0x7] = "Coherent Upgrade for Store Conditional",
+ [0x8] = "Coherent Writeback",
+ [0x9] = "Coherent Write Invalidate",
+ [0xa] = "0xa",
+ [0xb] = "0xb",
+ [0xc] = "0xc",
+ [0xd] = "0xd",
+ [0xe] = "0xe",
+ [0xf] = "0xf"
+};
+
+/* CM3 Tag ECC command group */
+static char *cm3_cmd_group[8] = {
+ [0x0] = "Normal",
+ [0x1] = "Registers",
+ [0x2] = "TLB",
+ [0x3] = "0x3",
+ [0x4] = "L1I",
+ [0x5] = "L1D",
+ [0x6] = "L3",
+ [0x7] = "L2"
+};
+
+static char *cm2_core[8] = {
+ "Invalid/OK", "Invalid/Data",
+ "Shared/OK", "Shared/Data",
+ "Modified/OK", "Modified/Data",
+ "Exclusive/OK", "Exclusive/Data"
+};
+
+static char *cm2_causes[32] = {
+ "None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+ "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+ "0x08", "0x09", "0x0a", "0x0b",
+ "0x0c", "0x0d", "0x0e", "0x0f",
+ "0x10", "0x11", "0x12", "0x13",
+ "0x14", "0x15", "0x16", "INTVN_WR_ERR",
+ "INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+ "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static char *cm3_causes[32] = {
+ "0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
+ "MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
+ "CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
+ "0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
+ "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
+ "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
phys_addr_t __mips_cm_phys_base(void)
{
u32 config3 = read_c0_config3();
- u32 cmgcr;
+ unsigned long cmgcr;
/* Check the CMGCRBase register is implemented */
if (!(config3 & MIPS_CONF3_CMGCR))
@@ -80,6 +205,14 @@ int mips_cm_probe(void)
{
phys_addr_t addr;
u32 base_reg;
+ unsigned cpu;
+
+ /*
+ * No need to probe again if we have already been
+ * here before.
+ */
+ if (mips_cm_base)
+ return 0;
addr = mips_cm_phys_base();
BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
@@ -117,5 +250,179 @@ int mips_cm_probe(void)
/* probe for an L2-only sync region */
mips_cm_probe_l2sync();
+ /* determine register width for this CM */
+ mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cm_core_lock, cpu));
+
return 0;
}
+
+void mips_cm_lock_other(unsigned int core, unsigned int vp)
+{
+ unsigned curr_core;
+ u32 val;
+
+ preempt_disable();
+ curr_core = current_cpu_data.core;
+ spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
+ val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+ } else {
+ BUG_ON(vp != 0);
+ val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
+ }
+
+ write_gcr_cl_other(val);
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
+}
+
+void mips_cm_unlock_other(void)
+{
+ unsigned curr_core = current_cpu_data.core;
+
+ spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+ preempt_enable();
+}
+
+void mips_cm_error_report(void)
+{
+ u64 cm_error, cm_addr, cm_other;
+ unsigned long revision;
+ int ocause, cause;
+ char buf[256];
+
+ if (!mips_cm_present())
+ return;
+
+ revision = mips_cm_revision();
+
+ if (revision < CM_REV_CM3) { /* CM2 */
+ cm_error = read_gcr_error_cause();
+ cm_addr = read_gcr_error_addr();
+ cm_other = read_gcr_error_mult();
+ cause = cm_error >> CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
+ ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+
+ if (!cause)
+ return;
+
+ if (cause < 16) {
+ unsigned long cca_bits = (cm_error >> 15) & 7;
+ unsigned long tr_bits = (cm_error >> 12) & 7;
+ unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+ unsigned long stag_bits = (cm_error >> 3) & 15;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "CCA=%lu TR=%s MCmd=%s STag=%lu "
+ "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
+ cm2_cmd[cmd_bits], stag_bits, sport_bits);
+ } else {
+ /* glob state & sresp together */
+ unsigned long c3_bits = (cm_error >> 18) & 7;
+ unsigned long c2_bits = (cm_error >> 15) & 7;
+ unsigned long c1_bits = (cm_error >> 12) & 7;
+ unsigned long c0_bits = (cm_error >> 9) & 7;
+ unsigned long sc_bit = (cm_error >> 8) & 1;
+ unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "C3=%s C2=%s C1=%s C0=%s SC=%s "
+ "MCmd=%s SPort=%lu\n",
+ cm2_core[c3_bits], cm2_core[c2_bits],
+ cm2_core[c1_bits], cm2_core[c0_bits],
+ sc_bit ? "True" : "False",
+ cm2_cmd[cmd_bits], sport_bits);
+ }
+ pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+ cm2_causes[cause], buf);
+ pr_err("CM_ADDR =%08llx\n", cm_addr);
+ pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]);
+ } else { /* CM3 */
+ ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
+ ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
+
+ cm_error = read64_gcr_error_cause();
+ cm_addr = read64_gcr_error_addr();
+ cm_other = read64_gcr_error_mult();
+ cause = cm_error >> CM3_GCR_ERROR_CAUSE_ERRTYPE_SHF;
+ ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+
+ if (!cause)
+ return;
+
+ /* Used by cause == {1,2,3} */
+ core_id_bits = (cm_error >> 22) & 0xf;
+ vp_id_bits = (cm_error >> 18) & 0xf;
+ cmd_bits = (cm_error >> 14) & 0xf;
+ cmd_group_bits = (cm_error >> 11) & 0xf;
+ cm3_cca_bits = (cm_error >> 8) & 7;
+ mcp_bits = (cm_error >> 5) & 0xf;
+ cm3_tr_bits = (cm_error >> 1) & 0xf;
+ sched_bit = cm_error & 0x1;
+
+ if (cause == 1 || cause == 3) { /* Tag ECC */
+ unsigned long tag_ecc = (cm_error >> 57) & 0x1;
+ unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
+ unsigned long dword_bits = (cm_error >> 49) & 0xff;
+ unsigned long data_way_bits = (cm_error >> 45) & 0xf;
+ unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
+ unsigned long bank_bit = (cm_error >> 28) & 0x1;
+ snprintf(buf, sizeof(buf),
+ "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu) "
+ "Bank=%lu CoreID=%lu VPID=%lu Command=%s "
+ "Command Group=%s CCA=%lu MCP=%d "
+ "Transaction type=%s Scheduler=%lu\n",
+ tag_ecc ? "TAG" : "DATA",
+ tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
+ data_way_bits, dword_bits, data_sets_bits,
+ bank_bit, core_id_bits, vp_id_bits,
+ cm3_cmd[cmd_bits],
+ cm3_cmd_group[cmd_group_bits],
+ cm3_cca_bits, 1 << mcp_bits,
+ cm3_tr[cm3_tr_bits], sched_bit);
+ } else if (cause == 2) {
+ unsigned long data_error_type = (cm_error >> 41) & 0xfff;
+ unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
+ unsigned long data_decode_group = (cm_error >> 34) & 0x7;
+ unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
+
+ snprintf(buf, sizeof(buf),
+ "Decode Request Error: Type=%lu, Command=%lu"
+ "Command Group=%lu Destination ID=%lu"
+ "CoreID=%lu VPID=%lu Command=%s"
+ "Command Group=%s CCA=%lu MCP=%d"
+ "Transaction type=%s Scheduler=%lu\n",
+ data_error_type, data_decode_cmd,
+ data_decode_group, data_decode_destination_id,
+ core_id_bits, vp_id_bits,
+ cm3_cmd[cmd_bits],
+ cm3_cmd_group[cmd_group_bits],
+ cm3_cca_bits, 1 << mcp_bits,
+ cm3_tr[cm3_tr_bits], sched_bit);
+ } else {
+ buf[0] = 0;
+ }
+
+ pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
+ cm3_causes[cause], buf);
+ pr_err("CM_ADDR =%llx\n", cm_addr);
+ pr_err("CM_OTHER=%llx %s\n", cm_other, cm3_causes[ocause]);
+ }
+
+ /* reprime cause register */
+ write_gcr_error_cause(0);
+}
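Annotation: the new mips_cm_lock_other()/mips_cm_unlock_other() pair brackets every use of the GCR core-other register window, and the mb() inside the lock guarantees the redirect is visible before the window is touched. A hedged usage sketch; read_gcr_co_coherence() is assumed to exist as generated by asm/mips-cm.h, and the wrapper itself is illustrative:

    static u32 read_other_core_coherence(unsigned int core)
    {
    	u32 val;

    	mips_cm_lock_other(core, 0);	/* pre-CM3 requires vp == 0 */
    	val = read_gcr_co_coherence();	/* access redirected to 'core' */
    	mips_cm_unlock_other();

    	return val;
    }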
diff --git a/kernel/arch/mips/kernel/mips-cpc.c b/kernel/arch/mips/kernel/mips-cpc.c
index 11964501c..566b8d2c0 100644
--- a/kernel/arch/mips/kernel/mips-cpc.c
+++ b/kernel/arch/mips/kernel/mips-cpc.c
@@ -21,9 +21,16 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
-phys_addr_t __weak mips_cpc_phys_base(void)
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present.
+ */
+static phys_addr_t mips_cpc_phys_base(void)
{
- u32 cpc_base;
+ unsigned long cpc_base;
if (!mips_cm_present())
return 0;
@@ -69,6 +76,12 @@ void mips_cpc_lock_other(unsigned int core)
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
}
void mips_cpc_unlock_other(void)
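Annotation: the added mb() matters because write_cpc_cl_other() only redirects subsequent core-other accesses once the register write is visible. A hedged sketch of the pattern it protects; write_cpc_co_cmd() and CPC_Cx_CMD_PWRUP are the real accessors from asm/mips-cpc.h, while the wrapper is illustrative:

    static void power_up_core(unsigned int core)
    {
    	mips_cpc_lock_other(core);		/* redirect + mb() */
    	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* now safely targets 'core' */
    	mips_cpc_unlock_other();
    }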
diff --git a/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c b/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c
index f2977f009..1f5aac7f9 100644
--- a/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -22,6 +22,7 @@
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/break.h>
+#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
@@ -2363,7 +2364,6 @@ static const struct file_operations mipsr2_clear_fops = {
static int __init mipsr2_init_debugfs(void)
{
- extern struct dentry *mips_debugfs_dir;
struct dentry *mipsr2_emul;
if (!mips_debugfs_dir)
diff --git a/kernel/arch/mips/kernel/mips_ksyms.c b/kernel/arch/mips/kernel/mips_ksyms.c
index 291af0b5c..e2b6ab746 100644
--- a/kernel/arch/mips/kernel/mips_ksyms.c
+++ b/kernel/arch/mips/kernel/mips_ksyms.c
@@ -17,6 +17,7 @@
#include <asm/fpu.h>
#include <asm/msa.h>
+extern void *__bzero_kernel(void *__s, size_t __count);
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_kernel_nocheck_asm(char *__to,
const char *__from, long __len);
@@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
EXPORT_SYMBOL(__copy_in_user_eva);
EXPORT_SYMBOL(__copy_to_user_eva);
EXPORT_SYMBOL(__copy_user_inatomic_eva);
+EXPORT_SYMBOL(__bzero_kernel);
#endif
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
diff --git a/kernel/arch/mips/kernel/octeon_switch.S b/kernel/arch/mips/kernel/octeon_switch.S
index 423ae83af..3375745b9 100644
--- a/kernel/arch/mips/kernel/octeon_switch.S
+++ b/kernel/arch/mips/kernel/octeon_switch.S
@@ -18,7 +18,7 @@
.set pop
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti)
*/
.align 7
LEAF(resume)
@@ -28,30 +28,6 @@
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
- /*
- * check if we need to save FPU registers
- */
- .set push
- .set noreorder
- beqz a3, 1f
- PTR_L t3, TASK_THREAD_INFO(a0)
- .set pop
-
- /*
- * clear saved user stack CU1 bit
- */
- LONG_L t0, ST_OFF(t3)
- li t1, ~ST0_CU1
- and t0, t0, t1
- LONG_S t0, ST_OFF(t3)
-
- .set push
- .set arch=mips64r2
- fpu_save_double a0 t0 t1 # c0_status passed in t0
- # clobbers t1
- .set pop
-1:
-
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Check if we need to store CVMSEG state */
dmfc0 t0, $11,7 /* CvmMemCtl */
diff --git a/kernel/arch/mips/kernel/perf_event_mipsxx.c b/kernel/arch/mips/kernel/perf_event_mipsxx.c
index cc1b6fadf..d7b8dd431 100644
--- a/kernel/arch/mips/kernel/perf_event_mipsxx.c
+++ b/kernel/arch/mips/kernel/perf_event_mipsxx.c
@@ -1556,6 +1556,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_P5600:
+ case CPU_I6400:
/* 8-bit event numbers */
raw_id = config & 0x1ff;
base_id = raw_id & 0xff;
@@ -1717,6 +1718,11 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
+ case CPU_I6400:
+ mipspmu.name = "mips/I6400";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/kernel/arch/mips/kernel/pm-cps.c b/kernel/arch/mips/kernel/pm-cps.c
index 06147179a..f63a28997 100644
--- a/kernel/arch/mips/kernel/pm-cps.c
+++ b/kernel/arch/mips/kernel/pm-cps.c
@@ -267,6 +267,7 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
/* CPUs which do not require the workaround */
case CPU_P5600:
+ case CPU_I6400:
return 0;
default:
@@ -671,6 +672,7 @@ static int __init cps_pm_init(void)
case CPU_PROAPTIV:
case CPU_M5150:
case CPU_P5600:
+ case CPU_I6400:
stype_intervention = 0x2;
stype_memory = 0x3;
stype_ordering = 0x10;
diff --git a/kernel/arch/mips/kernel/prom.c b/kernel/arch/mips/kernel/prom.c
index e303cb1ef..5fcec3032 100644
--- a/kernel/arch/mips/kernel/prom.c
+++ b/kernel/arch/mips/kernel/prom.c
@@ -18,6 +18,7 @@
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/prom.h>
@@ -37,7 +38,7 @@ char *mips_get_machine_name(void)
return mips_machine_name;
}
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/kernel/arch/mips/kernel/ptrace.c b/kernel/arch/mips/kernel/ptrace.c
index e933a309f..4f0ac78d1 100644
--- a/kernel/arch/mips/kernel/ptrace.c
+++ b/kernel/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
+#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
@@ -490,6 +491,93 @@ enum mips_regset {
REGSET_FPR,
};
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(reg, r) { \
+ .name = #reg, \
+ .offset = offsetof(struct pt_regs, r) \
+}
+
+#define REG_OFFSET_END { \
+ .name = NULL, \
+ .offset = 0 \
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(r0, regs[0]),
+ REG_OFFSET_NAME(r1, regs[1]),
+ REG_OFFSET_NAME(r2, regs[2]),
+ REG_OFFSET_NAME(r3, regs[3]),
+ REG_OFFSET_NAME(r4, regs[4]),
+ REG_OFFSET_NAME(r5, regs[5]),
+ REG_OFFSET_NAME(r6, regs[6]),
+ REG_OFFSET_NAME(r7, regs[7]),
+ REG_OFFSET_NAME(r8, regs[8]),
+ REG_OFFSET_NAME(r9, regs[9]),
+ REG_OFFSET_NAME(r10, regs[10]),
+ REG_OFFSET_NAME(r11, regs[11]),
+ REG_OFFSET_NAME(r12, regs[12]),
+ REG_OFFSET_NAME(r13, regs[13]),
+ REG_OFFSET_NAME(r14, regs[14]),
+ REG_OFFSET_NAME(r15, regs[15]),
+ REG_OFFSET_NAME(r16, regs[16]),
+ REG_OFFSET_NAME(r17, regs[17]),
+ REG_OFFSET_NAME(r18, regs[18]),
+ REG_OFFSET_NAME(r19, regs[19]),
+ REG_OFFSET_NAME(r20, regs[20]),
+ REG_OFFSET_NAME(r21, regs[21]),
+ REG_OFFSET_NAME(r22, regs[22]),
+ REG_OFFSET_NAME(r23, regs[23]),
+ REG_OFFSET_NAME(r24, regs[24]),
+ REG_OFFSET_NAME(r25, regs[25]),
+ REG_OFFSET_NAME(r26, regs[26]),
+ REG_OFFSET_NAME(r27, regs[27]),
+ REG_OFFSET_NAME(r28, regs[28]),
+ REG_OFFSET_NAME(r29, regs[29]),
+ REG_OFFSET_NAME(r30, regs[30]),
+ REG_OFFSET_NAME(r31, regs[31]),
+ REG_OFFSET_NAME(c0_status, cp0_status),
+ REG_OFFSET_NAME(hi, hi),
+ REG_OFFSET_NAME(lo, lo),
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ REG_OFFSET_NAME(acx, acx),
+#endif
+ REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
+ REG_OFFSET_NAME(c0_cause, cp0_cause),
+ REG_OFFSET_NAME(c0_epc, cp0_epc),
+#ifdef CONFIG_MIPS_MT_SMTC
+ REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus),
+#endif
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ REG_OFFSET_NAME(mpl0, mpl[0]),
+ REG_OFFSET_NAME(mpl1, mpl[1]),
+ REG_OFFSET_NAME(mpl2, mpl[2]),
+ REG_OFFSET_NAME(mtp0, mtp[0]),
+ REG_OFFSET_NAME(mtp1, mtp[1]),
+ REG_OFFSET_NAME(mtp2, mtp[2]),
+#endif
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
static const struct user_regset mips_regsets[] = {
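Annotation: regs_query_register_offset() exists so consumers such as kprobe event parsing can turn a register name into a byte offset into struct pt_regs. A hedged sketch of the companion fetch; the helper name here is illustrative, only the offset lookup comes from this patch:

    static unsigned long fetch_named_reg(struct pt_regs *regs,
    				     const char *name)
    {
    	int off = regs_query_register_offset(name);

    	if (off < 0)
    		return 0;	/* unknown register name */

    	/* pt_regs is a flat struct, so byte-offset addressing is safe */
    	return *(unsigned long *)((char *)regs + off);
    }

    /* e.g. fetch_named_reg(regs, "r29") returns the stack pointer. */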
diff --git a/kernel/arch/mips/kernel/r2300_switch.S b/kernel/arch/mips/kernel/r2300_switch.S
index 5087a4b72..ac27ef7d4 100644
--- a/kernel/arch/mips/kernel/r2300_switch.S
+++ b/kernel/arch/mips/kernel/r2300_switch.S
@@ -31,18 +31,8 @@
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
/*
- * FPU context is saved iff the process has used its FPU in the current
- * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
-/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti)
*/
LEAF(resume)
mfc0 t1, CP0_STATUS
@@ -50,22 +40,6 @@ LEAF(resume)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
- beqz a3, 1f
-
- PTR_L t3, TASK_THREAD_INFO(a0)
-
- /*
- * clear saved user stack CU1 bit
- */
- lw t0, ST_OFF(t3)
- li t1, ~ST0_CU1
- and t0, t0, t1
- sw t0, ST_OFF(t3)
-
- fpu_save_single a0, t0 # clobbers t0
-
-1:
-
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
diff --git a/kernel/arch/mips/kernel/r4k_fpu.S b/kernel/arch/mips/kernel/r4k_fpu.S
index 1d88af26b..f09546ee2 100644
--- a/kernel/arch/mips/kernel/r4k_fpu.S
+++ b/kernel/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*/
#include <asm/asm.h>
+#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
@@ -35,6 +36,14 @@
.set noreorder
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
@@ -54,117 +63,60 @@ LEAF(_save_fp_context)
nop
#endif
/* Store the 16 odd double precision registers */
- EX sdc1 $f1, SC_FPREGS+8(a0)
- EX sdc1 $f3, SC_FPREGS+24(a0)
- EX sdc1 $f5, SC_FPREGS+40(a0)
- EX sdc1 $f7, SC_FPREGS+56(a0)
- EX sdc1 $f9, SC_FPREGS+72(a0)
- EX sdc1 $f11, SC_FPREGS+88(a0)
- EX sdc1 $f13, SC_FPREGS+104(a0)
- EX sdc1 $f15, SC_FPREGS+120(a0)
- EX sdc1 $f17, SC_FPREGS+136(a0)
- EX sdc1 $f19, SC_FPREGS+152(a0)
- EX sdc1 $f21, SC_FPREGS+168(a0)
- EX sdc1 $f23, SC_FPREGS+184(a0)
- EX sdc1 $f25, SC_FPREGS+200(a0)
- EX sdc1 $f27, SC_FPREGS+216(a0)
- EX sdc1 $f29, SC_FPREGS+232(a0)
- EX sdc1 $f31, SC_FPREGS+248(a0)
+ EX sdc1 $f1, 8(a0)
+ EX sdc1 $f3, 24(a0)
+ EX sdc1 $f5, 40(a0)
+ EX sdc1 $f7, 56(a0)
+ EX sdc1 $f9, 72(a0)
+ EX sdc1 $f11, 88(a0)
+ EX sdc1 $f13, 104(a0)
+ EX sdc1 $f15, 120(a0)
+ EX sdc1 $f17, 136(a0)
+ EX sdc1 $f19, 152(a0)
+ EX sdc1 $f21, 168(a0)
+ EX sdc1 $f23, 184(a0)
+ EX sdc1 $f25, 200(a0)
+ EX sdc1 $f27, 216(a0)
+ EX sdc1 $f29, 232(a0)
+ EX sdc1 $f31, 248(a0)
1: .set pop
#endif
.set push
SET_HARDFLOAT
/* Store the 16 even double precision registers */
- EX sdc1 $f0, SC_FPREGS+0(a0)
- EX sdc1 $f2, SC_FPREGS+16(a0)
- EX sdc1 $f4, SC_FPREGS+32(a0)
- EX sdc1 $f6, SC_FPREGS+48(a0)
- EX sdc1 $f8, SC_FPREGS+64(a0)
- EX sdc1 $f10, SC_FPREGS+80(a0)
- EX sdc1 $f12, SC_FPREGS+96(a0)
- EX sdc1 $f14, SC_FPREGS+112(a0)
- EX sdc1 $f16, SC_FPREGS+128(a0)
- EX sdc1 $f18, SC_FPREGS+144(a0)
- EX sdc1 $f20, SC_FPREGS+160(a0)
- EX sdc1 $f22, SC_FPREGS+176(a0)
- EX sdc1 $f24, SC_FPREGS+192(a0)
- EX sdc1 $f26, SC_FPREGS+208(a0)
- EX sdc1 $f28, SC_FPREGS+224(a0)
- EX sdc1 $f30, SC_FPREGS+240(a0)
- EX sw t1, SC_FPC_CSR(a0)
+ EX sdc1 $f0, 0(a0)
+ EX sdc1 $f2, 16(a0)
+ EX sdc1 $f4, 32(a0)
+ EX sdc1 $f6, 48(a0)
+ EX sdc1 $f8, 64(a0)
+ EX sdc1 $f10, 80(a0)
+ EX sdc1 $f12, 96(a0)
+ EX sdc1 $f14, 112(a0)
+ EX sdc1 $f16, 128(a0)
+ EX sdc1 $f18, 144(a0)
+ EX sdc1 $f20, 160(a0)
+ EX sdc1 $f22, 176(a0)
+ EX sdc1 $f24, 192(a0)
+ EX sdc1 $f26, 208(a0)
+ EX sdc1 $f28, 224(a0)
+ EX sdc1 $f30, 240(a0)
+ EX sw t1, 0(a1)
jr ra
li v0, 0 # success
.set pop
END(_save_fp_context)
-#ifdef CONFIG_MIPS32_COMPAT
- /* Save 32-bit process floating point context */
-LEAF(_save_fp_context32)
- .set push
- .set MIPS_ISA_ARCH_LEVEL_RAW
- SET_HARDFLOAT
- cfc1 t1, fcr31
-
-#ifndef CONFIG_CPU_MIPS64_R6
- mfc0 t0, CP0_STATUS
- sll t0, t0, 5
- bgez t0, 1f # skip storing odd if FR=0
- nop
-#endif
-
- /* Store the 16 odd double precision registers */
- EX sdc1 $f1, SC32_FPREGS+8(a0)
- EX sdc1 $f3, SC32_FPREGS+24(a0)
- EX sdc1 $f5, SC32_FPREGS+40(a0)
- EX sdc1 $f7, SC32_FPREGS+56(a0)
- EX sdc1 $f9, SC32_FPREGS+72(a0)
- EX sdc1 $f11, SC32_FPREGS+88(a0)
- EX sdc1 $f13, SC32_FPREGS+104(a0)
- EX sdc1 $f15, SC32_FPREGS+120(a0)
- EX sdc1 $f17, SC32_FPREGS+136(a0)
- EX sdc1 $f19, SC32_FPREGS+152(a0)
- EX sdc1 $f21, SC32_FPREGS+168(a0)
- EX sdc1 $f23, SC32_FPREGS+184(a0)
- EX sdc1 $f25, SC32_FPREGS+200(a0)
- EX sdc1 $f27, SC32_FPREGS+216(a0)
- EX sdc1 $f29, SC32_FPREGS+232(a0)
- EX sdc1 $f31, SC32_FPREGS+248(a0)
-
- /* Store the 16 even double precision registers */
-1: EX sdc1 $f0, SC32_FPREGS+0(a0)
- EX sdc1 $f2, SC32_FPREGS+16(a0)
- EX sdc1 $f4, SC32_FPREGS+32(a0)
- EX sdc1 $f6, SC32_FPREGS+48(a0)
- EX sdc1 $f8, SC32_FPREGS+64(a0)
- EX sdc1 $f10, SC32_FPREGS+80(a0)
- EX sdc1 $f12, SC32_FPREGS+96(a0)
- EX sdc1 $f14, SC32_FPREGS+112(a0)
- EX sdc1 $f16, SC32_FPREGS+128(a0)
- EX sdc1 $f18, SC32_FPREGS+144(a0)
- EX sdc1 $f20, SC32_FPREGS+160(a0)
- EX sdc1 $f22, SC32_FPREGS+176(a0)
- EX sdc1 $f24, SC32_FPREGS+192(a0)
- EX sdc1 $f26, SC32_FPREGS+208(a0)
- EX sdc1 $f28, SC32_FPREGS+224(a0)
- EX sdc1 $f30, SC32_FPREGS+240(a0)
- EX sw t1, SC32_FPC_CSR(a0)
- cfc1 t0, $0 # implementation/version
- EX sw t0, SC32_FPC_EIR(a0)
- .set pop
-
- jr ra
- li v0, 0 # success
- END(_save_fp_context32)
-#endif
-
-/*
- * Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
- EX lw t1, SC_FPC_CSR(a0)
+ EX lw t1, 0(a1)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
defined(CONFIG_CPU_MIPS32_R6)
@@ -178,101 +130,231 @@ LEAF(_restore_fp_context)
bgez t0, 1f # skip loading odd if FR=0
nop
#endif
- EX ldc1 $f1, SC_FPREGS+8(a0)
- EX ldc1 $f3, SC_FPREGS+24(a0)
- EX ldc1 $f5, SC_FPREGS+40(a0)
- EX ldc1 $f7, SC_FPREGS+56(a0)
- EX ldc1 $f9, SC_FPREGS+72(a0)
- EX ldc1 $f11, SC_FPREGS+88(a0)
- EX ldc1 $f13, SC_FPREGS+104(a0)
- EX ldc1 $f15, SC_FPREGS+120(a0)
- EX ldc1 $f17, SC_FPREGS+136(a0)
- EX ldc1 $f19, SC_FPREGS+152(a0)
- EX ldc1 $f21, SC_FPREGS+168(a0)
- EX ldc1 $f23, SC_FPREGS+184(a0)
- EX ldc1 $f25, SC_FPREGS+200(a0)
- EX ldc1 $f27, SC_FPREGS+216(a0)
- EX ldc1 $f29, SC_FPREGS+232(a0)
- EX ldc1 $f31, SC_FPREGS+248(a0)
+ EX ldc1 $f1, 8(a0)
+ EX ldc1 $f3, 24(a0)
+ EX ldc1 $f5, 40(a0)
+ EX ldc1 $f7, 56(a0)
+ EX ldc1 $f9, 72(a0)
+ EX ldc1 $f11, 88(a0)
+ EX ldc1 $f13, 104(a0)
+ EX ldc1 $f15, 120(a0)
+ EX ldc1 $f17, 136(a0)
+ EX ldc1 $f19, 152(a0)
+ EX ldc1 $f21, 168(a0)
+ EX ldc1 $f23, 184(a0)
+ EX ldc1 $f25, 200(a0)
+ EX ldc1 $f27, 216(a0)
+ EX ldc1 $f29, 232(a0)
+ EX ldc1 $f31, 248(a0)
1: .set pop
#endif
.set push
SET_HARDFLOAT
- EX ldc1 $f0, SC_FPREGS+0(a0)
- EX ldc1 $f2, SC_FPREGS+16(a0)
- EX ldc1 $f4, SC_FPREGS+32(a0)
- EX ldc1 $f6, SC_FPREGS+48(a0)
- EX ldc1 $f8, SC_FPREGS+64(a0)
- EX ldc1 $f10, SC_FPREGS+80(a0)
- EX ldc1 $f12, SC_FPREGS+96(a0)
- EX ldc1 $f14, SC_FPREGS+112(a0)
- EX ldc1 $f16, SC_FPREGS+128(a0)
- EX ldc1 $f18, SC_FPREGS+144(a0)
- EX ldc1 $f20, SC_FPREGS+160(a0)
- EX ldc1 $f22, SC_FPREGS+176(a0)
- EX ldc1 $f24, SC_FPREGS+192(a0)
- EX ldc1 $f26, SC_FPREGS+208(a0)
- EX ldc1 $f28, SC_FPREGS+224(a0)
- EX ldc1 $f30, SC_FPREGS+240(a0)
+ EX ldc1 $f0, 0(a0)
+ EX ldc1 $f2, 16(a0)
+ EX ldc1 $f4, 32(a0)
+ EX ldc1 $f6, 48(a0)
+ EX ldc1 $f8, 64(a0)
+ EX ldc1 $f10, 80(a0)
+ EX ldc1 $f12, 96(a0)
+ EX ldc1 $f14, 112(a0)
+ EX ldc1 $f16, 128(a0)
+ EX ldc1 $f18, 144(a0)
+ EX ldc1 $f20, 160(a0)
+ EX ldc1 $f22, 176(a0)
+ EX ldc1 $f24, 192(a0)
+ EX ldc1 $f26, 208(a0)
+ EX ldc1 $f28, 224(a0)
+ EX ldc1 $f30, 240(a0)
ctc1 t1, fcr31
.set pop
jr ra
li v0, 0 # success
END(_restore_fp_context)
-#ifdef CONFIG_MIPS32_COMPAT
-LEAF(_restore_fp_context32)
- /* Restore an o32 sigcontext. */
- .set push
- SET_HARDFLOAT
- EX lw t1, SC32_FPC_CSR(a0)
+#ifdef CONFIG_CPU_HAS_MSA
-#ifndef CONFIG_CPU_MIPS64_R6
- mfc0 t0, CP0_STATUS
- sll t0, t0, 5
- bgez t0, 1f # skip loading odd if FR=0
+ .macro op_one_wr op, idx, base
+ .align 4
+\idx: \op \idx, 0, \base
+ jr ra
nop
-#endif
+ .endm
- EX ldc1 $f1, SC32_FPREGS+8(a0)
- EX ldc1 $f3, SC32_FPREGS+24(a0)
- EX ldc1 $f5, SC32_FPREGS+40(a0)
- EX ldc1 $f7, SC32_FPREGS+56(a0)
- EX ldc1 $f9, SC32_FPREGS+72(a0)
- EX ldc1 $f11, SC32_FPREGS+88(a0)
- EX ldc1 $f13, SC32_FPREGS+104(a0)
- EX ldc1 $f15, SC32_FPREGS+120(a0)
- EX ldc1 $f17, SC32_FPREGS+136(a0)
- EX ldc1 $f19, SC32_FPREGS+152(a0)
- EX ldc1 $f21, SC32_FPREGS+168(a0)
- EX ldc1 $f23, SC32_FPREGS+184(a0)
- EX ldc1 $f25, SC32_FPREGS+200(a0)
- EX ldc1 $f27, SC32_FPREGS+216(a0)
- EX ldc1 $f29, SC32_FPREGS+232(a0)
- EX ldc1 $f31, SC32_FPREGS+248(a0)
+ .macro op_msa_wr name, op
+LEAF(\name)
+ .set push
+ .set noreorder
+ sll t0, a0, 4
+ PTR_LA t1, 0f
+ PTR_ADDU t0, t0, t1
+ jr t0
+ nop
+ op_one_wr \op, 0, a1
+ op_one_wr \op, 1, a1
+ op_one_wr \op, 2, a1
+ op_one_wr \op, 3, a1
+ op_one_wr \op, 4, a1
+ op_one_wr \op, 5, a1
+ op_one_wr \op, 6, a1
+ op_one_wr \op, 7, a1
+ op_one_wr \op, 8, a1
+ op_one_wr \op, 9, a1
+ op_one_wr \op, 10, a1
+ op_one_wr \op, 11, a1
+ op_one_wr \op, 12, a1
+ op_one_wr \op, 13, a1
+ op_one_wr \op, 14, a1
+ op_one_wr \op, 15, a1
+ op_one_wr \op, 16, a1
+ op_one_wr \op, 17, a1
+ op_one_wr \op, 18, a1
+ op_one_wr \op, 19, a1
+ op_one_wr \op, 20, a1
+ op_one_wr \op, 21, a1
+ op_one_wr \op, 22, a1
+ op_one_wr \op, 23, a1
+ op_one_wr \op, 24, a1
+ op_one_wr \op, 25, a1
+ op_one_wr \op, 26, a1
+ op_one_wr \op, 27, a1
+ op_one_wr \op, 28, a1
+ op_one_wr \op, 29, a1
+ op_one_wr \op, 30, a1
+ op_one_wr \op, 31, a1
+ .set pop
+ END(\name)
+ .endm
-1: EX ldc1 $f0, SC32_FPREGS+0(a0)
- EX ldc1 $f2, SC32_FPREGS+16(a0)
- EX ldc1 $f4, SC32_FPREGS+32(a0)
- EX ldc1 $f6, SC32_FPREGS+48(a0)
- EX ldc1 $f8, SC32_FPREGS+64(a0)
- EX ldc1 $f10, SC32_FPREGS+80(a0)
- EX ldc1 $f12, SC32_FPREGS+96(a0)
- EX ldc1 $f14, SC32_FPREGS+112(a0)
- EX ldc1 $f16, SC32_FPREGS+128(a0)
- EX ldc1 $f18, SC32_FPREGS+144(a0)
- EX ldc1 $f20, SC32_FPREGS+160(a0)
- EX ldc1 $f22, SC32_FPREGS+176(a0)
- EX ldc1 $f24, SC32_FPREGS+192(a0)
- EX ldc1 $f26, SC32_FPREGS+208(a0)
- EX ldc1 $f28, SC32_FPREGS+224(a0)
- EX ldc1 $f30, SC32_FPREGS+240(a0)
- ctc1 t1, fcr31
+ op_msa_wr read_msa_wr_b, st_b
+ op_msa_wr read_msa_wr_h, st_h
+ op_msa_wr read_msa_wr_w, st_w
+ op_msa_wr read_msa_wr_d, st_d
+
+ op_msa_wr write_msa_wr_b, ld_b
+ op_msa_wr write_msa_wr_h, ld_h
+ op_msa_wr write_msa_wr_w, ld_w
+ op_msa_wr write_msa_wr_d, ld_d
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro save_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ copy_u_d \wr, 1
+ EX sd $1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ copy_u_w \wr, 2
+ EX sw $1, \off(\base)
+ copy_u_w \wr, 3
+ EX sw $1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ copy_u_w \wr, 2
+ EX sw $1, (\off+4)(\base)
+ copy_u_w \wr, 3
+ EX sw $1, \off(\base)
+#endif
+ .set pop
+ .endm
+
+LEAF(_save_msa_all_upper)
+ save_msa_upper 0, 0x00, a0
+ save_msa_upper 1, 0x08, a0
+ save_msa_upper 2, 0x10, a0
+ save_msa_upper 3, 0x18, a0
+ save_msa_upper 4, 0x20, a0
+ save_msa_upper 5, 0x28, a0
+ save_msa_upper 6, 0x30, a0
+ save_msa_upper 7, 0x38, a0
+ save_msa_upper 8, 0x40, a0
+ save_msa_upper 9, 0x48, a0
+ save_msa_upper 10, 0x50, a0
+ save_msa_upper 11, 0x58, a0
+ save_msa_upper 12, 0x60, a0
+ save_msa_upper 13, 0x68, a0
+ save_msa_upper 14, 0x70, a0
+ save_msa_upper 15, 0x78, a0
+ save_msa_upper 16, 0x80, a0
+ save_msa_upper 17, 0x88, a0
+ save_msa_upper 18, 0x90, a0
+ save_msa_upper 19, 0x98, a0
+ save_msa_upper 20, 0xa0, a0
+ save_msa_upper 21, 0xa8, a0
+ save_msa_upper 22, 0xb0, a0
+ save_msa_upper 23, 0xb8, a0
+ save_msa_upper 24, 0xc0, a0
+ save_msa_upper 25, 0xc8, a0
+ save_msa_upper 26, 0xd0, a0
+ save_msa_upper 27, 0xd8, a0
+ save_msa_upper 28, 0xe0, a0
+ save_msa_upper 29, 0xe8, a0
+ save_msa_upper 30, 0xf0, a0
+ save_msa_upper 31, 0xf8, a0
jr ra
- li v0, 0 # success
- .set pop
- END(_restore_fp_context32)
+ li v0, 0
+ END(_save_msa_all_upper)
+
+ .macro restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ EX ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ EX lw $1, \off(\base)
+ insert_w \wr, 2
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ EX lw $1, \off(\base)
+ insert_w \wr, 3
#endif
+ .set pop
+ .endm
+
+LEAF(_restore_msa_all_upper)
+ restore_msa_upper 0, 0x00, a0
+ restore_msa_upper 1, 0x08, a0
+ restore_msa_upper 2, 0x10, a0
+ restore_msa_upper 3, 0x18, a0
+ restore_msa_upper 4, 0x20, a0
+ restore_msa_upper 5, 0x28, a0
+ restore_msa_upper 6, 0x30, a0
+ restore_msa_upper 7, 0x38, a0
+ restore_msa_upper 8, 0x40, a0
+ restore_msa_upper 9, 0x48, a0
+ restore_msa_upper 10, 0x50, a0
+ restore_msa_upper 11, 0x58, a0
+ restore_msa_upper 12, 0x60, a0
+ restore_msa_upper 13, 0x68, a0
+ restore_msa_upper 14, 0x70, a0
+ restore_msa_upper 15, 0x78, a0
+ restore_msa_upper 16, 0x80, a0
+ restore_msa_upper 17, 0x88, a0
+ restore_msa_upper 18, 0x90, a0
+ restore_msa_upper 19, 0x98, a0
+ restore_msa_upper 20, 0xa0, a0
+ restore_msa_upper 21, 0xa8, a0
+ restore_msa_upper 22, 0xb0, a0
+ restore_msa_upper 23, 0xb8, a0
+ restore_msa_upper 24, 0xc0, a0
+ restore_msa_upper 25, 0xc8, a0
+ restore_msa_upper 26, 0xd0, a0
+ restore_msa_upper 27, 0xd8, a0
+ restore_msa_upper 28, 0xe0, a0
+ restore_msa_upper 29, 0xe8, a0
+ restore_msa_upper 30, 0xf0, a0
+ restore_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_restore_msa_all_upper)
+
+#endif /* CONFIG_CPU_HAS_MSA */
.set reorder
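Annotation: the op_msa_wr macro above builds a jump table. The register index in a0 is scaled by 16 (sll t0, a0, 4), and each .align 4 stanza occupies exactly one 16-byte slot (op + jr + nop + padding), so jr t0 lands on the st.*/ld.* for the requested vector register. A C analogy of the same dispatch shape, purely illustrative:

    #include <stdio.h>

    typedef void (*wr_op)(void *base);

    static void save_wr0(void *base) { printf("save $w0 -> %p\n", base); }
    static void save_wr1(void *base) { printf("save $w1 -> %p\n", base); }
    /* ...one fixed-size handler per vector register, 32 in total... */

    static wr_op save_table[32] = { save_wr0, save_wr1 /* , ... */ };

    static void save_msa_wr(unsigned int idx, void *base)
    {
    	if (idx < 32 && save_table[idx])
    		save_table[idx](base);	/* the C analogue of 'jr t0' */
    }

    int main(void)
    {
    	char buf[16];

    	save_msa_wr(1, buf);
    	return 0;
    }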
diff --git a/kernel/arch/mips/kernel/r4k_switch.S b/kernel/arch/mips/kernel/r4k_switch.S
index 04cbbde35..92cd0516e 100644
--- a/kernel/arch/mips/kernel/r4k_switch.S
+++ b/kernel/arch/mips/kernel/r4k_switch.S
@@ -34,7 +34,7 @@
#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, s32 fp_save)
+ * struct thread_info *next_ti)
*/
.align 5
LEAF(resume)
@@ -43,45 +43,6 @@
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
- /*
- * Check whether we need to save any FP context. FP context is saved
- * iff the process has used the context with the scalar FPU or the MSA
- * ASE in the current time slice, as indicated by _TIF_USEDFPU and
- * _TIF_USEDMSA respectively. switch_to will have set fp_save
- * accordingly to an FP_SAVE_ enum value.
- */
- beqz a3, 2f
-
- /*
- * We do. Clear the saved CU1 bit for prev, such that next time it is
- * scheduled it will start in userland with the FPU disabled. If the
- * task uses the FPU then it will be enabled again via the do_cpu trap.
- * This allows us to lazily restore the FP context.
- */
- PTR_L t3, TASK_THREAD_INFO(a0)
- LONG_L t0, ST_OFF(t3)
- li t1, ~ST0_CU1
- and t0, t0, t1
- LONG_S t0, ST_OFF(t3)
-
- /* Check whether we're saving scalar or vector context. */
- bgtz a3, 1f
-
- /* Save 128b MSA vector context + scalar FP control & status. */
- .set push
- SET_HARDFLOAT
- cfc1 t1, fcr31
- msa_save_all a0
- .set pop /* SET_HARDFLOAT */
-
- sw t1, THREAD_FCR31(a0)
- b 2f
-
-1: /* Save 32b/64b scalar FP context. */
- fpu_save_double a0 t0 t1 # c0_status passed in t0
- # clobbers t1
-2:
-
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
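Annotation: the FP/MSA save logic removed from resume() did not disappear; it moved into the C side of the context switch. A hedged outline of the caller-side shape; lose_fpu_inatomic() is the real helper, while the wrapper is illustrative and elides details such as FCSR sanitization:

    static inline void save_fp_context_if_used(struct task_struct *prev)
    {
    	if (!test_tsk_thread_flag(prev, TIF_USEDFPU) &&
    	    !test_tsk_thread_flag(prev, TIF_USEDMSA))
    		return;	/* no live FPU/MSA state this time slice */

    	/* Saves any live context and drops FPU/MSA ownership. */
    	lose_fpu_inatomic(1, prev);
    }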
diff --git a/kernel/arch/mips/kernel/scall32-o32.S b/kernel/arch/mips/kernel/scall32-o32.S
index 6e8de80bb..2d23c834b 100644
--- a/kernel/arch/mips/kernel/scall32-o32.S
+++ b/kernel/arch/mips/kernel/scall32-o32.S
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp)
lw t1, PT_EPC(sp) # skip syscall on return
subu v0, v0, __NR_O32_Linux # check syscall number
- sltiu t0, v0, __NR_O32_Linux_syscalls + 1
addiu t1, 4 # skip to next instruction
sw t1, PT_EPC(sp)
- beqz t0, illegal_syscall
-
- sll t0, v0, 2
- la t1, sys_call_table
- addu t1, t0
- lw t2, (t1) # syscall routine
- beqz t2, illegal_syscall
sw a3, PT_R26(sp) # save a3 for syscall restarting
@@ -73,10 +65,11 @@ NESTED(handle_sys, PT_SIZE, sp)
.set noreorder
.set nomacro
-1: user_lw(t5, 16(t0)) # argument #5 from usp
-4: user_lw(t6, 20(t0)) # argument #6 from usp
-3: user_lw(t7, 24(t0)) # argument #7 from usp
-2: user_lw(t8, 28(t0)) # argument #8 from usp
+load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
+load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
+load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
+load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
+loads_done:
sw t5, 16(sp) # argument #5 to ksp
sw t6, 20(sp) # argument #6 to ksp
@@ -85,16 +78,26 @@ NESTED(handle_sys, PT_SIZE, sp)
.set pop
.section __ex_table,"a"
- PTR 1b,bad_stack
- PTR 2b,bad_stack
- PTR 3b,bad_stack
- PTR 4b,bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
bnez t0, syscall_trace_entry # -> yes
+syscall_common:
+ sltiu t0, v0, __NR_O32_Linux_syscalls + 1
+ beqz t0, illegal_syscall
+
+ sll t0, v0, 2
+ la t1, sys_call_table
+ addu t1, t0
+ lw t2, (t1) # syscall routine
+
+ beqz t2, illegal_syscall
jalr t2 # Do The Real Thing (TM)
@@ -115,7 +118,7 @@ o32_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
- move s0, t2
+ move s0, v0
move a0, sp
/*
@@ -128,33 +131,24 @@ syscall_trace_entry:
1: jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ move v0, s0 # restore syscall
- move t0, s0
RESTORE_STATIC
lw a0, PT_R4(sp) # Restore argument registers
lw a1, PT_R5(sp)
lw a2, PT_R6(sp)
lw a3, PT_R7(sp)
- jalr t0
-
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sw t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- lw t1, PT_R2(sp) # syscall number
- negu v0 # error
- sw t1, PT_R0(sp) # save it for syscall restarting
-1: sw v0, PT_R2(sp) # result
+ j syscall_common
-2: j syscall_exit
+1: j syscall_exit
/* ------------------------------------------------------------------------ */
/*
- * The stackpointer for a call with more than 4 arguments is bad.
- * We probably should handle this case a bit more drastic.
+ * Our open-coded access area sanity test for the stack pointer
+ * failed. We probably should handle this case a bit more drastically.
*/
bad_stack:
li v0, EFAULT
@@ -163,6 +157,22 @@ bad_stack:
sw t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li t5, 0
+ b load_a5
+
+bad_stack_a5:
+ li t6, 0
+ b load_a6
+
+bad_stack_a6:
+ li t7, 0
+ b load_a7
+
+bad_stack_a7:
+ li t8, 0
+ b loads_done
+
/*
* The system call does not exist in this kernel
*/
@@ -582,3 +592,6 @@ EXPORT(sys_call_table)
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
PTR sys_execveat
+ PTR sys_userfaultfd
+ PTR sys_membarrier
+ PTR sys_mlock2
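Annotation: the refactor funnels both the traced and untraced paths through syscall_common, so the -EMAXERRNO error check now lives in exactly one place. For reference, a C sketch of the o32 return convention that check implements (illustrative; the kernel does this in the assembly above, which additionally stashes the syscall number in PT_R0 on error for restarting):

    static void set_syscall_result(struct pt_regs *regs, long ret)
    {
    	if (ret < 0 && ret >= -EMAXERRNO) {
    		regs->regs[7] = 1;	/* a3 = 1: error */
    		regs->regs[2] = -ret;	/* v0 = positive errno */
    	} else {
    		regs->regs[7] = 0;	/* a3 = 0: success */
    		regs->regs[2] = ret;	/* v0 = result */
    	}
    }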
diff --git a/kernel/arch/mips/kernel/scall64-64.S b/kernel/arch/mips/kernel/scall64-64.S
index a6f6b762c..deac63315 100644
--- a/kernel/arch/mips/kernel/scall64-64.S
+++ b/kernel/arch/mips/kernel/scall64-64.S
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp)
.set at
#endif
- dsubu t0, v0, __NR_64_Linux # check syscall number
- sltiu t0, t0, __NR_64_Linux_syscalls + 1
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
ld t1, PT_EPC(sp) # skip syscall on return
daddiu t1, 4 # skip to next instruction
sd t1, PT_EPC(sp)
#endif
- beqz t0, illegal_syscall
-
- dsll t0, v0, 3 # offset into table
- ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
- # syscall routine
sd a3, PT_R26(sp) # save a3 for syscall restarting
@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp)
and t0, t1, t0
bnez t0, syscall_trace_entry
+syscall_common:
+ dsubu t2, v0, __NR_64_Linux
+ sltiu t0, t2, __NR_64_Linux_syscalls + 1
+ beqz t0, illegal_syscall
+
+ dsll t0, t2, 3 # offset into table
+ dla t2, sys_call_table
+ daddu t0, t2, t0
+ ld t2, (t0) # syscall routine
+ beqz t2, illegal_syscall
+
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -78,14 +82,14 @@ n64_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
- move s0, t2
+ move s0, v0
move a0, sp
move a1, v0
jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
- move t0, s0
+ move v0, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
@@ -93,19 +97,9 @@ syscall_trace_entry:
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
- jalr t0
-
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sd t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- ld t1, PT_R2(sp) # syscall number
- dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
-1: sd v0, PT_R2(sp) # result
+ j syscall_common
-2: j syscall_exit
+1: j syscall_exit
illegal_syscall:
/* This also isn't a 64-bit syscall, throw an error. */
@@ -436,4 +430,7 @@ EXPORT(sys_call_table)
PTR sys_memfd_create
PTR sys_bpf /* 5315 */
PTR sys_execveat
+ PTR sys_userfaultfd
+ PTR sys_membarrier
+ PTR sys_mlock2
.size sys_call_table,.-sys_call_table
diff --git a/kernel/arch/mips/kernel/scall64-n32.S b/kernel/arch/mips/kernel/scall64-n32.S
index 4b2010654..5a69eb48d 100644
--- a/kernel/arch/mips/kernel/scall64-n32.S
+++ b/kernel/arch/mips/kernel/scall64-n32.S
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
and t0, t1, t0
bnez t0, n32_syscall_trace_entry
+syscall_common:
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -75,9 +76,9 @@ n32_syscall_trace_entry:
move a1, v0
jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
- move t0, s0
+ move t2, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
@@ -85,19 +86,9 @@ n32_syscall_trace_entry:
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
- jalr t0
+ j syscall_common
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sd t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- ld t1, PT_R2(sp) # syscall number
- dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
-1: sd v0, PT_R2(sp) # result
-
-2: j syscall_exit
+1: j syscall_exit
not_n32_scall:
/* This is not an n32 compatibility syscall, pass it on to
@@ -429,4 +420,7 @@ EXPORT(sysn32_call_table)
PTR sys_memfd_create
PTR sys_bpf
PTR compat_sys_execveat /* 6320 */
+ PTR sys_userfaultfd
+ PTR sys_membarrier
+ PTR sys_mlock2
.size sysn32_call_table,.-sysn32_call_table
diff --git a/kernel/arch/mips/kernel/scall64-o32.S b/kernel/arch/mips/kernel/scall64-o32.S
index d07b210fb..e4b6d7c97 100644
--- a/kernel/arch/mips/kernel/scall64-o32.S
+++ b/kernel/arch/mips/kernel/scall64-o32.S
@@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp)
daddu t1, t0, 32
bltz t1, bad_stack
-1: lw a4, 16(t0) # argument #5 from usp
-2: lw a5, 20(t0) # argument #6 from usp
-3: lw a6, 24(t0) # argument #7 from usp
-4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)
+load_a4: lw a4, 16(t0) # argument #5 from usp
+load_a5: lw a5, 20(t0) # argument #6 from usp
+load_a6: lw a6, 24(t0) # argument #7 from usp
+load_a7: lw a7, 28(t0) # argument #8 from usp
+loads_done:
.section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- PTR 3b, bad_stack
- PTR 4b, bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -86,6 +87,7 @@ NESTED(handle_sys, PT_SIZE, sp)
and t0, t1, t0
bnez t0, trace_a_syscall
+syscall_common:
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -129,9 +131,9 @@ trace_a_syscall:
1: jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
- move t0, s0
+ move t2, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
@@ -141,19 +143,9 @@ trace_a_syscall:
ld a5, PT_R9(sp)
ld a6, PT_R10(sp)
ld a7, PT_R11(sp) # For indirect syscalls
- jalr t0
+ j syscall_common
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sd t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- ld t1, PT_R2(sp) # syscall number
- dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
-1: sd v0, PT_R2(sp) # result
-
-2: j syscall_exit
+1: j syscall_exit
/* ------------------------------------------------------------------------ */
@@ -167,6 +159,22 @@ bad_stack:
sd t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li a4, 0
+ b load_a5
+
+bad_stack_a5:
+ li a5, 0
+ b load_a6
+
+bad_stack_a6:
+ li a6, 0
+ b load_a7
+
+bad_stack_a7:
+ li a7, 0
+ b loads_done
+
not_o32_scall:
/*
* This is not an o32 compatibility syscall, pass it on
@@ -383,7 +391,7 @@ EXPORT(sys32_call_table)
PTR sys_connect /* 4170 */
PTR sys_getpeername
PTR sys_getsockname
- PTR sys_getsockopt
+ PTR compat_sys_getsockopt
PTR sys_listen
PTR compat_sys_recv /* 4175 */
PTR compat_sys_recvfrom
@@ -567,4 +575,7 @@ EXPORT(sys32_call_table)
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
PTR compat_sys_execveat
+ PTR sys_userfaultfd
+ PTR sys_membarrier
+ PTR sys_mlock2
.size sys32_call_table,.-sys32_call_table
diff --git a/kernel/arch/mips/kernel/segment.c b/kernel/arch/mips/kernel/segment.c
index 076ead2a9..87bc74a5a 100644
--- a/kernel/arch/mips/kernel/segment.c
+++ b/kernel/arch/mips/kernel/segment.c
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/cpu.h>
+#include <asm/debug.h>
#include <asm/mipsregs.h>
static void build_segment_config(char *str, unsigned int cfg)
@@ -91,7 +92,6 @@ static const struct file_operations segments_fops = {
static int __init segments_info(void)
{
- extern struct dentry *mips_debugfs_dir;
struct dentry *segments;
if (cpu_has_segments) {
diff --git a/kernel/arch/mips/kernel/setup.c b/kernel/arch/mips/kernel/setup.c
index be73c4911..66aac55df 100644
--- a/kernel/arch/mips/kernel/setup.c
+++ b/kernel/arch/mips/kernel/setup.c
@@ -33,11 +33,16 @@
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
+#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+const char __section(.appended_dtb) __appended_dtb[0x100000];
+#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
+
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_data);
@@ -337,6 +342,11 @@ static void __init bootmem_init(void)
min_low_pfn = start;
if (end <= reserved_end)
continue;
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Skip zones before initrd and initrd itself */
+ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
+ continue;
+#endif
if (start >= mapstart)
continue;
mapstart = max(reserved_end, start);
@@ -479,7 +489,7 @@ static void __init bootmem_init(void)
* o bootmem_init()
* o sparse_init()
* o paging_init()
- * o dma_continguous_reserve()
+ * o dma_contiguous_reserve()
*
* At this stage the bootmem allocator is ready to use.
*
@@ -611,6 +621,10 @@ static void __init request_crashkernel(struct resource *res)
}
#endif /* !defined(CONFIG_KEXEC) */
+#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
+#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
+#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_EXTEND)
+
static void __init arch_mem_init(char **cmdline_p)
{
struct memblock_region *reg;
@@ -635,18 +649,24 @@ static void __init arch_mem_init(char **cmdline_p)
pr_info("Determined physical RAM map:\n");
print_memory_map();
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
+#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
+ if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
+ (USE_DTB_CMDLINE && !boot_command_line[0]))
+ strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+
+ if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+ }
+
+#if defined(CONFIG_CMDLINE_BOOL)
if (builtin_cmdline[0]) {
- strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
- strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
}
- strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
-#else
- strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
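Annotation: the rewritten block gives the command-line sources an explicit precedence: whatever the DTB put in boot_command_line, optionally replaced or extended by the bootloader string (arcs_cmdline), and finally extended by the built-in string. A hedged userspace model of the same decision tree, with the Kconfig switches as plain booleans:

    #include <stdio.h>
    #include <string.h>

    #define CMDLINE_SIZE 256

    /* Stand-ins for the Kconfig choices; flip to taste. */
    static const int use_prom = 0;	/* CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER */
    static const int use_dtb  = 1;	/* CONFIG_MIPS_CMDLINE_FROM_DTB */
    static const int extend   = 0;	/* CONFIG_MIPS_CMDLINE_EXTEND */

    static void build_cmdline(char *out, const char *dtb, const char *prom,
    			  const char *builtin)
    {
    	/* 'out' starts as whatever the DTB provided (may be empty). */
    	snprintf(out, CMDLINE_SIZE, "%s", dtb);

    	if ((use_prom && prom[0]) || (use_dtb && !out[0]))
    		snprintf(out, CMDLINE_SIZE, "%s", prom);

    	if (extend && prom[0]) {
    		strncat(out, " ", CMDLINE_SIZE - strlen(out) - 1);
    		strncat(out, prom, CMDLINE_SIZE - strlen(out) - 1);
    	}

    	if (builtin[0]) {	/* CONFIG_CMDLINE_BOOL */
    		strncat(out, " ", CMDLINE_SIZE - strlen(out) - 1);
    		strncat(out, builtin, CMDLINE_SIZE - strlen(out) - 1);
    	}
    }

    int main(void)
    {
    	char cmdline[CMDLINE_SIZE];

    	build_cmdline(cmdline, "console=ttyS0", "mem=64M", "debug");
    	printf("%s\n", cmdline);	/* -> "console=ttyS0 debug" */
    	return 0;
    }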
diff --git a/kernel/arch/mips/kernel/signal-common.h b/kernel/arch/mips/kernel/signal-common.h
index 0b85f827c..f50d48435 100644
--- a/kernel/arch/mips/kernel/signal-common.h
+++ b/kernel/arch/mips/kernel/signal-common.h
@@ -31,4 +31,13 @@ extern int fpcsr_pending(unsigned int __user *fpcsr);
#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *csr);
+
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+
#endif /* __SIGNAL_COMMON_H */
diff --git a/kernel/arch/mips/kernel/signal.c b/kernel/arch/mips/kernel/signal.c
index 6a28c792d..bf792e283 100644
--- a/kernel/arch/mips/kernel/signal.c
+++ b/kernel/arch/mips/kernel/signal.c
@@ -21,6 +21,7 @@
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
+#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
@@ -35,23 +36,23 @@
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
-#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>
+#include <asm/msa.h>
#include "signal-common.h"
-static int (*save_fp_context)(struct sigcontext __user *sc);
-static int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
+static int (*save_fp_context)(void __user *sc);
+static int (*restore_fp_context)(void __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
+
+ /* Matches struct ucontext from its uc_mcontext field onwards */
struct sigcontext sf_sc;
sigset_t sf_mask;
+ unsigned long long sf_extcontext[0];
};
struct rt_sigframe {
@@ -65,43 +66,255 @@ struct rt_sigframe {
* Thread saved context copy to/from a signal context presumed to be on the
* user stack, and therefore accessed with appropriate macros from uaccess.h.
*/
-static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_to_sigcontext(void __user *sc)
{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
int i;
int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
- for (i = 0; i < NUM_FPU_REGS; i++) {
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
err |=
__put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
- &sc->sc_fpregs[i]);
+ &fpregs[i]);
}
- err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+ err |= __put_user(current->thread.fpu.fcr31, csr);
return err;
}
-static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_from_sigcontext(void __user *sc)
{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
int i;
int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
u64 fpr_val;
- for (i = 0; i < NUM_FPU_REGS; i++) {
- err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |= __get_user(fpr_val, &fpregs[i]);
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
}
- err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+ err |= __get_user(current->thread.fpu.fcr31, csr);
+
+ return err;
+}
+
+/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _save_fp_context(fpregs, csr);
+}
+
+static int restore_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _restore_fp_context(fpregs, csr);
+}
+
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+ struct ucontext __user *uc;
+
+ /*
+ * We can just pretend the sigcontext is always embedded in a struct
+ * ucontext here, because the offset from sigcontext to extended
+ * context is the same in the struct sigframe case.
+ */
+ uc = container_of(sc, struct ucontext, uc_mcontext);
+ return &uc->uc_extcontext;
+}
+
+static int save_msa_extcontext(void __user *buf)
+{
+ struct msa_extcontext __user *msa = buf;
+ uint64_t val;
+ int i, err;
+
+ if (!thread_msa_context_live())
+ return 0;
+
+ /*
+ * Ensure that we can't lose the live MSA context between checking
+ * for it & writing it to memory.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be saved to kernel memory
+ * and then copied to user memory. The save to kernel memory
+ * should already have been done when handling scalar FP
+ * context.
+ */
+ BUG_ON(config_enabled(CONFIG_EVA));
+
+ err = __put_user(read_msa_csr(), &msa->csr);
+ err |= _save_msa_all_upper(&msa->wr);
+
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+ err |= __put_user(val, &msa->wr[i]);
+ }
+ }
+
+ err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+ err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+ return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+ struct msa_extcontext __user *msa = buf;
+ unsigned long long val;
+ unsigned int csr;
+ int i, err;
+
+ if (size != sizeof(*msa))
+ return -EINVAL;
+
+ err = get_user(csr, &msa->csr);
+ if (err)
+ return err;
+
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be copied to kernel
+ * memory and later loaded to registers. The same is true of
+ * scalar FP context, so FPU & MSA should have already been
+ * disabled whilst handling scalar FP context.
+ */
+ BUG_ON(config_enabled(CONFIG_EVA));
+
+ write_msa_csr(csr);
+ err |= _restore_msa_all_upper(&msa->wr);
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ current->thread.fpu.msacsr = csr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(val, &msa->wr[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+ }
+ }
return err;
}
+static int save_extcontext(void __user *buf)
+{
+ int sz;
+
+ sz = save_msa_extcontext(buf);
+ if (sz < 0)
+ return sz;
+ buf += sz;
+
+ /* If no context was saved then trivially return */
+ if (!sz)
+ return 0;
+
+ /* Write the end marker */
+ if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+ return -EFAULT;
+
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+ return sz;
+}
+
+static int restore_extcontext(void __user *buf)
+{
+ struct extcontext ext;
+ int err;
+
+ while (1) {
+ err = __get_user(ext.magic, (unsigned int *)buf);
+ if (err)
+ return err;
+
+ if (ext.magic == END_EXTCONTEXT_MAGIC)
+ return 0;
+
+ err = __get_user(ext.size, (unsigned int *)(buf
+ + offsetof(struct extcontext, size)));
+ if (err)
+ return err;
+
+ switch (ext.magic) {
+ case MSA_EXTCONTEXT_MAGIC:
+ err = restore_msa_extcontext(buf, ext.size);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ buf += ext.size;
+ }
+}
+
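[Note: restore_extcontext() walks those records as a simple type-length chain terminated by END_EXTCONTEXT_MAGIC, and a signal handler can locate a record the same way from user space. This sketch assumes the uapi magic values (0x784d5341 / 0x78454e44) and invents find_record() for illustration.]

    #include <stdint.h>
    #include <string.h>

    #define MSA_MAGIC 0x784d5341 /* "xMSA" */
    #define END_MAGIC 0x78454e44 /* "xEND" */

    static const void *find_record(const void *ext, uint32_t want)
    {
            const unsigned char *p = ext;
            uint32_t magic, size;

            for (;;) {
                    memcpy(&magic, p, 4);
                    if (magic == END_MAGIC)
                            return 0;       /* record not present */
                    if (magic == want)
                            return p;
                    memcpy(&size, p + 4, 4);
                    if (size < 8)
                            return 0;       /* malformed record */
                    p += size;
            }
    }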
/*
* Helper routines
*/
-static int protected_save_fp_context(struct sigcontext __user *sc)
+int protected_save_fp_context(void __user *sc)
{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used, ext_sz;
int err;
-#ifndef CONFIG_EVA
+
+ used = used_math() ? USED_FP : 0;
+ if (!used)
+ goto fp_done;
+
+ if (!test_thread_flag(TIF_32BIT_FPREGS))
+ used |= USED_FR1;
+ if (test_thread_flag(TIF_HYBRID_FPREGS))
+ used |= USED_HYBRID_FPRS;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so
+ * save to the kernel FP context & copy that to userland below.
+ */
+ if (config_enabled(CONFIG_EVA))
+ lose_fpu(1);
+
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
@@ -114,27 +327,57 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
if (likely(!err))
break;
/* touch the sigcontext and try again */
- err = __put_user(0, &sc->sc_fpregs[0]) |
- __put_user(0, &sc->sc_fpregs[31]) |
- __put_user(0, &sc->sc_fpc_csr);
+ err = __put_user(0, &fpregs[0]) |
+ __put_user(0, &fpregs[31]) |
+ __put_user(0, csr);
if (err)
- break; /* really bad sigcontext */
+ return err; /* really bad sigcontext */
}
-#else
- /*
- * EVA does not have FPU EVA instructions so saving fpu context directly
- * does not work.
- */
- lose_fpu(1);
- err = save_fp_context(sc); /* this might fail */
-#endif
- return err;
+
+fp_done:
+ ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+ if (err < 0)
+ return err;
+ used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+ return __put_user(used, used_math);
}
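[Note: the loop above runs the user copy with the FPU owner lock held (page faults disabled) and, on -EFAULT, probes the first and last words of the destination with __put_user() to fault the pages in before retrying. The control flow, reduced to a runnable skeleton with stand-in helpers.]

    #include <stdio.h>

    static int attempts;

    /* Stand-ins: try_copy() fails twice, as a locked non-faulting
     * copy would on an unmapped page; touch_buffer() models the
     * __put_user() probes that fault the pages in. */
    static int try_copy(void)     { return ++attempts < 3 ? -14 : 0; }
    static int touch_buffer(void) { return 0; }

    static int protected_save(void)
    {
            int err;

            for (;;) {
                    err = try_copy();
                    if (!err)
                            return 0;
                    err = touch_buffer();
                    if (err)
                            return err; /* really bad buffer */
            }
    }

    int main(void) { printf("%d\n", protected_save()); return 0; }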
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+int protected_restore_fp_context(void __user *sc)
{
- int err, tmp __maybe_unused;
-#ifndef CONFIG_EVA
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used;
+ int err, sig = 0, tmp __maybe_unused;
+
+ err = __get_user(used, used_math);
+ conditional_used_math(used & USED_FP);
+
+ /*
+ * The signal handler may have used FPU; give it up if the program
+ * doesn't want it following sigreturn.
+ */
+ if (err || !(used & USED_FP))
+ lose_fpu(0);
+ if (err)
+ return err;
+ if (!(used & USED_FP))
+ goto fp_done;
+
+ err = sig = fpcsr_pending(csr);
+ if (err < 0)
+ return err;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so we
+ * disable the FPU here such that the code below simply copies to
+ * the kernel FP context.
+ */
+ if (config_enabled(CONFIG_EVA))
+ lose_fpu(0);
+
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
@@ -147,28 +390,24 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
if (likely(!err))
break;
/* touch the sigcontext and try again */
- err = __get_user(tmp, &sc->sc_fpregs[0]) |
- __get_user(tmp, &sc->sc_fpregs[31]) |
- __get_user(tmp, &sc->sc_fpc_csr);
+ err = __get_user(tmp, &fpregs[0]) |
+ __get_user(tmp, &fpregs[31]) |
+ __get_user(tmp, csr);
if (err)
break; /* really bad sigcontext */
}
-#else
- /*
- * EVA does not have FPU EVA instructions so restoring fpu context
- * directly does not work.
- */
- lose_fpu(0);
- err = restore_fp_context(sc); /* this might fail */
-#endif
- return err;
+
+fp_done:
+ if (used & USED_EXTCONTEXT)
+ err |= restore_extcontext(sc_to_extcontext(sc));
+
+ return err ?: sig;
}
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int i;
- unsigned int used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@@ -191,19 +430,38 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
- used_math = !!used_math();
- err |= __put_user(used_math, &sc->sc_used_math);
- if (used_math) {
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context(sc);
- }
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
return err;
}
+static size_t extcontext_max_size(void)
+{
+ size_t sz = 0;
+
+ /*
+ * The assumption here is that between this point & the point at which
+ * the extended context is saved the size of the context should only
+ * ever be able to shrink (if the task is preempted), but never grow.
+ * That is, what this function returns is an upper bound on the size of
+ * the extended context for the current task at the current time.
+ */
+
+ if (thread_msa_context_live())
+ sz += sizeof(struct msa_extcontext);
+
+ /* If any context is saved then we'll append the end marker */
+ if (sz)
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+
+ return sz;
+}
+
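[Note: the sizeof(((struct extcontext *)NULL)->magic) expression above is safe because sizeof never evaluates its operand; it is a purely compile-time way to name one member's size. A one-file demonstration.]

    #include <stdint.h>
    #include <stdio.h>

    struct header { uint32_t magic; uint32_t size; };

    int main(void)
    {
            /* No dereference occurs: sizeof is an unevaluated context. */
            printf("%zu\n", sizeof(((struct header *)0)->magic)); /* 4 */
            return 0;
    }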
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
@@ -223,21 +481,8 @@ int fpcsr_pending(unsigned int __user *fpcsr)
return err ?: sig;
}
-static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
-{
- int err, sig;
-
- err = sig = fpcsr_pending(&sc->sc_fpc_csr);
- if (err > 0)
- err = 0;
- err |= protected_restore_fp_context(sc);
- return err ?: sig;
-}
-
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
- unsigned int used_math;
unsigned long treg;
int err = 0;
int i;
@@ -265,19 +510,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- if (used_math) {
- /* restore fpu context if we have used it before */
- if (!err)
- err = check_and_restore_fp_context(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu(0);
- }
-
- return err;
+ return err ?: protected_restore_fp_context(sc);
}
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
@@ -285,6 +518,9 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
{
unsigned long sp;
+ /* Leave space for potential extended context */
+ frame_size += extcontext_max_size();
+
/* Default to using normal stack */
sp = regs->regs[29];
@@ -515,12 +751,15 @@ static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
.setup_frame = setup_frame,
- .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
.setup_rt_frame = setup_rt_frame,
- .rt_signal_return_offset =
- offsetof(struct mips_vdso, rt_signal_trampoline),
- .restart = __NR_restart_syscall
+ .restart = __NR_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
+
+ .vdso = &vdso_image,
};
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
@@ -560,11 +799,11 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
}
if (sig_uses_siginfo(&ksig->ka))
- ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
+ ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
ksig, regs, oldset);
else
- ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
- regs, oldset);
+ ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
+ ksig, regs, oldset);
signal_setup_done(ret, ksig, 0);
}
@@ -616,6 +855,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
user_exit();
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -629,43 +871,46 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
}
#ifdef CONFIG_SMP
-#ifndef CONFIG_EVA
-static int smp_save_fp_context(struct sigcontext __user *sc)
+static int smp_save_fp_context(void __user *sc)
{
return raw_cpu_has_fpu
- ? _save_fp_context(sc)
+ ? save_hw_fp_context(sc)
: copy_fp_to_sigcontext(sc);
}
-static int smp_restore_fp_context(struct sigcontext __user *sc)
+static int smp_restore_fp_context(void __user *sc)
{
return raw_cpu_has_fpu
- ? _restore_fp_context(sc)
+ ? restore_hw_fp_context(sc)
: copy_fp_from_sigcontext(sc);
}
-#endif /* CONFIG_EVA */
#endif
static int signal_setup(void)
{
-#ifndef CONFIG_EVA
+ /*
+ * The offset from sigcontext to extended context should be the same
+ * regardless of the type of signal, such that userland can always know
+ * where to look if it wishes to find the extended context structures.
+ */
+ BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+ offsetof(struct sigframe, sf_sc)) !=
+ (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+ offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
#ifdef CONFIG_SMP
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
restore_fp_context = smp_restore_fp_context;
#else
if (cpu_has_fpu) {
- save_fp_context = _save_fp_context;
- restore_fp_context = _restore_fp_context;
+ save_fp_context = save_hw_fp_context;
+ restore_fp_context = restore_hw_fp_context;
} else {
save_fp_context = copy_fp_to_sigcontext;
restore_fp_context = copy_fp_from_sigcontext;
}
#endif /* CONFIG_SMP */
-#else
- save_fp_context = copy_fp_to_sigcontext;
- restore_fp_context = copy_fp_from_sigcontext;
-#endif
return 0;
}
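[Note: the BUILD_BUG_ON() in signal_setup() freezes the sigcontext-to-extcontext distance at compile time, so userland sees one offset regardless of frame type. The same check in portable C11, with invented frame layouts just to make the assertion compile.]

    #include <stddef.h>

    struct frame_a { long pad[24]; long sc;       long extcontext; };
    struct frame_b { long info[4]; long mcontext; long extcontext; };

    _Static_assert(offsetof(struct frame_a, extcontext) -
                   offsetof(struct frame_a, sc) ==
                   offsetof(struct frame_b, extcontext) -
                   offsetof(struct frame_b, mcontext),
                   "extcontext offset differs between frame types");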
diff --git a/kernel/arch/mips/kernel/signal32.c b/kernel/arch/mips/kernel/signal32.c
index 5d7f26349..4909639aa 100644
--- a/kernel/arch/mips/kernel/signal32.c
+++ b/kernel/arch/mips/kernel/signal32.c
@@ -31,17 +31,10 @@
#include <asm/ucontext.h>
#include <asm/fpu.h>
#include <asm/war.h>
-#include <asm/vdso.h>
#include <asm/dsp.h>
#include "signal-common.h"
-static int (*save_fp_context32)(struct sigcontext32 __user *sc);
-static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
-extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-
/*
* Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
*/
@@ -74,99 +67,11 @@ struct rt_sigframe32 {
struct ucontext32 rs_uc;
};
-/*
- * Thread saved context copy to/from a signal context presumed to be on the
- * user stack, and therefore accessed with appropriate macros from uaccess.h.
- */
-static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
-{
- int i;
- int err = 0;
- int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-
- for (i = 0; i < NUM_FPU_REGS; i += inc) {
- err |=
- __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
- &sc->sc_fpregs[i]);
- }
- err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-
-static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
-{
- int i;
- int err = 0;
- int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
- u64 fpr_val;
-
- for (i = 0; i < NUM_FPU_REGS; i += inc) {
- err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
- set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
- }
- err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-
-/*
- * sigcontext handlers
- */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
-{
- int err;
- while (1) {
- lock_fpu_owner();
- if (is_fpu_owner()) {
- err = save_fp_context32(sc);
- unlock_fpu_owner();
- } else {
- unlock_fpu_owner();
- err = copy_fp_to_sigcontext32(sc);
- }
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __put_user(0, &sc->sc_fpregs[0]) |
- __put_user(0, &sc->sc_fpregs[31]) |
- __put_user(0, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
-}
-
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
-{
- int err, tmp __maybe_unused;
- while (1) {
- lock_fpu_owner();
- if (is_fpu_owner()) {
- err = restore_fp_context32(sc);
- unlock_fpu_owner();
- } else {
- unlock_fpu_owner();
- err = copy_fp_from_sigcontext32(sc);
- }
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __get_user(tmp, &sc->sc_fpregs[0]) |
- __get_user(tmp, &sc->sc_fpregs[31]) |
- __get_user(tmp, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
-}
-
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
int err = 0;
int i;
- u32 used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@@ -186,35 +91,18 @@ static int setup_sigcontext32(struct pt_regs *regs,
err |= __put_user(mflo3(), &sc->sc_lo3);
}
- used_math = !!used_math();
- err |= __put_user(used_math, &sc->sc_used_math);
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
- if (used_math) {
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context32(sc);
- }
return err;
}
-static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
-{
- int err, sig;
-
- err = sig = fpcsr_pending(&sc->sc_fpc_csr);
- if (err > 0)
- err = 0;
- err |= protected_restore_fp_context32(sc);
- return err ?: sig;
-}
-
static int restore_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
- u32 used_math;
int err = 0;
s32 treg;
int i;
@@ -238,70 +126,7 @@ static int restore_sigcontext32(struct pt_regs *regs,
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- if (used_math) {
- /* restore fpu context if we have used it before */
- if (!err)
- err = check_and_restore_fp_context32(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu(0);
- }
-
- return err;
-}
-
-/*
- *
- */
-extern void __put_sigset_unknown_nsig(void);
-extern void __get_sigset_unknown_nsig(void);
-
-static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
-{
- int err = 0;
-
- if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
- return -EFAULT;
-
- switch (_NSIG_WORDS) {
- default:
- __put_sigset_unknown_nsig();
- case 2:
- err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
- err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
- case 1:
- err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
- err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
- }
-
- return err;
-}
-
-static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
-{
- int err = 0;
- unsigned long sig[4];
-
- if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
- return -EFAULT;
-
- switch (_NSIG_WORDS) {
- default:
- __get_sigset_unknown_nsig();
- case 2:
- err |= __get_user(sig[3], &ubuf->sig[3]);
- err |= __get_user(sig[2], &ubuf->sig[2]);
- kbuf->sig[1] = sig[2] | (sig[3] << 32);
- case 1:
- err |= __get_user(sig[1], &ubuf->sig[1]);
- err |= __get_user(sig[0], &ubuf->sig[0]);
- kbuf->sig[0] = sig[0] | (sig[1] << 32);
- }
-
- return err;
+ return err ?: protected_restore_fp_context(sc);
}
/*
@@ -580,25 +405,12 @@ static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
*/
struct mips_abi mips_abi_32 = {
.setup_frame = setup_frame_32,
- .signal_return_offset =
- offsetof(struct mips_vdso, o32_signal_trampoline),
.setup_rt_frame = setup_rt_frame_32,
- .rt_signal_return_offset =
- offsetof(struct mips_vdso, o32_rt_signal_trampoline),
- .restart = __NR_O32_restart_syscall
-};
-
-static int signal32_init(void)
-{
- if (cpu_has_fpu) {
- save_fp_context32 = _save_fp_context32;
- restore_fp_context32 = _restore_fp_context32;
- } else {
- save_fp_context32 = copy_fp_to_sigcontext32;
- restore_fp_context32 = copy_fp_from_sigcontext32;
- }
+ .restart = __NR_O32_restart_syscall,
- return 0;
-}
+ .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
-arch_initcall(signal32_init);
+ .vdso = &vdso_image_o32,
+};
diff --git a/kernel/arch/mips/kernel/signal_n32.c b/kernel/arch/mips/kernel/signal_n32.c
index f1d4751ee..a7bc38430 100644
--- a/kernel/arch/mips/kernel/signal_n32.c
+++ b/kernel/arch/mips/kernel/signal_n32.c
@@ -38,7 +38,6 @@
#include <asm/fpu.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
-#include <asm/vdso.h>
#include "signal-common.h"
@@ -151,7 +150,11 @@ static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig,
struct mips_abi mips_abi_n32 = {
.setup_rt_frame = setup_rt_frame_n32,
- .rt_signal_return_offset =
- offsetof(struct mips_vdso, n32_rt_signal_trampoline),
- .restart = __NR_N32_restart_syscall
+ .restart = __NR_N32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
+
+ .vdso = &vdso_image_n32,
};
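[Note: with the trampoline offsets gone, struct mips_abi now carries byte offsets into the 32- or 64-bit sigcontext, letting one copy of the signal code serve every ABI through a void pointer. A reduced model of that dispatch; the names here are illustrative, not the kernel's.]

    #include <stddef.h>
    #include <stdint.h>

    struct abi_offsets {
            size_t off_fpregs;
            size_t off_fpc_csr;
    };

    /* One implementation, several layouts: the per-ABI table turns a
     * typed member access into offset arithmetic on an untyped frame. */
    static uint64_t *frame_fpregs(void *sc, const struct abi_offsets *abi)
    {
            return (uint64_t *)((char *)sc + abi->off_fpregs);
    }

    static uint32_t *frame_csr(void *sc, const struct abi_offsets *abi)
    {
            return (uint32_t *)((char *)sc + abi->off_fpc_csr);
    }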
diff --git a/kernel/arch/mips/kernel/smp-bmips.c b/kernel/arch/mips/kernel/smp-bmips.c
index 336708ae5..78cf8c2f1 100644
--- a/kernel/arch/mips/kernel/smp-bmips.c
+++ b/kernel/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
if (action == 0)
scheduler_ipi();
else
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/kernel/arch/mips/kernel/smp-cps.c b/kernel/arch/mips/kernel/smp-cps.c
index 4251d390b..e04c8057b 100644
--- a/kernel/arch/mips/kernel/smp-cps.c
+++ b/kernel/arch/mips/kernel/smp-cps.c
@@ -8,6 +8,7 @@
* option) any later version.
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched.h>
@@ -37,8 +38,9 @@ static unsigned core_vpe_count(unsigned core)
if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
return 1;
- write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ mips_cm_lock_other(core, 0);
cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+ mips_cm_unlock_other();
return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
@@ -133,11 +135,9 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/*
* Patch the start of mips_cps_core_entry to provide:
*
- * v0 = CM base address
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
- UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
uasm_i_addiu(&entry_code, 16, 0, cca);
blast_dcache_range((unsigned long)&mips_cps_core_entry,
(unsigned long)entry_code);
@@ -190,10 +190,11 @@ err_out:
static void boot_core(unsigned core)
{
- u32 access;
+ u32 access, stat, seq_state;
+ unsigned timeout;
/* Select the appropriate core */
- write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ mips_cm_lock_other(core, 0);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -210,12 +211,36 @@ static void boot_core(unsigned core)
/* Reset the core */
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+
+ timeout = 100;
+ while (true) {
+ stat = read_cpc_co_stat_conf();
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+
+ /* U6 == coherent execution, ie. the core is up */
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
+ break;
+
+ /* Delay a little while before we start warning */
+ if (timeout) {
+ timeout--;
+ mdelay(10);
+ continue;
+ }
+
+ pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
+ core, stat);
+ mdelay(1000);
+ }
+
mips_cpc_unlock_other();
} else {
/* Take the core out of reset */
write_gcr_co_reset_release(0);
}
+ mips_cm_unlock_other();
+
/* The core is now powered up */
bitmap_set(core_power, core, 1);
}
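[Note: boot_core() now waits for the CPC sequencer to report coherent execution, polling every 10ms for a one-second grace period before warning once per second. The shape of that loop as a runnable sketch, with stubs standing in for the CPC accessors.]

    #include <stdio.h>
    #include <unistd.h>

    static int polls;
    static unsigned read_status(void) { return ++polls; }   /* stub */
    static int core_up(unsigned stat) { return stat >= 3; } /* stub */
    static void sleep_ms(unsigned ms) { usleep(ms * 1000); }

    static void wait_until_up(void)
    {
            unsigned timeout = 100; /* 100 * 10ms = 1s grace period */
            unsigned stat;

            for (;;) {
                    stat = read_status();
                    if (core_up(stat))
                            break;
                    if (timeout) {
                            timeout--;
                            sleep_ms(10);
                            continue;
                    }
                    fprintf(stderr, "still waiting, stat=%u\n", stat);
                    sleep_ms(1000);
            }
    }

    int main(void) { wait_until_up(); return 0; }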
@@ -369,7 +394,7 @@ void play_dead(void)
static void wait_for_sibling_halt(void *ptr_cpu)
{
- unsigned cpu = (unsigned)ptr_cpu;
+ unsigned cpu = (unsigned long)ptr_cpu;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned halted;
unsigned long flags;
@@ -430,7 +455,7 @@ static void cps_cpu_die(unsigned int cpu)
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
- (void *)cpu, 1);
+ (void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
}
diff --git a/kernel/arch/mips/kernel/smp-gic.c b/kernel/arch/mips/kernel/smp-gic.c
index 5f0ab5bcd..9b63829cf 100644
--- a/kernel/arch/mips/kernel/smp-gic.c
+++ b/kernel/arch/mips/kernel/smp-gic.c
@@ -46,9 +46,11 @@ void gic_send_ipi_single(int cpu, unsigned int action)
if (mips_cpc_present() && (core != current_cpu_data.core)) {
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+ mips_cm_lock_other(core, 0);
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
mips_cpc_unlock_other();
+ mips_cm_unlock_other();
}
}
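[Note: both here and in boot_core() the CM other-core lock is taken before, and released after, the CPC other-core lock, keeping a single global order between the two. A minimal model of that bracketing, with stub lock functions standing in for the real accessors.]

    /* The CM "other" lock always nests outside the CPC "other" lock;
     * taking them in the opposite order elsewhere could deadlock. */
    static void cm_lock(void)    { }
    static void cm_unlock(void)  { }
    static void cpc_lock(void)   { }
    static void cpc_unlock(void) { }

    static void power_up_other_core(void)
    {
            cm_lock();
            cpc_lock();
            /* write_cpc_co_cmd(CPC_Cx_CMD_PWRUP) would go here */
            cpc_unlock();
            cm_unlock();
    }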
diff --git a/kernel/arch/mips/kernel/smp.c b/kernel/arch/mips/kernel/smp.c
index d0744cc77..2b521e07b 100644
--- a/kernel/arch/mips/kernel/smp.c
+++ b/kernel/arch/mips/kernel/smp.c
@@ -42,6 +42,7 @@
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
+#include <asm/maar.h>
cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
@@ -120,6 +121,7 @@ static inline void calculate_cpu_foreign_map(void)
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
@@ -157,6 +159,7 @@ asmlinkage void start_secondary(void)
mips_clockevent_init();
mp_ops->init_secondary();
cpu_report();
+ maar_init();
/*
* XXX parity protection should be folded in here when it's converted
@@ -192,16 +195,6 @@ asmlinkage void start_secondary(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
- irq_enter();
- generic_smp_call_function_interrupt();
- irq_exit();
-}
-
static void stop_this_cpu(void *dummy)
{
/*
diff --git a/kernel/arch/mips/kernel/spinlock_test.c b/kernel/arch/mips/kernel/spinlock_test.c
index 39f7ab7b0..f7d86955d 100644
--- a/kernel/arch/mips/kernel/spinlock_test.c
+++ b/kernel/arch/mips/kernel/spinlock_test.c
@@ -5,7 +5,7 @@
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/spinlock.h>
-
+#include <asm/debug.h>
static int ss_get(void *data, u64 *val)
{
@@ -115,8 +115,6 @@ static int multi_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
-
-extern struct dentry *mips_debugfs_dir;
static int __init spinlock_test(void)
{
struct dentry *d;
diff --git a/kernel/arch/mips/kernel/spram.c b/kernel/arch/mips/kernel/spram.c
index d1168d7c3..8489c88f9 100644
--- a/kernel/arch/mips/kernel/spram.c
+++ b/kernel/arch/mips/kernel/spram.c
@@ -209,6 +209,7 @@ void spram_config(void)
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_QEMU_GENERIC:
+ case CPU_I6400:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
if (config0 & (1<<24)) {
diff --git a/kernel/arch/mips/kernel/stacktrace.c b/kernel/arch/mips/kernel/stacktrace.c
index 1ba775d24..506021f62 100644
--- a/kernel/arch/mips/kernel/stacktrace.c
+++ b/kernel/arch/mips/kernel/stacktrace.c
@@ -12,14 +12,15 @@
* Save stack-backtrace addresses into a stack_trace buffer:
*/
static void save_raw_context_stack(struct stack_trace *trace,
- unsigned long reg29)
+ unsigned long reg29, int savesched)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
- if (__kernel_text_address(addr)) {
+ if (__kernel_text_address(addr) &&
+ (savesched || !in_sched_functions(addr))) {
if (trace->skip > 0)
trace->skip--;
else
@@ -31,7 +32,7 @@ static void save_raw_context_stack(struct stack_trace *trace,
}
static void save_context_stack(struct stack_trace *trace,
- struct task_struct *tsk, struct pt_regs *regs)
+ struct task_struct *tsk, struct pt_regs *regs, int savesched)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
@@ -43,20 +44,22 @@ static void save_context_stack(struct stack_trace *trace,
(unsigned long)task_stack_page(tsk);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
- save_raw_context_stack(trace, sp);
+ save_raw_context_stack(trace, sp, savesched);
return;
}
do {
- if (trace->skip > 0)
- trace->skip--;
- else
- trace->entries[trace->nr_entries++] = pc;
- if (trace->nr_entries >= trace->max_entries)
- break;
+ if (savesched || !in_sched_functions(pc)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = pc;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
pc = unwind_stack(tsk, &sp, pc, &ra);
} while (pc);
#else
- save_raw_context_stack(trace, sp);
+ save_raw_context_stack(trace, sp, savesched);
#endif
}
@@ -82,6 +85,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
regs->cp0_epc = tsk->thread.reg31;
} else
prepare_frametrace(regs);
- save_context_stack(trace, tsk, regs);
+ save_context_stack(trace, tsk, regs, tsk == current);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
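[Note: the savesched flag added above suppresses scheduler-internal frames when tracing another task, so the trace begins at the caller of schedule(); only tsk == current keeps them. The predicate, isolated with stand-in helpers for illustration.]

    /* Stand-ins for __kernel_text_address() and in_sched_functions(). */
    static int is_kernel_text(unsigned long pc) { return pc != 0; }
    static int is_sched_fn(unsigned long pc)    { return pc == 42; }

    /* Record a frame only if it is kernel text and, unless scheduler
     * frames were requested, not inside the scheduler itself. */
    static int want_frame(unsigned long pc, int savesched)
    {
            return is_kernel_text(pc) && (savesched || !is_sched_fn(pc));
    }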
diff --git a/kernel/arch/mips/kernel/sysrq.c b/kernel/arch/mips/kernel/sysrq.c
new file mode 100644
index 000000000..5f0553930
--- /dev/null
+++ b/kernel/arch/mips/kernel/sysrq.c
@@ -0,0 +1,65 @@
+/*
+ * MIPS specific sysrq operations.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+#include <asm/tlbdebug.h>
+
+/*
+ * Dump TLB entries on all CPUs.
+ */
+
+static DEFINE_SPINLOCK(show_lock);
+
+static void sysrq_tlbdump_single(void *dummy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&show_lock, flags);
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+ pr_info("\n");
+
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(sysrq_tlbdump_single, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
+#endif
+
+static void sysrq_handle_tlbdump(int key)
+{
+ sysrq_tlbdump_single(NULL);
+#ifdef CONFIG_SMP
+ schedule_work(&sysrq_tlbdump);
+#endif
+}
+
+static struct sysrq_key_op sysrq_tlbdump_op = {
+ .handler = sysrq_handle_tlbdump,
+ .help_msg = "show-tlbs(x)",
+ .action_msg = "Show TLB entries",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init mips_sysrq_init(void)
+{
+ return register_sysrq_key('x', &sysrq_tlbdump_op);
+}
+arch_initcall(mips_sysrq_init);
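[Note: once registered, the handler can be exercised without a serial console via /proc/sysrq-trigger, subject to root privileges and the kernel.sysrq mask; for instance:]

    #include <fcntl.h>
    #include <unistd.h>

    /* Equivalent of `echo x > /proc/sysrq-trigger`: asks the kernel
     * to run the show-tlbs handler registered above. */
    int main(void)
    {
            int fd = open("/proc/sysrq-trigger", O_WRONLY);

            if (fd < 0)
                    return 1;
            write(fd, "x", 1);
            close(fd);
            return 0;
    }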
diff --git a/kernel/arch/mips/kernel/traps.c b/kernel/arch/mips/kernel/traps.c
index 5f5f44edc..ca9a81007 100644
--- a/kernel/arch/mips/kernel/traps.c
+++ b/kernel/arch/mips/kernel/traps.c
@@ -37,6 +37,7 @@
#include <linux/irq.h>
#include <linux/perf_event.h>
+#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
@@ -243,6 +244,7 @@ static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int cause = regs->cp0_cause;
+ unsigned int exccode;
int i;
show_regs_print_info(KERN_DEFAULT);
@@ -324,10 +326,10 @@ static void __show_regs(const struct pt_regs *regs)
}
printk("\n");
- printk("Cause : %08x\n", cause);
+ exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
- cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
- if (1 <= cause && cause <= 5)
+ if (1 <= exccode && exccode <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
printk("PrId : %08x (%s)\n", read_c0_prid(),
@@ -369,11 +371,6 @@ void show_registers(struct pt_regs *regs)
set_fs(old_fs);
}
-static int regs_to_trapnr(struct pt_regs *regs)
-{
- return (regs->cp0_cause >> 2) & 0x1f;
-}
-
static DEFINE_RAW_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
@@ -383,7 +380,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
oops_enter();
- if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
SIGSEGV) == NOTIFY_STOP)
sig = 0;
@@ -469,7 +466,7 @@ asmlinkage void do_be(struct pt_regs *regs)
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
- if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
SIGBUS) == NOTIFY_STOP)
goto out;
@@ -693,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
- siginfo_t info;
+ siginfo_t info = {
+ .si_signo = SIGFPE,
+ .si_code = FPE_INTOVF,
+ .si_addr = (void __user *)regs->cp0_epc,
+ };
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
- info.si_code = FPE_INTOVF;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
exception_exit(prev_state);
}
@@ -825,7 +822,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
int sig;
prev_state = exception_enter();
- if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
SIGFPE) == NOTIFY_STOP)
goto out;
@@ -877,15 +874,16 @@ out:
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
- siginfo_t info;
+ siginfo_t info = { 0 };
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
- if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+ SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
- if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+ if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
SIGTRAP) == NOTIFY_STOP)
return;
@@ -905,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
else
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
- info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
@@ -947,6 +944,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
set_fs(KERNEL_DS);
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
u16 instr[2];
@@ -986,15 +984,27 @@ asmlinkage void do_bp(struct pt_regs *regs)
* pertain to them.
*/
switch (bcode) {
+ case BRK_UPROBE:
+ if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_XOL:
+ if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
case BRK_KPROBE_BP:
if (notify_die(DIE_BREAK, "debug", regs, bcode,
- regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
- regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
@@ -1027,6 +1037,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
set_fs(get_ds());
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
__get_user(instr[1], (u16 __user *)(epc + 2)))
@@ -1093,8 +1104,9 @@ asmlinkage void do_ri(struct pt_regs *regs)
no_r2_instr:
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
- if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
SIGILL) == NOTIFY_STOP)
goto out;
@@ -1443,8 +1455,9 @@ asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
enum ctx_state prev_state;
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
- regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+ current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
goto out;
/* Clear MSACSR.Cause before enabling interrupts */
@@ -1522,7 +1535,6 @@ asmlinkage void do_watch(struct pt_regs *regs)
asmlinkage void do_mcheck(struct pt_regs *regs)
{
- const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
mm_segment_t old_fs = get_fs();
@@ -1531,19 +1543,8 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
show_regs(regs);
if (multi_match) {
- pr_err("Index : %0x\n", read_c0_index());
- pr_err("Pagemask: %0x\n", read_c0_pagemask());
- pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
- pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
- pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
- pr_err("Wired : %0x\n", read_c0_wired());
- pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
- if (cpu_has_htw) {
- pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
- pr_err("PWSize : %0*lx\n", field, read_c0_pwsize());
- pr_err("PWCtl : %0x\n", read_c0_pwctl());
- }
- pr_err("\n");
+ dump_tlb_regs();
+ pr_info("\n");
dump_tlb_all();
}
@@ -1650,6 +1651,7 @@ static inline void parity_protection_init(void)
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_QEMU_GENERIC:
+ case CPU_I6400:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
@@ -1854,12 +1856,14 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
char str[100];
+ nmi_enter();
raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
smp_processor_id(), regs->cp0_epc);
regs->cp0_epc = read_c0_errorepc();
die(str, regs);
+ nmi_exit();
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
@@ -2142,10 +2146,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
- /* Boot CPU's cache setup in setup_arch(). */
- if (!is_boot_cpu)
- cpu_cache_init();
- tlb_init();
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
+ tlb_init();
TLBMISS_HANDLER_SETUP();
}
@@ -2197,22 +2201,13 @@ void __init trap_init(void)
check_wait();
-#if defined(CONFIG_KGDB)
- if (kgdb_early_setup)
- return; /* Already done */
-#endif
-
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
} else {
-#ifdef CONFIG_KVM_GUEST
-#define KVM_GUEST_KSEG0 0x40000000
- ebase = KVM_GUEST_KSEG0;
-#else
- ebase = CKSEG0;
-#endif
+ ebase = CAC_BASE;
+
if (cpu_has_mips_r2_r6)
ebase += (read_c0_ebase() & 0x3ffff000);
}
diff --git a/kernel/arch/mips/kernel/unaligned.c b/kernel/arch/mips/kernel/unaligned.c
index eb3efd137..490cea569 100644
--- a/kernel/arch/mips/kernel/unaligned.c
+++ b/kernel/arch/mips/kernel/unaligned.c
@@ -85,6 +85,7 @@
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
+#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
@@ -891,6 +892,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
#ifdef CONFIG_EVA
mm_segment_t seg;
#endif
+ union fpureg *fpr;
+ enum msa_2b_fmt df;
+ unsigned int wd;
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
@@ -1202,6 +1206,75 @@ static void emulate_load_store_insn(struct pt_regs *regs,
break;
return;
+ case msa_op:
+ if (!cpu_has_msa)
+ goto sigill;
+
+ /*
+ * If we've reached this point then userland should have taken
+ * the MSA disabled exception & initialised vector context at
+ * some point in the past.
+ */
+ BUG_ON(!thread_msa_context_live());
+
+ df = insn.msa_mi10_format.df;
+ wd = insn.msa_mi10_format.wd;
+ fpr = &current->thread.fpu.fpr[wd];
+
+ switch (insn.msa_mi10_format.func) {
+ case msa_ld_op:
+ if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+ goto sigbus;
+
+ /*
+ * Disable preemption to avoid a race between copying
+ * state from userland, migrating to another CPU and
+ * updating the hardware vector register below.
+ */
+ preempt_disable();
+
+ res = __copy_from_user_inatomic(fpr, addr,
+ sizeof(*fpr));
+ if (res)
+ goto fault;
+
+ /*
+ * Update the hardware register if it is in use by the
+ * task in this quantum, in order to avoid having to
+ * save & restore the whole vector context.
+ */
+ if (test_thread_flag(TIF_USEDMSA))
+ write_msa_wr(wd, fpr, df);
+
+ preempt_enable();
+ break;
+
+ case msa_st_op:
+ if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
+ goto sigbus;
+
+ /*
+ * Update from the hardware register if it is in use by
+ * the task in this quantum, in order to avoid having to
+ * save & restore the whole vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA))
+ read_msa_wr(wd, fpr, df);
+ preempt_enable();
+
+ res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+ if (res)
+ goto fault;
+ break;
+
+ default:
+ goto sigbus;
+ }
+
+ compute_return_epc(regs);
+ break;
+
#ifndef CONFIG_CPU_MIPSR6
/*
* COP2 is available to implementor for application specific use.
@@ -2223,7 +2296,6 @@ sigbus:
}
#ifdef CONFIG_DEBUG_FS
-extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
struct dentry *d;
@@ -2240,5 +2312,5 @@ static int __init debugfs_unaligned(void)
return -ENOMEM;
return 0;
}
-__initcall(debugfs_unaligned);
+arch_initcall(debugfs_unaligned);
#endif
diff --git a/kernel/arch/mips/kernel/uprobes.c b/kernel/arch/mips/kernel/uprobes.c
new file mode 100644
index 000000000..8452d933a
--- /dev/null
+++ b/kernel/arch/mips/kernel/uprobes.c
@@ -0,0 +1,341 @@
+#include <linux/highmem.h>
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+
+#include <asm/branch.h>
+#include <asm/cpu-features.h>
+#include <asm/ptrace.h>
+#include <asm/inst.h>
+
+static inline int insn_has_delay_slot(const union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ case jr_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltz_op:
+ case bltzl_op:
+ case bgez_op:
+ case bgezl_op:
+ case bltzal_op:
+ case bltzall_op:
+ case bgezal_op:
+ case bgezall_op:
+ case bposge32_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op: /* not really i_format */
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+ return 1;
+
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @aup: the probepoint information.
+ * @mm: the probed address space.
+ * @addr: virtual address at which to install the probepoint.
+ * Return 0 on success or a negative errno on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+ struct mm_struct *mm, unsigned long addr)
+{
+ union mips_instruction inst;
+
+ /*
+ * For the time being this also blocks attempts to use uprobes with
+ * MIPS16 and microMIPS.
+ */
+ if (addr & 0x03)
+ return -EINVAL;
+
+ inst.word = aup->insn[0];
+ aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
+	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;	/* break back into the kernel */
+
+ return 0;
+}
+
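[Note: the two instruction words prepared here form the out-of-line (XOL) slot: the probed instruction (or, for a branch, the instruction from its delay slot, since the branch itself is emulated in arch_uprobe_pre_xol()), followed by the XOL break that traps back into the kernel. As a layout sketch:]

    #include <stdint.h>

    /* XOL slot layout assembled by arch_uprobe_analyze_insn(). */
    struct mips_xol_slot {
            uint32_t stepped; /* insn[0], or insn[1] if insn[0] branches */
            uint32_t brk;     /* UPROBE_BRK_UPROBE_XOL: back to kernel */
    };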
+/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ *
+ * This definition overrides the weak definition in kernel/events/uprobes.c.
+ * and is needed for the case where an architecture has multiple trap
+ * instructions (like PowerPC or MIPS). We treat BREAK just like the more
+ * modern conditional trap instructions.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ union mips_instruction inst;
+
+ inst.word = *insn;
+
+ switch (inst.i_format.opcode) {
+ case spec_op:
+ switch (inst.r_format.func) {
+ case break_op:
+ case teq_op:
+ case tge_op:
+ case tgeu_op:
+ case tlt_op:
+ case tltu_op:
+ case tne_op:
+ return 1;
+ }
+ break;
+
+ case bcond_op: /* Yes, really ... */
+ switch (inst.u_format.rt) {
+ case teqi_op:
+ case tgei_op:
+ case tgeiu_op:
+ case tlti_op:
+ case tltiu_op:
+ case tnei_op:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#define UPROBE_TRAP_NR ULONG_MAX
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @aup: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+ union mips_instruction insn;
+
+ /*
+ * Now find the EPC where to resume after the breakpoint has been
+ * dealt with. This may require emulation of a branch.
+ */
+ aup->resume_epc = regs->cp0_epc + 4;
+ if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
+		insn.word = aup->insn[0];
+		__compute_return_epc_for_insn(regs, insn);
+ aup->resume_epc = regs->cp0_epc;
+ }
+
+ utask->autask.saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ regs->cp0_epc = current->utask->xol_vaddr;
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ regs->cp0_epc = aup->resume_epc;
+
+ return 0;
+}
+
+/*
+ * If xol insn itself traps and generates a signal(Say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+ if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+
+ /* regs == NULL is a kernel bug */
+ if (WARN_ON(!regs))
+ return NOTIFY_DONE;
+
+ /* We are only interested in userspace traps */
+ if (!user_mode(regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (uprobe_pre_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_UPROBE_XOL:
+ if (uprobe_post_sstep_notifier(regs))
+			return NOTIFY_STOP;
+		break;
+ default:
+ break;
+ }
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+ struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(
+ unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->regs[31];
+
+ /* Replace the return address with the trampoline address */
+	regs->regs[31] = trampoline_vaddr;
+
+ return ra;
+}
+
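[Note: the contract here matches other architectures: stash the live return address, point $ra at the uretprobe trampoline, and hand the original back so the core uprobes code can restore it when the function really returns. A plain-C model with a fake register file.]

    struct fake_regs { unsigned long ra; };

    /* Swap in the trampoline; return the displaced address for the
     * core code's return-instance bookkeeping. */
    static unsigned long hijack(unsigned long tramp, struct fake_regs *regs)
    {
            unsigned long orig = regs->ra;

            regs->ra = tramp;
            return orig;
    }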
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This version overrides the weak version in kernel/events/uprobes.c.
+ * It is required to handle MIPS16 and microMIPS.
+ */
+int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long vaddr)
+{
+ return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+/**
+ * set_orig_insn - Restore the original instruction.
+ * @mm: the probed process address space.
+ * @auprobe: arch specific probepoint information.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, restore the original instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long vaddr)
+{
+ return uprobe_write_opcode(mm, vaddr,
+ *(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
+}
+
+void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ void *kaddr;
+
+ /* Initialize the slot */
+ kaddr = kmap_atomic(page);
+ memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+ kunmap_atomic(kaddr);
+
+ /*
+ * The MIPS version of flush_icache_range will operate safely on
+ * user space addresses and more importantly, it doesn't require a
+ * VMA argument.
+ */
+ flush_icache_range(vaddr, vaddr + len);
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ *
+ * For now we never emulate, so this function just returns 0 and every
+ * probed instruction is single-stepped out of line.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ return 0;
+}
diff --git a/kernel/arch/mips/kernel/vdso.c b/kernel/arch/mips/kernel/vdso.c
index ed2a27872..975e99759 100644
--- a/kernel/arch/mips/kernel/vdso.c
+++ b/kernel/arch/mips/kernel/vdso.c
@@ -1,122 +1,175 @@
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
*
- * Copyright (C) 2009, 2010 Cavium Networks, Inc.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/init.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/unistd.h>
-#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irqchip/mips-gic.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+#include <asm/abi.h>
#include <asm/vdso.h>
-#include <asm/uasm.h>
-#include <asm/processor.h>
+
+/* Kernel-provided data used by the VDSO. */
+static union mips_vdso_data vdso_data __page_aligned_data;
/*
- * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
+ * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
+ * what we map and where within the area they are mapped is determined at
+ * runtime.
*/
-#define __NR_O32_sigreturn 4119
-#define __NR_O32_rt_sigreturn 4193
-#define __NR_N32_rt_sigreturn 6211
+static struct page *no_pages[] = { NULL };
+static struct vm_special_mapping vdso_vvar_mapping = {
+ .name = "[vvar]",
+ .pages = no_pages,
+};
-static struct page *vdso_page;
-
-static void __init install_trampoline(u32 *tramp, unsigned int sigreturn)
+static void __init init_vdso_image(struct mips_vdso_image *image)
{
- uasm_i_addiu(&tramp, 2, 0, sigreturn); /* li v0, sigreturn */
- uasm_i_syscall(&tramp, 0);
+ unsigned long num_pages, i;
+
+ BUG_ON(!PAGE_ALIGNED(image->data));
+ BUG_ON(!PAGE_ALIGNED(image->size));
+
+ num_pages = image->size / PAGE_SIZE;
+
+ for (i = 0; i < num_pages; i++) {
+ image->mapping.pages[i] =
+ virt_to_page(image->data + (i * PAGE_SIZE));
+ }
}
static int __init init_vdso(void)
{
- struct mips_vdso *vdso;
-
- vdso_page = alloc_page(GFP_KERNEL);
- if (!vdso_page)
- panic("Cannot allocate vdso");
-
- vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
- if (!vdso)
- panic("Cannot map vdso");
- clear_page(vdso);
-
- install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn);
-#ifdef CONFIG_32BIT
- install_trampoline(vdso->signal_trampoline, __NR_sigreturn);
-#else
- install_trampoline(vdso->n32_rt_signal_trampoline,
- __NR_N32_rt_sigreturn);
- install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn);
- install_trampoline(vdso->o32_rt_signal_trampoline,
- __NR_O32_rt_sigreturn);
+ init_vdso_image(&vdso_image);
+
+#ifdef CONFIG_MIPS32_O32
+ init_vdso_image(&vdso_image_o32);
#endif
- vunmap(vdso);
+#ifdef CONFIG_MIPS32_N32
+ init_vdso_image(&vdso_image_n32);
+#endif
return 0;
}
subsys_initcall(init_vdso);
-static unsigned long vdso_addr(unsigned long start)
+void update_vsyscall(struct timekeeper *tk)
{
- unsigned long offset = 0UL;
-
- if (current->flags & PF_RANDOMIZE) {
- offset = get_random_int();
- offset <<= PAGE_SHIFT;
- if (TASK_IS_32BIT_ADDR)
- offset &= 0xfffffful;
- else
- offset &= 0xffffffful;
+ vdso_data_write_begin(&vdso_data);
+
+ vdso_data.xtime_sec = tk->xtime_sec;
+ vdso_data.xtime_nsec = tk->tkr_mono.xtime_nsec;
+ vdso_data.wall_to_mono_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data.wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec;
+ vdso_data.cs_shift = tk->tkr_mono.shift;
+
+ vdso_data.clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode;
+ if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
+ vdso_data.cs_mult = tk->tkr_mono.mult;
+ vdso_data.cs_cycle_last = tk->tkr_mono.cycle_last;
+ vdso_data.cs_mask = tk->tkr_mono.mask;
}
- return STACK_TOP + offset;
+ vdso_data_write_end(&vdso_data);
+}
+
+void update_vsyscall_tz(void)
+{
+ if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
+ vdso_data.tz_minuteswest = sys_tz.tz_minuteswest;
+ vdso_data.tz_dsttime = sys_tz.tz_dsttime;
+ }
}
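[Note: vdso_data_write_begin()/_end() bracket the stores with a sequence counter, so the vDSO reader uses the classic retry loop: spin while the count is odd, snapshot, and retry if the count moved. A simplified sketch; real code needs acquire/release barriers, elided here.]

    #include <stdint.h>

    struct vdso_like {
            volatile uint32_t seq;
            uint64_t xtime_sec, xtime_nsec;
    };

    static void read_time(const struct vdso_like *d,
                          uint64_t *sec, uint64_t *nsec)
    {
            uint32_t start;

            do {
                    while ((start = d->seq) & 1)
                            ;               /* writer is mid-update */
                    *sec  = d->xtime_sec;
                    *nsec = d->xtime_nsec;
            } while (d->seq != start);      /* retry if it moved */
    }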
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
- int ret;
- unsigned long addr;
+ struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
+ unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr;
+ struct vm_area_struct *vma;
+ struct resource gic_res;
+ int ret;
down_write(&mm->mmap_sem);
- addr = vdso_addr(mm->start_stack);
+ /*
+	 * Determine total area size. This includes the VDSO image itself, the
+ * data page, and the GIC user page if present. Always create a mapping
+ * for the GIC user area if the GIC is present regardless of whether it
+ * is the current clocksource, in case it comes into use later on. We
+ * only map a page even though the total area is 64K, as we only need
+ * the counter registers at the start.
+ */
+ gic_size = gic_present ? PAGE_SIZE : 0;
+ vvar_size = gic_size + PAGE_SIZE;
+ size = vvar_size + image->size;
+
+ base = get_unmapped_area(NULL, 0, size, 0, 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
+
+ data_addr = base + gic_size;
+ vdso_addr = data_addr + PAGE_SIZE;
- addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0);
- if (IS_ERR_VALUE(addr)) {
- ret = addr;
- goto up_fail;
+ vma = _install_special_mapping(mm, base, vvar_size,
+ VM_READ | VM_MAYREAD,
+ &vdso_vvar_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
}
- ret = install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_page);
+ /* Map GIC user page. */
+ if (gic_size) {
+ ret = gic_get_usm_range(&gic_res);
+ if (ret)
+ goto out;
+
+ ret = io_remap_pfn_range(vma, base,
+ gic_res.start >> PAGE_SHIFT,
+ gic_size,
+ pgprot_noncached(PAGE_READONLY));
+ if (ret)
+ goto out;
+ }
+ /* Map data page. */
+ ret = remap_pfn_range(vma, data_addr,
+ virt_to_phys(&vdso_data) >> PAGE_SHIFT,
+ PAGE_SIZE, PAGE_READONLY);
if (ret)
- goto up_fail;
+ goto out;
+
+ /* Map VDSO image. */
+ vma = _install_special_mapping(mm, vdso_addr, image->size,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &image->mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
- mm->context.vdso = (void *)addr;
+ mm->context.vdso = (void *)vdso_addr;
+ ret = 0;
-up_fail:
+out:
up_write(&mm->mmap_sem);
return ret;
}
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
- return "[vdso]";
- return NULL;
-}
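[Note: the area reserved above is laid out, low to high, as: the optional GIC user page, the kernel-written vdso_data page, then the VDSO image, with mm->context.vdso pointing at the image. A toy computation of those boundaries; the base address and image size are invented for illustration.]

    #include <stdio.h>

    #define PAGE 4096UL

    int main(void)
    {
            unsigned long base = 0x77ff0000UL, img = 2 * PAGE;
            unsigned long gic  = PAGE;        /* 0 when !gic_present */
            unsigned long data = base + gic;
            unsigned long vdso = data + PAGE;

            printf("[vvar] %#lx-%#lx  [vdso] %#lx-%#lx\n",
                   base, vdso, vdso, vdso + img);
            return 0;
    }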
diff --git a/kernel/arch/mips/kernel/vmlinux.lds.S b/kernel/arch/mips/kernel/vmlinux.lds.S
index 3b46f7ce9..0a93e83cd 100644
--- a/kernel/arch/mips/kernel/vmlinux.lds.S
+++ b/kernel/arch/mips/kernel/vmlinux.lds.S
@@ -17,7 +17,9 @@ OUTPUT_ARCH(mips)
ENTRY(kernel_entry)
PHDRS {
text PT_LOAD FLAGS(7); /* RWX */
+#ifndef CONFIG_CAVIUM_OCTEON_SOC
note PT_NOTE FLAGS(4); /* R__ */
+#endif /* CAVIUM_OCTEON_SOC */
}
#ifdef CONFIG_32BIT
@@ -71,7 +73,12 @@ SECTIONS
__stop___dbe_table = .;
}
- NOTES :text :note
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+#define NOTES_HEADER
+#else /* CONFIG_CAVIUM_OCTEON_SOC */
+#define NOTES_HEADER :note
+#endif /* CONFIG_CAVIUM_OCTEON_SOC */
+ NOTES :text NOTES_HEADER
.dummy : { *(.dummy) } :text
_sdata = .; /* Start of data section */
@@ -125,8 +132,19 @@ SECTIONS
.exit.data : {
EXIT_DATA
}
-
+#ifdef CONFIG_SMP
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+#endif
+#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+ __appended_dtb = .;
+ /* leave space for appended DTB */
+ . += 0x100000;
+#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
+ .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
+ *(.appended_dtb)
+ KEEP(*(.appended_dtb))
+ }
+#endif
/*
* Align to 64K in attempt to eliminate holes before the
* .bss..swapper_pg_dir section at the start of .bss. This
@@ -175,6 +193,7 @@ SECTIONS
DISCARDS
/DISCARD/ : {
/* ABI crap starts here */
+ *(.MIPS.abiflags)
*(.MIPS.options)
*(.options)
*(.pdr)
diff --git a/kernel/arch/mips/kernel/vpe.c b/kernel/arch/mips/kernel/vpe.c
index 11da31456..9067b651c 100644
--- a/kernel/arch/mips/kernel/vpe.c
+++ b/kernel/arch/mips/kernel/vpe.c
@@ -817,6 +817,7 @@ static int vpe_open(struct inode *inode, struct file *filp)
static int vpe_release(struct inode *inode, struct file *filp)
{
+#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
struct vpe *v;
Elf_Ehdr *hdr;
int ret = 0;
@@ -827,7 +828,7 @@ static int vpe_release(struct inode *inode, struct file *filp)
hdr = (Elf_Ehdr *) v->pbuffer;
if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
- if ((vpe_elfload(v) >= 0) && vpe_run) {
+ if (vpe_elfload(v) >= 0) {
vpe_run(v);
} else {
pr_warn("VPE loader: ELF load failed.\n");
@@ -850,6 +851,10 @@ static int vpe_release(struct inode *inode, struct file *filp)
v->plen = 0;
return ret;
+#else
+ pr_warn("VPE loader: ELF load failed.\n");
+ return -ENOEXEC;
+#endif
}
static ssize_t vpe_write(struct file *file, const char __user *buffer,