author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/tile
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.

During the rebasing, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The collisions have been removed because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/tile')
-rw-r--r--  kernel/arch/tile/Kconfig | 37
-rw-r--r--  kernel/arch/tile/gxio/mpipe.c | 33
-rw-r--r--  kernel/arch/tile/include/asm/Kbuild | 3
-rw-r--r--  kernel/arch/tile/include/asm/atomic.h | 2
-rw-r--r--  kernel/arch/tile/include/asm/atomic_32.h | 28
-rw-r--r--  kernel/arch/tile/include/asm/atomic_64.h | 49
-rw-r--r--  kernel/arch/tile/include/asm/dma-mapping.h | 45
-rw-r--r--  kernel/arch/tile/include/asm/edac.h | 29
-rw-r--r--  kernel/arch/tile/include/asm/elf.h | 4
-rw-r--r--  kernel/arch/tile/include/asm/highmem.h | 1
-rw-r--r--  kernel/arch/tile/include/asm/hugetlb.h | 13
-rw-r--r--  kernel/arch/tile/include/asm/io.h | 3
-rw-r--r--  kernel/arch/tile/include/asm/irq.h | 5
-rw-r--r--  kernel/arch/tile/include/asm/page.h | 8
-rw-r--r--  kernel/arch/tile/include/asm/pgtable.h | 8
-rw-r--r--  kernel/arch/tile/include/asm/processor.h | 2
-rw-r--r--  kernel/arch/tile/include/asm/spinlock_32.h | 6
-rw-r--r--  kernel/arch/tile/include/asm/spinlock_64.h | 5
-rw-r--r--  kernel/arch/tile/include/asm/stack.h | 13
-rw-r--r--  kernel/arch/tile/include/asm/switch_to.h | 8
-rw-r--r--  kernel/arch/tile/include/asm/syscall.h | 28
-rw-r--r--  kernel/arch/tile/include/asm/thread_info.h | 1
-rw-r--r--  kernel/arch/tile/include/asm/topology.h | 2
-rw-r--r--  kernel/arch/tile/include/asm/traps.h | 8
-rw-r--r--  kernel/arch/tile/include/asm/uaccess.h | 66
-rw-r--r--  kernel/arch/tile/include/asm/word-at-a-time.h | 42
-rw-r--r--  kernel/arch/tile/include/hv/hypervisor.h | 60
-rw-r--r--  kernel/arch/tile/include/uapi/arch/opcode_tilegx.h | 6
-rw-r--r--  kernel/arch/tile/include/uapi/asm/mman.h | 1
-rw-r--r--  kernel/arch/tile/kernel/entry.S | 7
-rw-r--r--  kernel/arch/tile/kernel/hvglue.S | 3
-rw-r--r--  kernel/arch/tile/kernel/hvglue_trace.c | 4
-rw-r--r--  kernel/arch/tile/kernel/intvec_32.S | 1
-rw-r--r--  kernel/arch/tile/kernel/intvec_64.S | 7
-rw-r--r--  kernel/arch/tile/kernel/pci_gx.c | 6
-rw-r--r--  kernel/arch/tile/kernel/perf_event.c | 2
-rw-r--r--  kernel/arch/tile/kernel/process.c | 143
-rw-r--r--  kernel/arch/tile/kernel/ptrace.c | 3
-rw-r--r--  kernel/arch/tile/kernel/setup.c | 2
-rw-r--r--  kernel/arch/tile/kernel/stack.c | 127
-rw-r--r--  kernel/arch/tile/kernel/sysfs.c | 11
-rw-r--r--  kernel/arch/tile/kernel/time.c | 8
-rw-r--r--  kernel/arch/tile/kernel/traps.c | 15
-rw-r--r--  kernel/arch/tile/kernel/usb.c | 2
-rw-r--r--  kernel/arch/tile/kernel/vdso/Makefile | 4
-rw-r--r--  kernel/arch/tile/kernel/vdso/vgettimeofday.c | 10
-rw-r--r--  kernel/arch/tile/lib/atomic_32.c | 23
-rw-r--r--  kernel/arch/tile/lib/atomic_asm_32.S | 4
-rw-r--r--  kernel/arch/tile/lib/exports.c | 3
-rw-r--r--  kernel/arch/tile/lib/memcpy_user_64.c | 4
-rw-r--r--  kernel/arch/tile/lib/spinlock_32.c | 11
-rw-r--r--  kernel/arch/tile/lib/spinlock_64.c | 11
-rw-r--r--  kernel/arch/tile/lib/usercopy_32.S | 46
-rw-r--r--  kernel/arch/tile/lib/usercopy_64.S | 46
-rw-r--r--  kernel/arch/tile/mm/elf.c | 2
-rw-r--r--  kernel/arch/tile/mm/fault.c | 17
-rw-r--r--  kernel/arch/tile/mm/highmem.c | 12
-rw-r--r--  kernel/arch/tile/mm/hugetlbpage.c | 5
-rw-r--r--  kernel/arch/tile/mm/init.c | 2
59 files changed, 616 insertions, 441 deletions
diff --git a/kernel/arch/tile/Kconfig b/kernel/arch/tile/Kconfig
index a07e31b50..8ec7a4599 100644
--- a/kernel/arch/tile/Kconfig
+++ b/kernel/arch/tile/Kconfig
@@ -24,10 +24,15 @@ config TILE
select MODULES_USE_ELF_RELA
select HAVE_ARCH_TRACEHOOK
select HAVE_SYSCALL_TRACEPOINTS
+ select USER_STACKTRACE_SUPPORT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_DEBUG_STACKOVERFLOW
select ARCH_WANT_FRAME_POINTERS
select HAVE_CONTEXT_TRACKING
+ select EDAC_SUPPORT
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select HAVE_ARCH_SECCOMP_FILTER
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
@@ -124,8 +129,10 @@ config HVC_TILE
select HVC_IRQ if TILEGX
def_bool y
+# Building with ARCH=tilegx (or ARCH=tile) implies using the
+# 64-bit TILE-Gx toolchain, so force CONFIG_TILEGX on.
config TILEGX
- bool "Building for TILE-Gx (64-bit) processor"
+ def_bool ARCH != "tilepro"
select SPARSE_IRQ
select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
select HAVE_FUNCTION_TRACER
@@ -169,8 +176,6 @@ config NR_CPUS
smaller kernel memory footprint results from using a smaller
value on chips with fewer tiles.
-if TILEGX
-
choice
prompt "Kernel page size"
default PAGE_SIZE_64KB
@@ -181,8 +186,11 @@ choice
connections, etc., it may be better to select 16KB, which uses
memory more efficiently at some cost in TLB performance.
- Note that this option is TILE-Gx specific; currently
- TILEPro page size is set by rebuilding the hypervisor.
+ Note that for TILEPro, you must also rebuild the hypervisor
+ with a matching page size.
+
+config PAGE_SIZE_4KB
+ bool "4KB" if TILEPRO
config PAGE_SIZE_16KB
bool "16KB"
@@ -192,12 +200,11 @@ config PAGE_SIZE_64KB
endchoice
-endif
-
source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call"
+ select KEXEC_CORE
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
@@ -215,6 +222,22 @@ config COMPAT
If enabled, the kernel will support running TILE-Gx binaries
that were built with the -m32 option.
+config SECCOMP
+ bool "Enable seccomp to safely compute untrusted bytecode"
+ depends on PROC_FS
+ help
+ This kernel feature is useful for number crunching applications
+ that may need to compute untrusted bytecode during their
+ execution. By using pipes or other transports made available to
+ the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in
+ their own address space using seccomp. Once seccomp is
+ enabled via prctl, it cannot be disabled and the task is only
+ allowed to execute a few safe syscalls defined by each seccomp
+ mode.
+
+ If unsure, say N.
+
config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
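
Note: the new SECCOMP help text above refers to enabling seccomp "via prctl". A minimal userspace sketch of strict-mode seccomp (not part of this patch; it only uses the standard prctl()/SECCOMP_MODE_STRICT API) might look like this:

/*
 * Illustrative only: once strict seccomp is enabled via prctl(),
 * only read(), write(), _exit() and sigreturn() remain allowed;
 * any other syscall terminates the task with SIGKILL.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}
	write(STDOUT_FILENO, "sandboxed\n", 10);	/* still allowed */
	_exit(0);					/* still allowed */
}
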
diff --git a/kernel/arch/tile/gxio/mpipe.c b/kernel/arch/tile/gxio/mpipe.c
index ee186e13d..f102048d9 100644
--- a/kernel/arch/tile/gxio/mpipe.c
+++ b/kernel/arch/tile/gxio/mpipe.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_mpipe.h>
@@ -29,32 +30,6 @@
/* HACK: Avoid pointless "shadow" warnings. */
#define link link_shadow
-/**
- * strscpy - Copy a C-string into a sized buffer, but only if it fits
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Use this routine to avoid copying too-long strings.
- * The routine returns the total number of bytes copied
- * (including the trailing NUL) or zero if the buffer wasn't
- * big enough. To ensure that programmers pay attention
- * to the return code, the destination has a single NUL
- * written at the front (if size is non-zero) when the
- * buffer is not big enough.
- */
-static size_t strscpy(char *dest, const char *src, size_t size)
-{
- size_t len = strnlen(src, size) + 1;
- if (len > size) {
- if (size)
- dest[0] = '\0';
- return 0;
- }
- memcpy(dest, src, len);
- return len;
-}
-
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
{
char file[32];
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
if (!context)
return GXIO_ERR_NO_DEVICE;
- if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+ if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
return GXIO_ERR_NO_DEVICE;
return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
if (rv >= 0) {
- if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+ if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
return GXIO_ERR_INVAL_MEMORY_SIZE;
memcpy(link_mac, mac.mac, sizeof(mac.mac));
}
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
_gxio_mpipe_link_name_t name;
int rv;
- if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+ if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
return GXIO_ERR_NO_DEVICE;
rv = gxio_mpipe_link_open_aux(context, name, flags);
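
Note: the mpipe.c hunks above drop the driver-local strscpy() and rely on the kernel-wide strscpy(), whose return convention differs: it returns the number of characters copied (excluding the trailing NUL), or -E2BIG when the source does not fit, which is why the checks change from "== 0" to "< 0". A hedged, userspace-style sketch of that convention (the strscpy_like() name is hypothetical; the real implementation lives in the kernel's lib/string.c):

#include <errno.h>
#include <string.h>

/* Illustration of the strscpy() return convention relied on above. */
static long strscpy_like(char *dest, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {			/* no room for the NUL */
		if (size) {
			memcpy(dest, src, size - 1);
			dest[size - 1] = '\0';
		}
		return -E2BIG;			/* caller sees a negative value */
	}
	memcpy(dest, src, len + 1);
	return len;
}

/* Caller pattern matching the patched code:
 *	if (strscpy_like(name.name, link_name, sizeof(name.name)) < 0)
 *		return GXIO_ERR_NO_DEVICE;
 */
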
diff --git a/kernel/arch/tile/include/asm/Kbuild b/kernel/arch/tile/include/asm/Kbuild
index f5433e0e3..ba35c41c7 100644
--- a/kernel/arch/tile/include/asm/Kbuild
+++ b/kernel/arch/tile/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_regs.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
@@ -27,7 +28,7 @@ generic-y += poll.h
generic-y += posix_types.h
generic-y += preempt.h
generic-y += resource.h
-generic-y += scatterlist.h
+generic-y += seccomp.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
diff --git a/kernel/arch/tile/include/asm/atomic.h b/kernel/arch/tile/include/asm/atomic.h
index 709798460..9fc0107a9 100644
--- a/kernel/arch/tile/include/asm/atomic.h
+++ b/kernel/arch/tile/include/asm/atomic.h
@@ -34,7 +34,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE(v->counter);
+ return READ_ONCE(v->counter);
}
/**
diff --git a/kernel/arch/tile/include/asm/atomic_32.h b/kernel/arch/tile/include/asm/atomic_32.h
index 1b109fad9..d320ce253 100644
--- a/kernel/arch/tile/include/asm/atomic_32.h
+++ b/kernel/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
_atomic_xchg_add(&v->counter, i);
}
+#define ATOMIC_OP(op) \
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ _atomic_##op((unsigned long *)&v->counter, i); \
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
/**
* atomic_add_return - add integer and return
* @v: pointer of type atomic_t
@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
_atomic64_xchg_add(&v->counter, i);
}
+#define ATOMIC64_OP(op) \
+long long _atomic64_##op(long long *v, long long n); \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+ _atomic64_##op(&v->counter, i); \
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
/**
* atomic64_add_return - add integer and return
* @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/kernel/arch/tile/include/asm/atomic_64.h b/kernel/arch/tile/include/asm/atomic_64.h
index 7b11c5fad..51cabc26e 100644
--- a/kernel/arch/tile/include/asm/atomic_64.h
+++ b/kernel/arch/tile/include/asm/atomic_64.h
@@ -24,7 +24,7 @@
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
-#define atomic_set(v, i) ((v)->counter = (i))
+#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
/*
* The smp_mb() operations throughout are to support the fact that
@@ -58,12 +58,32 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
+static inline void atomic_and(int i, atomic_t *v)
+{
+ __insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+ __insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+ int guess, oldval = v->counter;
+ do {
+ guess = oldval;
+ __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+ oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+ } while (guess != oldval);
+}
+
/* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) ((v)->counter)
-#define atomic64_set(v, i) ((v)->counter = (i))
+#define atomic64_read(v) READ_ONCE((v)->counter)
+#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
static inline void atomic64_add(long i, atomic64_t *v)
{
@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
return oldval != u;
}
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+ __insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+ __insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+ long guess, oldval = v->counter;
+ do {
+ guess = oldval;
+ __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+ oldval = __insn_cmpexch(&v->counter, guess ^ i);
+ } while (guess != oldval);
+}
+
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
@@ -105,9 +145,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-/* Define this to indicate that cmpxchg is an efficient operation. */
-#define __HAVE_ARCH_CMPXCHG
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_64_H */
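
Note: the new atomic_xor()/atomic64_xor() above use a compare-exchange retry loop, since TILE-Gx provides fetch-and/fetch-or instructions but no fetch-xor. The same pattern in portable C11 atomics, as a sketch of the idea rather than the arch code (atomic_xor_like() is a hypothetical name):

#include <stdatomic.h>

/* Retry until the exchange succeeds; on failure, "expected" is refreshed
 * with the current value, much like re-reading oldval in the loop above. */
static void atomic_xor_like(_Atomic int *v, int i)
{
	int expected = atomic_load_explicit(v, memory_order_relaxed);

	while (!atomic_compare_exchange_weak(v, &expected, expected ^ i))
		;	/* expected now holds the latest value; try again */
}
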
diff --git a/kernel/arch/tile/include/asm/dma-mapping.h b/kernel/arch/tile/include/asm/dma-mapping.h
index 1eae359d8..96ac6cce4 100644
--- a/kernel/arch/tile/include/asm/dma-mapping.h
+++ b/kernel/arch/tile/include/asm/dma-mapping.h
@@ -59,8 +59,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
static inline void dma_mark_clean(void *addr, size_t size) {}
-#include <asm-generic/dma-mapping-common.h>
-
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
dev->archdata.dma_ops = ops;
@@ -74,18 +72,9 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
return addr + size - 1 <= *dev->dma_mask;
}
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- debug_dma_mapping_error(dev, dma_addr);
- return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-}
+#define HAVE_ARCH_DMA_SET_MASK 1
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- return get_dma_ops(dev)->dma_supported(dev, mask);
-}
+#include <asm-generic/dma-mapping-common.h>
static inline int
dma_set_mask(struct device *dev, u64 mask)
@@ -116,36 +105,6 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
- void *cpu_addr;
-
- cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
-
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-
- return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-
- dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
-#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
-
/*
* dma_alloc_noncoherent() is #defined to return coherent memory,
* so there's no need to do any flushing here.
diff --git a/kernel/arch/tile/include/asm/edac.h b/kernel/arch/tile/include/asm/edac.h
deleted file mode 100644
index 87fc83eea..000000000
--- a/kernel/arch/tile/include/asm/edac.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_EDAC_H
-#define _ASM_TILE_EDAC_H
-
-/* ECC atomic, DMA, SMP and interrupt safe scrub function */
-
-static inline void atomic_scrub(void *va, u32 size)
-{
- /*
- * These is nothing to be done here because CE is
- * corrected by the mshim.
- */
- return;
-}
-
-#endif /* _ASM_TILE_EDAC_H */
diff --git a/kernel/arch/tile/include/asm/elf.h b/kernel/arch/tile/include/asm/elf.h
index 41d9878a9..c505d77e4 100644
--- a/kernel/arch/tile/include/asm/elf.h
+++ b/kernel/arch/tile/include/asm/elf.h
@@ -22,6 +22,7 @@
#include <arch/chip.h>
#include <linux/ptrace.h>
+#include <linux/elf-em.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -30,9 +31,6 @@ typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-#define EM_TILEPRO 188
-#define EM_TILEGX 191
-
/* Provide a nominal data structure. */
#define ELF_NFPREG 0
typedef double elf_fpreg_t;
diff --git a/kernel/arch/tile/include/asm/highmem.h b/kernel/arch/tile/include/asm/highmem.h
index fc8429a31..979579b38 100644
--- a/kernel/arch/tile/include/asm/highmem.h
+++ b/kernel/arch/tile/include/asm/highmem.h
@@ -63,7 +63,6 @@ void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
-struct page *kmap_atomic_to_page(void *ptr);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);
diff --git a/kernel/arch/tile/include/asm/hugetlb.h b/kernel/arch/tile/include/asm/hugetlb.h
index 325773300..2fac5be4d 100644
--- a/kernel/arch/tile/include/asm/hugetlb.h
+++ b/kernel/arch/tile/include/asm/hugetlb.h
@@ -40,10 +40,6 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
@@ -98,15 +94,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
-static inline int arch_prepare_hugepage(struct page *page)
-{
- return 0;
-}
-
-static inline void arch_release_hugepage(struct page *page)
-{
-}
-
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
diff --git a/kernel/arch/tile/include/asm/io.h b/kernel/arch/tile/include/asm/io.h
index 6ef4ecab1..322b5fe94 100644
--- a/kernel/arch/tile/include/asm/io.h
+++ b/kernel/arch/tile/include/asm/io.h
@@ -54,7 +54,8 @@ extern void iounmap(volatile void __iomem *addr);
#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
-#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
+#define ioremap_wt(physaddr, size) ioremap(physaddr, size)
+#define ioremap_uc(physaddr, size) ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
#define mmiowb()
diff --git a/kernel/arch/tile/include/asm/irq.h b/kernel/arch/tile/include/asm/irq.h
index 1fe869118..84a924034 100644
--- a/kernel/arch/tile/include/asm/irq.h
+++ b/kernel/arch/tile/include/asm/irq.h
@@ -78,4 +78,9 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type);
void setup_irq_regs(void);
+#ifdef __tilegx__
+void arch_trigger_all_cpu_backtrace(bool self);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
#endif /* _ASM_TILE_IRQ_H */
diff --git a/kernel/arch/tile/include/asm/page.h b/kernel/arch/tile/include/asm/page.h
index a213a8d84..8eca6a0e1 100644
--- a/kernel/arch/tile/include/asm/page.h
+++ b/kernel/arch/tile/include/asm/page.h
@@ -20,15 +20,17 @@
#include <arch/chip.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#if defined(CONFIG_PAGE_SIZE_16KB)
+#if defined(CONFIG_PAGE_SIZE_4KB) /* tilepro only */
+#define PAGE_SHIFT 12
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_4K
+#elif defined(CONFIG_PAGE_SIZE_16KB)
#define PAGE_SHIFT 14
#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PAGE_SHIFT 16
#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
#else
-#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
-#define CTX_PAGE_FLAG 0
+#error Page size not specified in Kconfig
#endif
#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
diff --git a/kernel/arch/tile/include/asm/pgtable.h b/kernel/arch/tile/include/asm/pgtable.h
index 95a4f19d1..2b05ccbeb 100644
--- a/kernel/arch/tile/include/asm/pgtable.h
+++ b/kernel/arch/tile/include/asm/pgtable.h
@@ -414,10 +414,10 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
}
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
{
return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
}
diff --git a/kernel/arch/tile/include/asm/processor.h b/kernel/arch/tile/include/asm/processor.h
index dd4f9f17e..139dfdee0 100644
--- a/kernel/arch/tile/include/asm/processor.h
+++ b/kernel/arch/tile/include/asm/processor.h
@@ -111,8 +111,6 @@ struct thread_struct {
unsigned long long interrupt_mask;
/* User interrupt-control 0 state */
unsigned long intctrl_0;
- /* Is this task currently doing a backtrace? */
- bool in_backtrace;
/* Any other miscellaneous processor state bits */
unsigned long proc_status;
#if !CHIP_HAS_FIXED_INTVEC_BASE()
diff --git a/kernel/arch/tile/include/asm/spinlock_32.h b/kernel/arch/tile/include/asm/spinlock_32.h
index c0a77b38d..b14b1ba5b 100644
--- a/kernel/arch/tile/include/asm/spinlock_32.h
+++ b/kernel/arch/tile/include/asm/spinlock_32.h
@@ -41,8 +41,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
* to claim the lock is held, since it will be momentarily
* if not already. There's no need to wait for a "valid"
* lock->next_ticket to become available.
+ * Use READ_ONCE() to ensure that calling this in a loop is OK.
*/
- return lock->next_ticket != lock->current_ticket;
+ int curr = READ_ONCE(lock->current_ticket);
+ int next = READ_ONCE(lock->next_ticket);
+
+ return next != curr;
}
void arch_spin_lock(arch_spinlock_t *lock);
diff --git a/kernel/arch/tile/include/asm/spinlock_64.h b/kernel/arch/tile/include/asm/spinlock_64.h
index 9a12b9c7e..b9718fb4e 100644
--- a/kernel/arch/tile/include/asm/spinlock_64.h
+++ b/kernel/arch/tile/include/asm/spinlock_64.h
@@ -18,6 +18,8 @@
#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H
+#include <linux/compiler.h>
+
/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT 17
#define __ARCH_SPIN_NEXT_MASK 0x7fff
@@ -44,7 +46,8 @@ static inline u32 arch_spin_next(u32 val)
/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- u32 val = lock->lock;
+ /* Use READ_ONCE() to ensure that calling this in a loop is OK. */
+ u32 val = READ_ONCE(lock->lock);
return arch_spin_current(val) != arch_spin_next(val);
}
diff --git a/kernel/arch/tile/include/asm/stack.h b/kernel/arch/tile/include/asm/stack.h
index 0e9d382a2..c3cb42615 100644
--- a/kernel/arch/tile/include/asm/stack.h
+++ b/kernel/arch/tile/include/asm/stack.h
@@ -58,17 +58,14 @@ extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
/* Advance to the next frame. */
extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);
+/* Dump just the contents of the pt_regs structure. */
+extern void tile_show_regs(struct pt_regs *);
+
/*
* Dump stack given complete register info. Use only from the
* architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
*/
-extern void tile_show_stack(struct KBacktraceIterator *, int headers);
-
-/* Dump stack of current process, with registers to seed the backtrace. */
-extern void dump_stack_regs(struct pt_regs *);
-
-/* Helper method for assembly dump_stack(). */
-extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+extern void tile_show_stack(struct KBacktraceIterator *);
#endif /* _ASM_TILE_STACK_H */
diff --git a/kernel/arch/tile/include/asm/switch_to.h b/kernel/arch/tile/include/asm/switch_to.h
index b8f888cbe..34ee72705 100644
--- a/kernel/arch/tile/include/asm/switch_to.h
+++ b/kernel/arch/tile/include/asm/switch_to.h
@@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void);
* Kernel threads can check to see if they need to migrate their
* stack whenever they return from a context switch; for user
* threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
*/
-#define finish_arch_switch(prev) do { \
- if (unlikely((prev)->state == TASK_DEAD)) \
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \
- ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \
+#define finish_arch_post_lock_switch() do { \
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \
(current->pid << _SIM_CONTROL_OPERATOR_BITS)); \
if (current->mm == NULL && !kstack_hash && \
- current_thread_info()->homecache_cpu != smp_processor_id()) \
+ current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
homecache_migrate_kthread(); \
} while (0)
diff --git a/kernel/arch/tile/include/asm/syscall.h b/kernel/arch/tile/include/asm/syscall.h
index 9644b88f1..373d73064 100644
--- a/kernel/arch/tile/include/asm/syscall.h
+++ b/kernel/arch/tile/include/asm/syscall.h
@@ -20,6 +20,8 @@
#include <linux/sched.h>
#include <linux/err.h>
+#include <linux/audit.h>
+#include <linux/compat.h>
#include <arch/abi.h>
/* The array of function pointers for syscalls. */
@@ -61,7 +63,15 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- regs->regs[0] = (long) error ?: val;
+ if (error) {
+ /* R0 is the passed-in negative error, R1 is positive. */
+ regs->regs[0] = error;
+ regs->regs[1] = -error;
+ } else {
+ /* R1 set to zero to indicate no error. */
+ regs->regs[0] = val;
+ regs->regs[1] = 0;
+ }
}
static inline void syscall_get_arguments(struct task_struct *task,
@@ -82,4 +92,20 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs[i], args, n * sizeof(args[0]));
}
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * tile has the same system calls both on little- and big- endian.
+ */
+static inline int syscall_get_arch(void)
+{
+ if (is_compat_task())
+ return AUDIT_ARCH_TILEGX32;
+
+#ifdef CONFIG_TILEGX
+ return AUDIT_ARCH_TILEGX;
+#else
+ return AUDIT_ARCH_TILEPRO;
+#endif
+}
+
#endif /* _ASM_TILE_SYSCALL_H */
diff --git a/kernel/arch/tile/include/asm/thread_info.h b/kernel/arch/tile/include/asm/thread_info.h
index f804c39a5..dc1fb28d9 100644
--- a/kernel/arch/tile/include/asm/thread_info.h
+++ b/kernel/arch/tile/include/asm/thread_info.h
@@ -42,6 +42,7 @@ struct thread_info {
unsigned long unalign_jit_tmp[4]; /* temp r0..r3 storage */
void __user *unalign_jit_base; /* unalign fixup JIT base */
#endif
+ bool in_backtrace; /* currently doing backtrace? */
};
/*
diff --git a/kernel/arch/tile/include/asm/topology.h b/kernel/arch/tile/include/asm/topology.h
index 938311844..76b0d0ebb 100644
--- a/kernel/arch/tile/include/asm/topology.h
+++ b/kernel/arch/tile/include/asm/topology.h
@@ -55,7 +55,7 @@ static inline const struct cpumask *cpumask_of_node(int node)
#define topology_physical_package_id(cpu) ((void)(cpu), 0)
#define topology_core_id(cpu) (cpu)
#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
-#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#endif /* _ASM_TILE_TOPOLOGY_H */
diff --git a/kernel/arch/tile/include/asm/traps.h b/kernel/arch/tile/include/asm/traps.h
index 4b99a1c3a..11c82270c 100644
--- a/kernel/arch/tile/include/asm/traps.h
+++ b/kernel/arch/tile/include/asm/traps.h
@@ -52,6 +52,14 @@ void do_timer_interrupt(struct pt_regs *, int fault_num);
/* kernel/messaging.c */
void hv_message_intr(struct pt_regs *, int intnum);
+#define TILE_NMI_DUMP_STACK 1 /* Dump stack for sysrq+'l' */
+
+/* kernel/process.c */
+void do_nmi_dump_stack(struct pt_regs *regs);
+
+/* kernel/traps.c */
+void do_nmi(struct pt_regs *, int fault_num, unsigned long reason);
+
/* kernel/irq.c */
void tile_dev_intr(struct pt_regs *, int intnum);
diff --git a/kernel/arch/tile/include/asm/uaccess.h b/kernel/arch/tile/include/asm/uaccess.h
index a33276bf5..0a9c42657 100644
--- a/kernel/arch/tile/include/asm/uaccess.h
+++ b/kernel/arch/tile/include/asm/uaccess.h
@@ -65,6 +65,13 @@ static inline int is_arch_mappable_range(unsigned long addr,
#endif
/*
+ * Note that using this definition ignores is_arch_mappable_range(),
+ * so on tilepro code that uses user_addr_max() is constrained not
+ * to reference the tilepro user-interrupt region.
+ */
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
+
+/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
*/
@@ -471,62 +478,9 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
#endif
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-extern long strnlen_user_asm(const char __user *str, long n);
-static inline long __must_check strnlen_user(const char __user *str, long n)
-{
- might_fault();
- return strnlen_user_asm(str, n);
-}
-#define strlen_user(str) strnlen_user(str, LONG_MAX)
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- * Caller must check the specified block with access_ok() before calling
- * this function.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
-static inline long __must_check __strncpy_from_user(
- char *dst, const char __user *src, long count)
-{
- might_fault();
- return strncpy_from_user_asm(dst, src, count);
-}
-static inline long __must_check strncpy_from_user(
- char *dst, const char __user *src, long count)
-{
- if (access_ok(VERIFY_READ, src, 1))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
+extern long strnlen_user(const char __user *str, long n);
+extern long strlen_user(const char __user *str);
+extern long strncpy_from_user(char *dst, const char __user *src, long);
/**
* clear_user: - Zero a block of memory in user space.
diff --git a/kernel/arch/tile/include/asm/word-at-a-time.h b/kernel/arch/tile/include/asm/word-at-a-time.h
new file mode 100644
index 000000000..b66a693c2
--- /dev/null
+++ b/kernel/arch/tile/include/asm/word-at-a-time.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <asm/byteorder.h>
+
+struct word_at_a_time { /* unused */ };
+#define WORD_AT_A_TIME_CONSTANTS {}
+
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
+static inline unsigned long has_zero(unsigned long val, unsigned long *data,
+ const struct word_at_a_time *c)
+{
+#ifdef __tilegx__
+ unsigned long mask = __insn_v1cmpeqi(val, 0);
+#else /* tilepro */
+ unsigned long mask = __insn_seqib(val, 0);
+#endif
+ *data = mask;
+ return mask;
+}
+
+/* These operations are both nops. */
+#define prep_zero_mask(val, data, c) (data)
+#define create_zero_mask(data) (data)
+
+/* And this operation just depends on endianness. */
+static inline long find_zero(unsigned long mask)
+{
+#ifdef __BIG_ENDIAN
+ return __builtin_clzl(mask) >> 3;
+#else
+ return __builtin_ctzl(mask) >> 3;
+#endif
+}
+
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
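
Note: the new <asm/word-at-a-time.h> above supplies the primitives that the generic GENERIC_STRNLEN_USER/GENERIC_STRNCPY_FROM_USER code (now selected in Kconfig) uses to scan a word at a time. A portable sketch of the same has_zero()/find_zero() idea, using the classic bit trick instead of the TILE __insn_v1cmpeqi()/__insn_seqib() byte compares (the *_like names are hypothetical, and a 64-bit little-endian unsigned long is assumed):

#include <stdint.h>
#include <stdio.h>

/* Nonzero iff some byte of val is zero; the lowest set 0x80 bit marks
 * the first zero byte (little-endian). */
static unsigned long has_zero_like(unsigned long val)
{
	return (val - 0x0101010101010101UL) & ~val & 0x8080808080808080UL;
}

static long find_zero_like(unsigned long mask)
{
	return __builtin_ctzl(mask) >> 3;	/* byte index, little-endian */
}

int main(void)
{
	/* Bytes in memory (LE): 45 00 41 00 42 43 44 00 */
	unsigned long word = 0x0044434200410045UL;
	unsigned long mask = has_zero_like(word);

	if (mask)
		printf("first zero byte at offset %ld\n", find_zero_like(mask));
	return 0;
}
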
diff --git a/kernel/arch/tile/include/hv/hypervisor.h b/kernel/arch/tile/include/hv/hypervisor.h
index e0e6af4e7..f10b332b3 100644
--- a/kernel/arch/tile/include/hv/hypervisor.h
+++ b/kernel/arch/tile/include/hv/hypervisor.h
@@ -321,8 +321,11 @@
/** hv_console_set_ipi */
#define HV_DISPATCH_CONSOLE_SET_IPI 63
+/** hv_send_nmi */
+#define HV_DISPATCH_SEND_NMI 65
+
/** One more than the largest dispatch value */
-#define _HV_DISPATCH_END 64
+#define _HV_DISPATCH_END 66
#ifndef __ASSEMBLER__
@@ -1253,6 +1256,11 @@ void hv_downcall_dispatch(void);
#define INT_DMATLB_ACCESS_DWNCL INT_DMA_CPL
/** Device interrupt downcall interrupt vector */
#define INT_DEV_INTR_DWNCL INT_WORLD_ACCESS
+/** NMI downcall interrupt vector */
+#define INT_NMI_DWNCL 64
+
+#define HV_NMI_FLAG_FORCE 0x1 /**< Force an NMI downcall regardless of
+ the ICS bit of the client. */
#ifndef __ASSEMBLER__
@@ -1780,6 +1788,56 @@ int hv_dev_poll(int devhdl, __hv32 events, HV_IntArg intarg);
int hv_dev_poll_cancel(int devhdl);
+/** NMI information */
+typedef struct
+{
+ /** Result: negative error, or HV_NMI_RESULT_xxx. */
+ int result;
+
+ /** PC from interrupted remote core (if result != HV_NMI_RESULT_FAIL_HV). */
+ HV_VirtAddr pc;
+
+} HV_NMI_Info;
+
+/** NMI issued successfully. */
+#define HV_NMI_RESULT_OK 0
+
+/** NMI not issued: remote tile running at client PL with ICS set. */
+#define HV_NMI_RESULT_FAIL_ICS 1
+
+/** NMI not issued: remote tile waiting in hypervisor. */
+#define HV_NMI_RESULT_FAIL_HV 2
+
+/** Force an NMI downcall regardless of the ICS bit of the client. */
+#define HV_NMI_FLAG_FORCE 0x1
+
+/** Send an NMI interrupt request to a particular tile.
+ *
+ * This will cause the NMI to be issued on the remote tile regardless
+ * of the state of the client interrupt mask. However, if the remote
+ * tile is in the hypervisor, it will not execute the NMI, and
+ * HV_NMI_RESULT_FAIL_HV will be returned. Similarly, if the remote
+ * tile is in a client interrupt critical section at the time of the
+ * NMI, it will not execute the NMI, and HV_NMI_RESULT_FAIL_ICS will
+ * be returned. In this second case, however, if HV_NMI_FLAG_FORCE
+ * is set in flags, then the remote tile will enter its NMI interrupt
+ * vector regardless. Forcing the NMI vector during an interrupt
+ * critical section will mean that the client can not safely continue
+ * execution after handling the interrupt.
+ *
+ * @param tile Tile to which the NMI request is sent.
+ * @param info NMI information which is defined by and interpreted by the
+ * supervisor, is passed to the specified tile, and is
+ * stored in the SPR register SYSTEM_SAVE_{CLIENT_PL}_2 on the
+ * specified tile when entering the NMI handler routine.
+ * Typically, this parameter stores the NMI type, or an aligned
+ * VA plus some special bits, etc.
+ * @param flags Flags (HV_NMI_FLAG_xxx).
+ * @return Information about the requested NMI.
+ */
+HV_NMI_Info hv_send_nmi(HV_Coord tile, unsigned long info, __hv64 flags);
+
+
/** Scatter-gather list for preada/pwritea calls. */
typedef struct
#if CHIP_VA_WIDTH() <= 32
diff --git a/kernel/arch/tile/include/uapi/arch/opcode_tilegx.h b/kernel/arch/tile/include/uapi/arch/opcode_tilegx.h
index d76ff2db7..9e46eaa84 100644
--- a/kernel/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ b/kernel/arch/tile/include/uapi/arch/opcode_tilegx.h
@@ -830,11 +830,11 @@ enum
ADDX_RRR_0_OPCODE_X0 = 2,
ADDX_RRR_0_OPCODE_X1 = 2,
ADDX_RRR_0_OPCODE_Y0 = 0,
- ADDX_SPECIAL_0_OPCODE_Y1 = 0,
+ ADDX_RRR_0_OPCODE_Y1 = 0,
ADD_RRR_0_OPCODE_X0 = 3,
ADD_RRR_0_OPCODE_X1 = 3,
ADD_RRR_0_OPCODE_Y0 = 1,
- ADD_SPECIAL_0_OPCODE_Y1 = 1,
+ ADD_RRR_0_OPCODE_Y1 = 1,
ANDI_IMM8_OPCODE_X0 = 3,
ANDI_IMM8_OPCODE_X1 = 3,
ANDI_OPCODE_Y0 = 2,
@@ -995,6 +995,7 @@ enum
LD4U_ADD_IMM8_OPCODE_X1 = 12,
LD4U_OPCODE_Y2 = 2,
LD4U_UNARY_OPCODE_X1 = 20,
+ LDNA_ADD_IMM8_OPCODE_X1 = 21,
LDNA_UNARY_OPCODE_X1 = 21,
LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
LDNT1S_UNARY_OPCODE_X1 = 22,
@@ -1015,7 +1016,6 @@ enum
LD_UNARY_OPCODE_X1 = 29,
LNK_UNARY_OPCODE_X1 = 30,
LNK_UNARY_OPCODE_Y1 = 14,
- LWNA_ADD_IMM8_OPCODE_X1 = 21,
MFSPR_IMM8_OPCODE_X1 = 22,
MF_UNARY_OPCODE_X1 = 31,
MM_BF_OPCODE_X0 = 7,
diff --git a/kernel/arch/tile/include/uapi/asm/mman.h b/kernel/arch/tile/include/uapi/asm/mman.h
index 81b8fc348..63ee13faf 100644
--- a/kernel/arch/tile/include/uapi/asm/mman.h
+++ b/kernel/arch/tile/include/uapi/asm/mman.h
@@ -36,6 +36,7 @@
*/
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
+#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
#endif /* _ASM_TILE_MMAN_H */
diff --git a/kernel/arch/tile/kernel/entry.S b/kernel/arch/tile/kernel/entry.S
index 3d9175992..670a35694 100644
--- a/kernel/arch/tile/kernel/entry.S
+++ b/kernel/arch/tile/kernel/entry.S
@@ -27,13 +27,6 @@ STD_ENTRY(current_text_addr)
{ move r0, lr; jrp lr }
STD_ENDPROC(current_text_addr)
-STD_ENTRY(dump_stack)
- { move r2, lr; lnk r1 }
- { move r4, r52; addli r1, r1, dump_stack - . }
- { move r3, sp; j _dump_stack }
- jrp lr /* keep backtracer happy */
- STD_ENDPROC(dump_stack)
-
STD_ENTRY(KBacktraceIterator_init_current)
{ move r2, lr; lnk r1 }
{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
diff --git a/kernel/arch/tile/kernel/hvglue.S b/kernel/arch/tile/kernel/hvglue.S
index 2ab456622..d78ee2ad6 100644
--- a/kernel/arch/tile/kernel/hvglue.S
+++ b/kernel/arch/tile/kernel/hvglue.S
@@ -71,4 +71,5 @@ gensym hv_flush_all, 0x6e0, 32
gensym hv_get_ipi_pte, 0x700, 32
gensym hv_set_pte_super_shift, 0x720, 32
gensym hv_console_set_ipi, 0x7e0, 32
-gensym hv_glue_internals, 0x800, 30720
+gensym hv_send_nmi, 0x800, 32
+gensym hv_glue_internals, 0x820, 30688
diff --git a/kernel/arch/tile/kernel/hvglue_trace.c b/kernel/arch/tile/kernel/hvglue_trace.c
index 85c74ad29..add0d7139 100644
--- a/kernel/arch/tile/kernel/hvglue_trace.c
+++ b/kernel/arch/tile/kernel/hvglue_trace.c
@@ -75,6 +75,7 @@
#define hv_get_ipi_pte _hv_get_ipi_pte
#define hv_set_pte_super_shift _hv_set_pte_super_shift
#define hv_console_set_ipi _hv_console_set_ipi
+#define hv_send_nmi _hv_send_nmi
#include <hv/hypervisor.h>
#undef hv_init
#undef hv_install_context
@@ -134,6 +135,7 @@
#undef hv_get_ipi_pte
#undef hv_set_pte_super_shift
#undef hv_console_set_ipi
+#undef hv_send_nmi
/*
* Provide macros based on <linux/syscalls.h> to provide a wrapper
@@ -264,3 +266,5 @@ HV_WRAP9(int, hv_flush_remote, HV_PhysAddr, cache_pa,
HV_VirtAddr, tlb_va, unsigned long, tlb_length,
unsigned long, tlb_pgsize, unsigned long*, tlb_cpumask,
HV_Remote_ASID*, asids, int, asidcount)
+HV_WRAP3(HV_NMI_Info, hv_send_nmi, HV_Coord, tile, unsigned long, info,
+ __hv64, flags)
diff --git a/kernel/arch/tile/kernel/intvec_32.S b/kernel/arch/tile/kernel/intvec_32.S
index cdbda45a4..fbbe2ea88 100644
--- a/kernel/arch/tile/kernel/intvec_32.S
+++ b/kernel/arch/tile/kernel/intvec_32.S
@@ -1224,6 +1224,7 @@ handle_syscall:
jal do_syscall_trace_enter
}
FEEDBACK_REENTER(handle_syscall)
+ blz r0, .Lsyscall_sigreturn_skip
/*
* We always reload our registers from the stack at this
diff --git a/kernel/arch/tile/kernel/intvec_64.S b/kernel/arch/tile/kernel/intvec_64.S
index 5b67efcec..58964d209 100644
--- a/kernel/arch/tile/kernel/intvec_64.S
+++ b/kernel/arch/tile/kernel/intvec_64.S
@@ -515,6 +515,10 @@ intvec_\vecname:
.ifc \c_routine, handle_perf_interrupt
mfspr r2, AUX_PERF_COUNT_STS
.endif
+ .ifc \c_routine, do_nmi
+ mfspr r2, SPR_SYSTEM_SAVE_K_2 /* nmi type */
+ .else
+ .endif
.endif
.endif
.endif
@@ -1243,6 +1247,7 @@ handle_syscall:
jal do_syscall_trace_enter
}
FEEDBACK_REENTER(handle_syscall)
+ bltz r0, .Lsyscall_sigreturn_skip
/*
* We always reload our registers from the stack at this
@@ -1571,3 +1576,5 @@ intrpt_start:
/* Synthetic interrupt delivered only by the simulator */
int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
+ /* Synthetic interrupt delivered by hv */
+ int_hand INT_NMI_DWNCL, NMI_DWNCL, do_nmi, handle_nmi
diff --git a/kernel/arch/tile/kernel/pci_gx.c b/kernel/arch/tile/kernel/pci_gx.c
index b1df847d0..4c017d0d2 100644
--- a/kernel/arch/tile/kernel/pci_gx.c
+++ b/kernel/arch/tile/kernel/pci_gx.c
@@ -304,7 +304,7 @@ static struct irq_chip tilegx_legacy_irq_chip = {
* to Linux which just calls handle_level_irq() after clearing the
* MAC INTx Assert status bit associated with this interrupt.
*/
-static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
+static void trio_handle_level_irq(struct irq_desc *desc)
{
struct pci_controller *controller = irq_desc_get_handler_data(desc);
gxio_trio_context_t *trio_context = controller->trio;
@@ -313,7 +313,7 @@ static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
unsigned int reg_offset;
uint64_t level_mask;
- handle_level_irq(irq, desc);
+ handle_level_irq(desc);
/*
* Clear the INTx Level status, otherwise future interrupts are
@@ -1442,7 +1442,7 @@ static struct pci_ops tile_cfg_ops = {
/* MSI support starts here. */
static unsigned int tilegx_msi_startup(struct irq_data *d)
{
- if (d->msi_desc)
+ if (irq_data_get_msi_desc(d))
pci_msi_unmask_irq(d);
return 0;
diff --git a/kernel/arch/tile/kernel/perf_event.c b/kernel/arch/tile/kernel/perf_event.c
index bb509cee3..8767060d7 100644
--- a/kernel/arch/tile/kernel/perf_event.c
+++ b/kernel/arch/tile/kernel/perf_event.c
@@ -21,7 +21,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*/
diff --git a/kernel/arch/tile/kernel/process.c b/kernel/arch/tile/kernel/process.c
index b403c2e3e..7d5769310 100644
--- a/kernel/arch/tile/kernel/process.c
+++ b/kernel/arch/tile/kernel/process.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
+#include <linux/delay.h>
#include <linux/context_tracking.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
@@ -132,7 +133,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
(CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
callee_regs[0] = sp; /* r30 = function */
callee_regs[1] = arg; /* r31 = arg */
- childregs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
p->thread.pc = (unsigned long) ret_from_kernel_thread;
return 0;
}
@@ -446,6 +446,11 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
hardwall_switch_tasks(prev, next);
#endif
+ /* Notify the simulator of task exit. */
+ if (unlikely(prev->state == TASK_DEAD))
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
+ (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
+
/*
* Switch kernel SP, PC, and callee-saved registers.
* In the context of the new task, return the old task pointer
@@ -546,31 +551,141 @@ void exit_thread(void)
#endif
}
-void show_regs(struct pt_regs *regs)
+void tile_show_regs(struct pt_regs *regs)
{
- struct task_struct *tsk = validate_current();
int i;
-
- if (tsk != &corrupt_current)
- show_regs_print_info(KERN_ERR);
#ifdef __tilegx__
for (i = 0; i < 17; i++)
- pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+ pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
i, regs->regs[i], i+18, regs->regs[i+18],
i+36, regs->regs[i+36]);
- pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
+ pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
regs->regs[17], regs->regs[35], regs->tp);
- pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
+ pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
for (i = 0; i < 13; i++)
- pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+ pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
+ " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
i, regs->regs[i], i+14, regs->regs[i+14],
i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
- pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
+ pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
regs->regs[13], regs->tp, regs->sp, regs->lr);
#endif
- pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n",
- regs->pc, regs->ex1, regs->faultnum);
+ pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld flags:%s%s%s%s\n",
+ regs->pc, regs->ex1, regs->faultnum,
+ is_compat_task() ? " compat" : "",
+ (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
+ !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
+ (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ struct KBacktraceIterator kbt;
+
+ show_regs_print_info(KERN_DEFAULT);
+ tile_show_regs(regs);
+
+ KBacktraceIterator_init(&kbt, NULL, regs);
+ tile_show_stack(&kbt);
+}
- dump_stack_regs(regs);
+/* To ensure stack dump on tiles occurs one by one. */
+static DEFINE_SPINLOCK(backtrace_lock);
+/* To ensure no backtrace occurs before all of the stack dump are done. */
+static atomic_t backtrace_cpus;
+/* The cpu mask to avoid reentrance. */
+static struct cpumask backtrace_mask;
+
+void do_nmi_dump_stack(struct pt_regs *regs)
+{
+ int is_idle = is_idle_task(current) && !in_interrupt();
+ int cpu;
+
+ nmi_enter();
+ cpu = smp_processor_id();
+ if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
+ goto done;
+
+ spin_lock(&backtrace_lock);
+ if (is_idle)
+ pr_info("CPU: %d idle\n", cpu);
+ else
+ show_regs(regs);
+ spin_unlock(&backtrace_lock);
+ atomic_dec(&backtrace_cpus);
+done:
+ nmi_exit();
+}
+
+#ifdef __tilegx__
+void arch_trigger_all_cpu_backtrace(bool self)
+{
+ struct cpumask mask;
+ HV_Coord tile;
+ unsigned int timeout;
+ int cpu;
+ int ongoing;
+ HV_NMI_Info info[NR_CPUS];
+
+ ongoing = atomic_cmpxchg(&backtrace_cpus, 0, num_online_cpus() - 1);
+ if (ongoing != 0) {
+ pr_err("Trying to do all-cpu backtrace.\n");
+ pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
+ ongoing);
+ if (self) {
+ pr_err("Reporting the stack on this cpu only.\n");
+ dump_stack();
+ }
+ return;
+ }
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+ cpumask_copy(&backtrace_mask, &mask);
+
+ /* Backtrace for myself first. */
+ if (self)
+ dump_stack();
+
+ /* Tentatively dump stack on remote tiles via NMI. */
+ timeout = 100;
+ while (!cpumask_empty(&mask) && timeout) {
+ for_each_cpu(cpu, &mask) {
+ tile.x = cpu_x(cpu);
+ tile.y = cpu_y(cpu);
+ info[cpu] = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
+ if (info[cpu].result == HV_NMI_RESULT_OK)
+ cpumask_clear_cpu(cpu, &mask);
+ }
+
+ mdelay(10);
+ timeout--;
+ }
+
+ /* Warn about cpus stuck in ICS and decrement their counts here. */
+ if (!cpumask_empty(&mask)) {
+ for_each_cpu(cpu, &mask) {
+ switch (info[cpu].result) {
+ case HV_NMI_RESULT_FAIL_ICS:
+ pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
+ cpu, info[cpu].pc);
+ break;
+ case HV_NMI_RESULT_FAIL_HV:
+ pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
+ cpu);
+ break;
+ case HV_ENOSYS:
+ pr_warn("Hypervisor too old to allow remote stack dumps.\n");
+ goto skip_for_each;
+ default: /* should not happen */
+ pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
+ cpu, info[cpu].result, info[cpu].pc);
+ break;
+ }
+ }
+skip_for_each:
+ atomic_sub(cpumask_weight(&mask), &backtrace_cpus);
+ }
}
+#endif /* __tilegx__ */
diff --git a/kernel/arch/tile/kernel/ptrace.c b/kernel/arch/tile/kernel/ptrace.c
index f84eed824..bdc126faf 100644
--- a/kernel/arch/tile/kernel/ptrace.c
+++ b/kernel/arch/tile/kernel/ptrace.c
@@ -262,6 +262,9 @@ int do_syscall_trace_enter(struct pt_regs *regs)
if (work & _TIF_NOHZ)
user_exit();
+ if (secure_computing() == -1)
+ return -1;
+
if (work & _TIF_SYSCALL_TRACE) {
if (tracehook_report_syscall_entry(regs))
regs->regs[TREG_SYSCALL_NR] = -1;
diff --git a/kernel/arch/tile/kernel/setup.c b/kernel/arch/tile/kernel/setup.c
index 396b5c96e..6b755d125 100644
--- a/kernel/arch/tile/kernel/setup.c
+++ b/kernel/arch/tile/kernel/setup.c
@@ -71,7 +71,7 @@ static unsigned long __initdata node_percpu[MAX_NUMNODES];
* per-CPU stack and boot info.
*/
DEFINE_PER_CPU(unsigned long, boot_sp) =
- (unsigned long)init_stack + THREAD_SIZE;
+ (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
diff --git a/kernel/arch/tile/kernel/stack.c b/kernel/arch/tile/kernel/stack.c
index c42dce50a..402b9c85a 100644
--- a/kernel/arch/tile/kernel/stack.c
+++ b/kernel/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
+#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
@@ -109,7 +110,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
if (kbt->verbose)
pr_err(" <%s while in user mode>\n", fault);
} else {
- if (kbt->verbose)
+ if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
p->pc, p->sp, p->ex1);
return NULL;
@@ -119,10 +120,12 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
return p;
}
-/* Is the pc pointing to a sigreturn trampoline? */
-static int is_sigreturn(unsigned long pc)
+/* Is the iterator pointing to a sigreturn trampoline? */
+static int is_sigreturn(struct KBacktraceIterator *kbt)
{
- return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
+ return kbt->task->mm &&
+ (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
+ (ulong)&__vdso_rt_sigreturn));
}
/* Return a pt_regs pointer for a valid signal handler frame */
@@ -131,7 +134,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
{
BacktraceIterator *b = &kbt->it;
- if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
+ if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
b->sp % sizeof(long) == 0) {
int retval;
pagefault_disable();
@@ -151,11 +154,6 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
return NULL;
}
-static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
-{
- return is_sigreturn(kbt->it.pc);
-}
-
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
struct pt_regs *p;
@@ -178,7 +176,7 @@ static int KBacktraceIterator_next_item_inclusive(
{
for (;;) {
do {
- if (!KBacktraceIterator_is_sigreturn(kbt))
+ if (!is_sigreturn(kbt))
return KBT_ONGOING;
} while (backtrace_next(&kbt->it));
@@ -334,7 +332,7 @@ static void describe_addr(struct KBacktraceIterator *kbt,
}
if (vma->vm_file) {
- p = d_path(&vma->vm_file->f_path, buf, bufsize);
+ p = file_path(vma->vm_file, buf, bufsize);
if (IS_ERR(p))
p = "?";
name = kbasename(p);
@@ -357,51 +355,50 @@ static void describe_addr(struct KBacktraceIterator *kbt,
*/
static bool start_backtrace(void)
{
- if (current->thread.in_backtrace) {
+ if (current_thread_info()->in_backtrace) {
pr_err("Backtrace requested while in backtrace!\n");
return false;
}
- current->thread.in_backtrace = true;
+ current_thread_info()->in_backtrace = true;
return true;
}
static void end_backtrace(void)
{
- current->thread.in_backtrace = false;
+ current_thread_info()->in_backtrace = false;
}
/*
* This method wraps the backtracer's more generic support.
* It is only invoked from the architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
*/
-void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
+void tile_show_stack(struct KBacktraceIterator *kbt)
{
int i;
int have_mmap_sem = 0;
if (!start_backtrace())
return;
- if (headers) {
- /*
- * Add a blank line since if we are called from panic(),
- * then bust_spinlocks() spit out a space in front of us
- * and it will mess up our KERN_ERR.
- */
- pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
- kbt->task->pid, kbt->task->tgid, kbt->task->comm,
- raw_smp_processor_id(), get_cycles());
- }
kbt->verbose = 1;
i = 0;
for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
char namebuf[KSYM_NAME_LEN+100];
unsigned long address = kbt->it.pc;
- /* Try to acquire the mmap_sem as we pass into userspace. */
- if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+ /*
+ * Try to acquire the mmap_sem as we pass into userspace.
+ * If we're in an interrupt context, don't even try, since
+ * it's not safe to call e.g. d_path() from an interrupt,
+ * since it uses spin locks without disabling interrupts.
+ * Note we test "kbt->task == current", not "kbt->is_current",
+ * since we're checking that "current" will work in d_path().
+ */
+ if (kbt->task == current && address < PAGE_OFFSET &&
+ !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
have_mmap_sem =
down_read_trylock(&kbt->task->mm->mmap_sem);
+ }
describe_addr(kbt, address, have_mmap_sem,
namebuf, sizeof(namebuf));
@@ -416,24 +413,12 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
}
if (kbt->end == KBT_LOOP)
pr_err("Stack dump stopped; next frame identical to this one\n");
- if (headers)
- pr_err("Stack dump complete\n");
if (have_mmap_sem)
up_read(&kbt->task->mm->mmap_sem);
end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);
-
-/* This is called from show_regs() and _dump_stack() */
-void dump_stack_regs(struct pt_regs *regs)
-{
- struct KBacktraceIterator kbt;
- KBacktraceIterator_init(&kbt, NULL, regs);
- tile_show_stack(&kbt, 1);
-}
-EXPORT_SYMBOL(dump_stack_regs);
-
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
ulong pc, ulong lr, ulong sp, ulong r52)
{
@@ -445,11 +430,15 @@ static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
return regs;
}
-/* This is called from dump_stack() and just converts to pt_regs */
+/* Deprecated function currently only used by kernel_double_fault(). */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
+ struct KBacktraceIterator kbt;
struct pt_regs regs;
- dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
+
+ regs_to_pt_regs(&regs, pc, lr, sp, r52);
+ KBacktraceIterator_init(&kbt, NULL, &regs);
+ tile_show_stack(&kbt);
}
/* This is called from KBacktraceIterator_init_current() */
@@ -461,22 +450,30 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
regs_to_pt_regs(&regs, pc, lr, sp, r52));
}
-/* This is called only from kernel/sched/core.c, with esp == NULL */
+/*
+ * Called from sched_show_task() with task != NULL, or dump_stack()
+ * with task == NULL. The esp argument is always NULL.
+ */
void show_stack(struct task_struct *task, unsigned long *esp)
{
struct KBacktraceIterator kbt;
- if (task == NULL || task == current)
+ if (task == NULL || task == current) {
KBacktraceIterator_init_current(&kbt);
- else
+ KBacktraceIterator_next(&kbt); /* don't show first frame */
+ } else {
KBacktraceIterator_init(&kbt, task, NULL);
- tile_show_stack(&kbt, 0);
+ }
+ tile_show_stack(&kbt);
}
#ifdef CONFIG_STACKTRACE
/* Support generic Linux stack API too */
-void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+static void save_stack_trace_common(struct task_struct *task,
+ struct pt_regs *regs,
+ bool user,
+ struct stack_trace *trace)
{
struct KBacktraceIterator kbt;
int skip = trace->skip;
@@ -484,31 +481,57 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
if (!start_backtrace())
goto done;
- if (task == NULL || task == current)
+ if (regs != NULL) {
+ KBacktraceIterator_init(&kbt, NULL, regs);
+ } else if (task == NULL || task == current) {
KBacktraceIterator_init_current(&kbt);
- else
+ skip++; /* don't show KBacktraceIterator_init_current */
+ } else {
KBacktraceIterator_init(&kbt, task, NULL);
+ }
for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
if (skip) {
--skip;
continue;
}
- if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
+ if (i >= trace->max_entries ||
+ (!user && kbt.it.pc < PAGE_OFFSET))
break;
trace->entries[i++] = kbt.it.pc;
}
end_backtrace();
done:
+ if (i < trace->max_entries)
+ trace->entries[i++] = ULONG_MAX;
trace->nr_entries = i;
}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+ save_stack_trace_common(task, NULL, false, trace);
+}
EXPORT_SYMBOL(save_stack_trace_tsk);
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(NULL, trace);
+ save_stack_trace_common(NULL, NULL, false, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+ save_stack_trace_common(NULL, regs, false, trace);
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+ /* Trace user stack if we are not a kernel thread. */
+ if (current->mm)
+ save_stack_trace_common(NULL, task_pt_regs(current),
+ true, trace);
+ else if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
#endif
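
All four entry points now funnel into save_stack_trace_common(), differing
only in the starting point (regs, another task, or current) and in whether
user-space PCs are kept; when room remains the trace is terminated with the
conventional ULONG_MAX sentinel. A hedged sketch of how a caller consumes the
generic stack_trace API (dump_my_callers() and the buffer size are made up
for illustration):

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    static void dump_my_callers(void)
    {
            unsigned long entries[16];
            struct stack_trace trace = {
                    .entries     = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip        = 1,       /* drop dump_my_callers() itself */
            };
            unsigned int i;

            save_stack_trace(&trace);
            for (i = 0; i < trace.nr_entries; i++)
                    pr_info("caller: %pS\n", (void *)entries[i]);
    }
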
/* In entry.S */
diff --git a/kernel/arch/tile/kernel/sysfs.c b/kernel/arch/tile/kernel/sysfs.c
index a3ed12f8f..825867c53 100644
--- a/kernel/arch/tile/kernel/sysfs.c
+++ b/kernel/arch/tile/kernel/sysfs.c
@@ -198,16 +198,13 @@ static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif)
return err;
}
-static int hv_stats_device_remove(struct device *dev,
- struct subsys_interface *sif)
+static void hv_stats_device_remove(struct device *dev,
+ struct subsys_interface *sif)
{
int cpu = dev->id;
- if (!cpu_online(cpu))
- return 0;
-
- sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
- return 0;
+ if (cpu_online(cpu))
+ sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
}
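
The remove callback now matches the subsys_interface convention that device
removal cannot fail, so it simply returns nothing. As a hedged sketch (the
interface name and the registration below are assumptions, not taken from
this file), such a pair is typically wired up like this:

    static struct subsys_interface hv_stats_interface = {
            .name       = "hv_stats",
            .subsys     = &cpu_subsys,
            .add_dev    = hv_stats_device_add,      /* int: may fail */
            .remove_dev = hv_stats_device_remove,   /* void: cannot fail */
    };

    static int __init hv_stats_init(void)
    {
            return subsys_interface_register(&hv_stats_interface);
    }
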
diff --git a/kernel/arch/tile/kernel/time.c b/kernel/arch/tile/kernel/time.c
index 00178ecf9..178989e6d 100644
--- a/kernel/arch/tile/kernel/time.c
+++ b/kernel/arch/tile/kernel/time.c
@@ -140,10 +140,10 @@ static int tile_timer_set_next_event(unsigned long ticks,
* Whenever anyone tries to change modes, we just mask interrupts
* and wait for the next event to get set.
*/
-static void tile_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int tile_timer_shutdown(struct clock_event_device *evt)
{
arch_local_irq_mask_now(INT_TILE_TIMER);
+ return 0;
}
/*
@@ -157,7 +157,9 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
.rating = 100,
.irq = -1,
.set_next_event = tile_timer_set_next_event,
- .set_mode = tile_timer_set_mode,
+ .set_state_shutdown = tile_timer_shutdown,
+ .set_state_oneshot = tile_timer_shutdown,
+ .tick_resume = tile_timer_shutdown,
};
void setup_tile_timer(void)
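
The clockevents core replaced the single set_mode callback with per-state
hooks; since this timer only needs to mask its interrupt on any transition,
one handler covers shutdown, oneshot and tick_resume. A hedged sketch of the
resulting device shape (names and the mask helper are placeholders, not the
tile code):

    static int my_timer_shutdown(struct clock_event_device *evt)
    {
            my_timer_mask_irq();    /* hypothetical: mask the timer interrupt */
            return 0;
    }

    static struct clock_event_device my_timer = {
            .name               = "my-timer",
            .features           = CLOCK_EVT_FEAT_ONESHOT,
            .rating             = 100,
            .set_next_event     = my_timer_set_next_event,  /* hypothetical */
            .set_state_shutdown = my_timer_shutdown,
            .set_state_oneshot  = my_timer_shutdown,
            .tick_resume        = my_timer_shutdown,
    };
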
diff --git a/kernel/arch/tile/kernel/traps.c b/kernel/arch/tile/kernel/traps.c
index 312fc134c..0011a9ff0 100644
--- a/kernel/arch/tile/kernel/traps.c
+++ b/kernel/arch/tile/kernel/traps.c
@@ -395,6 +395,21 @@ done:
exception_exit(prev_state);
}
+void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
+{
+ switch (reason) {
+ case TILE_NMI_DUMP_STACK:
+ do_nmi_dump_stack(regs);
+ break;
+ default:
+ panic("Unexpected do_nmi type %ld", reason);
+ return;
+ }
+}
+
+/* Deprecated function currently only used here. */
+extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+
void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
_dump_stack(dummy, pc, lr, sp, r52);
diff --git a/kernel/arch/tile/kernel/usb.c b/kernel/arch/tile/kernel/usb.c
index 5af8debc6..9f1e05e12 100644
--- a/kernel/arch/tile/kernel/usb.c
+++ b/kernel/arch/tile/kernel/usb.c
@@ -21,6 +21,8 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/usb/tilegx.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
static u64 ehci_dmamask = DMA_BIT_MASK(32);
diff --git a/kernel/arch/tile/kernel/vdso/Makefile b/kernel/arch/tile/kernel/vdso/Makefile
index a025f63d5..c54fff37b 100644
--- a/kernel/arch/tile/kernel/vdso/Makefile
+++ b/kernel/arch/tile/kernel/vdso/Makefile
@@ -54,7 +54,7 @@ $(obj)/built-in.o: $(obj)/vdso-syms.o
$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=both)
SYSCFLAGS_vdso_syms.o = -r
$(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
$(call if_changed,vdsold)
@@ -113,6 +113,6 @@ $(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S
$(obj)/vdso32.o: $(obj)/vdso32.so
SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=both)
$(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
$(call if_changed,vdsold)
diff --git a/kernel/arch/tile/kernel/vdso/vgettimeofday.c b/kernel/arch/tile/kernel/vdso/vgettimeofday.c
index 8bb21eda0..e63310c49 100644
--- a/kernel/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/kernel/arch/tile/kernel/vdso/vgettimeofday.c
@@ -67,7 +67,7 @@ static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
u64 ns;
do {
- count = read_seqcount_begin(&vdso->tb_seq);
+ count = raw_read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->wall_time_sec;
ns = vdso->wall_time_snsec;
ns += vgetsns(vdso);
@@ -86,7 +86,7 @@ static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
u64 ns;
do {
- count = read_seqcount_begin(&vdso->tb_seq);
+ count = raw_read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->monotonic_time_sec;
ns = vdso->monotonic_time_snsec;
ns += vgetsns(vdso);
@@ -105,7 +105,7 @@ static inline int do_realtime_coarse(struct vdso_data *vdso,
unsigned count;
do {
- count = read_seqcount_begin(&vdso->tb_seq);
+ count = raw_read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->wall_time_coarse_sec;
ts->tv_nsec = vdso->wall_time_coarse_nsec;
} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
@@ -119,7 +119,7 @@ static inline int do_monotonic_coarse(struct vdso_data *vdso,
unsigned count;
do {
- count = read_seqcount_begin(&vdso->tb_seq);
+ count = raw_read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->monotonic_time_coarse_sec;
ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
@@ -137,7 +137,7 @@ struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
/* The use of the timezone is obsolete, normally tz is NULL. */
if (unlikely(tz != NULL)) {
do {
- count = read_seqcount_begin(&vdso->tz_seq);
+ count = raw_read_seqcount_begin(&vdso->tz_seq);
tz->tz_minuteswest = vdso->tz_minuteswest;
tz->tz_dsttime = vdso->tz_dsttime;
} while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
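
Every reader above follows the same lockless protocol: snapshot the sequence
counter, copy the data, and retry if a writer was active (odd count) or the
count changed; raw_read_seqcount_begin() is the variant without the lockdep
hook, which the vDSO cannot call. A self-contained C11 sketch of the protocol
(not the kernel's seqcount_t; a production seqlock also needs the data
accesses themselves to be fenced or atomic):

    #include <stdatomic.h>

    struct sample { unsigned long sec, nsec; };

    static _Atomic unsigned int seq;
    static struct sample data;

    /* Writer: count is odd while the update is in flight. */
    static void write_sample(struct sample s)
    {
            atomic_fetch_add_explicit(&seq, 1, memory_order_release);
            data = s;
            atomic_fetch_add_explicit(&seq, 1, memory_order_release);
    }

    /* Reader: retry until a stable, even count brackets the copy. */
    static struct sample read_sample(void)
    {
            struct sample s;
            unsigned int start;

            do {
                    start = atomic_load_explicit(&seq, memory_order_acquire);
                    s = data;   /* simplified: racy under a strict C11 reading */
            } while ((start & 1) ||
                     atomic_load_explicit(&seq, memory_order_acquire) != start);
            return s;
    }
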
diff --git a/kernel/arch/tile/lib/atomic_32.c b/kernel/arch/tile/lib/atomic_32.c
index c89b211fd..298df1e99 100644
--- a/kernel/arch/tile/lib/atomic_32.c
+++ b/kernel/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
}
EXPORT_SYMBOL(_atomic_or);
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+ return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
}
EXPORT_SYMBOL(_atomic64_cmpxchg);
+long long _atomic64_and(long long *v, long long n)
+{
+ return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+ return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+ return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
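
These 64-bit and/or/xor entry points give 32-bit tile, which has no native
64-bit atomics, the bitwise RMW operations the generic atomic64 code expects.
On hardware with compare-and-swap the same operation is just a CAS loop; a
minimal C11 sketch of that generic fallback (not the tile implementation,
which goes through its own lock-based helpers):

    #include <stdatomic.h>

    static long long atomic64_and_cas(_Atomic long long *v, long long mask)
    {
            long long old = atomic_load(v);

            /* Retry until the masked value is installed atomically;
             * on failure the CAS reloads 'old' for us. */
            while (!atomic_compare_exchange_weak(v, &old, old & mask))
                    ;
            return old;     /* value observed before the update */
    }
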
/*
* If any of the atomic or futex routines hit a bad address (not in
diff --git a/kernel/arch/tile/lib/atomic_asm_32.S b/kernel/arch/tile/lib/atomic_asm_32.S
index 6bda3132c..f61126563 100644
--- a/kernel/arch/tile/lib/atomic_asm_32.S
+++ b/kernel/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
atomic_op _xchg_add_unless, 32, \
"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
atomic_op _xor, 32, "xor r24, r22, r2"
@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
{ bbns r26, 3f; add r24, r22, r4 }; \
{ bbns r27, 3f; add r25, r23, r5 }; \
slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
jrp lr /* happy backtracer */
diff --git a/kernel/arch/tile/lib/exports.c b/kernel/arch/tile/lib/exports.c
index 82733c87d..9d171ca43 100644
--- a/kernel/arch/tile/lib/exports.c
+++ b/kernel/arch/tile/lib/exports.c
@@ -18,8 +18,6 @@
/* arch/tile/lib/usercopy.S */
#include <linux/uaccess.h>
-EXPORT_SYMBOL(strnlen_user_asm);
-EXPORT_SYMBOL(strncpy_from_user_asm);
EXPORT_SYMBOL(clear_user_asm);
EXPORT_SYMBOL(flush_user_asm);
EXPORT_SYMBOL(finv_user_asm);
@@ -28,7 +26,6 @@ EXPORT_SYMBOL(finv_user_asm);
#include <linux/kernel.h>
#include <asm/processor.h>
EXPORT_SYMBOL(current_text_addr);
-EXPORT_SYMBOL(dump_stack);
/* arch/tile/kernel/head.S */
EXPORT_SYMBOL(empty_zero_page);
diff --git a/kernel/arch/tile/lib/memcpy_user_64.c b/kernel/arch/tile/lib/memcpy_user_64.c
index 88c701649..97bbb6060 100644
--- a/kernel/arch/tile/lib/memcpy_user_64.c
+++ b/kernel/arch/tile/lib/memcpy_user_64.c
@@ -28,7 +28,7 @@
#define _ST(p, inst, v) \
({ \
asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext.memcpy,\"ax\";" \
+ ".pushsection .coldtext,\"ax\";" \
"2: { move r0, %2; jrp lr };" \
".section __ex_table,\"a\";" \
".align 8;" \
@@ -41,7 +41,7 @@
({ \
unsigned long __v; \
asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext.memcpy,\"ax\";" \
+ ".pushsection .coldtext,\"ax\";" \
"2: { move r0, %2; jrp lr };" \
".section __ex_table,\"a\";" \
".align 8;" \
diff --git a/kernel/arch/tile/lib/spinlock_32.c b/kernel/arch/tile/lib/spinlock_32.c
index b34f79aad..88c2a5336 100644
--- a/kernel/arch/tile/lib/spinlock_32.c
+++ b/kernel/arch/tile/lib/spinlock_32.c
@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
u32 iterations = 0;
- while (arch_spin_is_locked(lock))
+ int curr = READ_ONCE(lock->current_ticket);
+ int next = READ_ONCE(lock->next_ticket);
+
+ /* Return immediately if unlocked. */
+ if (next == curr)
+ return;
+
+ /* Wait until the current locker has released the lock. */
+ do {
delay_backoff(iterations++);
+ } while (READ_ONCE(lock->current_ticket) == curr);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/kernel/arch/tile/lib/spinlock_64.c b/kernel/arch/tile/lib/spinlock_64.c
index d6fb9581e..c8d1f94ff 100644
--- a/kernel/arch/tile/lib/spinlock_64.c
+++ b/kernel/arch/tile/lib/spinlock_64.c
@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
u32 iterations = 0;
- while (arch_spin_is_locked(lock))
+ u32 val = READ_ONCE(lock->lock);
+ u32 curr = arch_spin_current(val);
+
+ /* Return immediately if unlocked. */
+ if (arch_spin_next(val) == curr)
+ return;
+
+ /* Wait until the current locker has released the lock. */
+ do {
delay_backoff(iterations++);
+ } while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);
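
Both unlock_wait rewrites apply the same fix: waiting for the lock to be
momentarily free can wait forever behind a stream of new ticket holders, so
instead they snapshot the ticket of the current holder and wait only for that
holder to release. A minimal C11 sketch of the pattern for a simple ticket
lock (field names are illustrative, not the tile layout):

    #include <stdatomic.h>
    #include <sched.h>

    struct ticket_lock {
            _Atomic unsigned int current_ticket;
            _Atomic unsigned int next_ticket;
    };

    static void ticket_unlock_wait(struct ticket_lock *lock)
    {
            unsigned int curr = atomic_load(&lock->current_ticket);
            unsigned int next = atomic_load(&lock->next_ticket);

            /* Unlocked right now: nothing to wait for. */
            if (next == curr)
                    return;

            /* Wait only for the holder we observed, not for full quiescence. */
            while (atomic_load(&lock->current_ticket) == curr)
                    sched_yield();
    }
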
diff --git a/kernel/arch/tile/lib/usercopy_32.S b/kernel/arch/tile/lib/usercopy_32.S
index 1bc162224..db93ad5fa 100644
--- a/kernel/arch/tile/lib/usercopy_32.S
+++ b/kernel/arch/tile/lib/usercopy_32.S
@@ -20,52 +20,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
/*
- * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
- * It returns the length, including the terminating NUL, or zero on exception.
- * If length is greater than the bound, returns one plus the bound.
- */
-STD_ENTRY(strnlen_user_asm)
- { bz r1, 2f; addi r3, r0, -1 } /* bias down to include NUL */
-1: { lb_u r4, r0; addi r1, r1, -1 }
- bz r4, 2f
- { bnzt r1, 1b; addi r0, r0, 1 }
-2: { sub r0, r0, r3; jrp lr }
- STD_ENDPROC(strnlen_user_asm)
- .pushsection .fixup,"ax"
-strnlen_user_fault:
- { move r0, zero; jrp lr }
- ENDPROC(strnlen_user_fault)
- .section __ex_table,"a"
- .align 4
- .word 1b, strnlen_user_fault
- .popsection
-
-/*
- * strncpy_from_user_asm takes the kernel target pointer in r0,
- * the userspace source pointer in r1, and the length bound (including
- * the trailing NUL) in r2. On success, it returns the string length
- * (not including the trailing NUL), or -EFAULT on failure.
- */
-STD_ENTRY(strncpy_from_user_asm)
- { bz r2, 2f; move r3, r0 }
-1: { lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
- { sb r0, r4; addi r0, r0, 1 }
- bz r4, 2f
- bnzt r2, 1b
- { sub r0, r0, r3; jrp lr }
-2: addi r0, r0, -1 /* don't count the trailing NUL */
- { sub r0, r0, r3; jrp lr }
- STD_ENDPROC(strncpy_from_user_asm)
- .pushsection .fixup,"ax"
-strncpy_from_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(strncpy_from_user_fault)
- .section __ex_table,"a"
- .align 4
- .word 1b, strncpy_from_user_fault
- .popsection
-
-/*
* clear_user_asm takes the user target address in r0 and the
* number of bytes to zero in r1.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
diff --git a/kernel/arch/tile/lib/usercopy_64.S b/kernel/arch/tile/lib/usercopy_64.S
index b3b31a330..9322dc551 100644
--- a/kernel/arch/tile/lib/usercopy_64.S
+++ b/kernel/arch/tile/lib/usercopy_64.S
@@ -20,52 +20,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
/*
- * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
- * It returns the length, including the terminating NUL, or zero on exception.
- * If length is greater than the bound, returns one plus the bound.
- */
-STD_ENTRY(strnlen_user_asm)
- { beqz r1, 2f; addi r3, r0, -1 } /* bias down to include NUL */
-1: { ld1u r4, r0; addi r1, r1, -1 }
- beqz r4, 2f
- { bnezt r1, 1b; addi r0, r0, 1 }
-2: { sub r0, r0, r3; jrp lr }
- STD_ENDPROC(strnlen_user_asm)
- .pushsection .fixup,"ax"
-strnlen_user_fault:
- { move r0, zero; jrp lr }
- ENDPROC(strnlen_user_fault)
- .section __ex_table,"a"
- .align 8
- .quad 1b, strnlen_user_fault
- .popsection
-
-/*
- * strncpy_from_user_asm takes the kernel target pointer in r0,
- * the userspace source pointer in r1, and the length bound (including
- * the trailing NUL) in r2. On success, it returns the string length
- * (not including the trailing NUL), or -EFAULT on failure.
- */
-STD_ENTRY(strncpy_from_user_asm)
- { beqz r2, 2f; move r3, r0 }
-1: { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
- { st1 r0, r4; addi r0, r0, 1 }
- beqz r4, 2f
- bnezt r2, 1b
- { sub r0, r0, r3; jrp lr }
-2: addi r0, r0, -1 /* don't count the trailing NUL */
- { sub r0, r0, r3; jrp lr }
- STD_ENDPROC(strncpy_from_user_asm)
- .pushsection .fixup,"ax"
-strncpy_from_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(strncpy_from_user_fault)
- .section __ex_table,"a"
- .align 8
- .quad 1b, strncpy_from_user_fault
- .popsection
-
-/*
* clear_user_asm takes the user target address in r0 and the
* number of bytes to zero in r1.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
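
Dropping the byte-at-a-time strnlen_user_asm/strncpy_from_user_asm routines
presumably cedes these operations to the kernel's generic word-at-a-time
string-from-user helpers (the hunks themselves only show the removal). The
core trick in that generic code is spotting a NUL byte a whole word at a
time; a self-contained sketch of the classic test for 64-bit words:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if any byte of x is zero: the (x - 0x01..01) & ~x & 0x80..80 test. */
    static bool word_has_zero_byte(uint64_t x)
    {
            const uint64_t ones  = 0x0101010101010101ULL;
            const uint64_t highs = 0x8080808080808080ULL;

            return ((x - ones) & ~x & highs) != 0;
    }
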
diff --git a/kernel/arch/tile/mm/elf.c b/kernel/arch/tile/mm/elf.c
index f7ddae372..6225cc998 100644
--- a/kernel/arch/tile/mm/elf.c
+++ b/kernel/arch/tile/mm/elf.c
@@ -56,7 +56,7 @@ static int notify_exec(struct mm_struct *mm)
if (exe_file == NULL)
goto done_free;
- path = d_path(&exe_file->f_path, buf, PAGE_SIZE);
+ path = file_path(exe_file, buf, PAGE_SIZE);
if (IS_ERR(path))
goto done_put;
diff --git a/kernel/arch/tile/mm/fault.c b/kernel/arch/tile/mm/fault.c
index 3f4f58d34..13eac59bf 100644
--- a/kernel/arch/tile/mm/fault.c
+++ b/kernel/arch/tile/mm/fault.c
@@ -699,11 +699,10 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
* interrupt away appropriately and return immediately. We can't do
* page faults for user code while in kernel mode.
*/
-void do_page_fault(struct pt_regs *regs, int fault_num,
- unsigned long address, unsigned long write)
+static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
+ unsigned long address, unsigned long write)
{
int is_page_fault;
- enum ctx_state prev_state = exception_enter();
#ifdef CONFIG_KPROBES
/*
@@ -713,7 +712,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
regs->faultnum, SIGSEGV) == NOTIFY_STOP)
- goto done;
+ return;
#endif
#ifdef __tilegx__
@@ -835,18 +834,22 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
async->is_fault = is_page_fault;
async->is_write = write;
async->address = address;
- goto done;
+ return;
}
}
#endif
handle_page_fault(regs, fault_num, is_page_fault, address, write);
+}
-done:
+void do_page_fault(struct pt_regs *regs, int fault_num,
+ unsigned long address, unsigned long write)
+{
+ enum ctx_state prev_state = exception_enter();
+ __do_page_fault(regs, fault_num, address, write);
exception_exit(prev_state);
}
-
#if CHIP_HAS_TILE_DMA()
/*
* This routine effectively re-issues asynchronous page faults
diff --git a/kernel/arch/tile/mm/highmem.c b/kernel/arch/tile/mm/highmem.c
index fcd545014..eca28551b 100644
--- a/kernel/arch/tile/mm/highmem.c
+++ b/kernel/arch/tile/mm/highmem.c
@@ -275,15 +275,3 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
return kmap_atomic_prot(pfn_to_page(pfn), prot);
}
-
-struct page *kmap_atomic_to_page(void *ptr)
-{
- pte_t *pte;
- unsigned long vaddr = (unsigned long)ptr;
-
- if (vaddr < FIXADDR_START)
- return virt_to_page(ptr);
-
- pte = kmap_get_pte(vaddr);
- return pte_page(*pte);
-}
diff --git a/kernel/arch/tile/mm/hugetlbpage.c b/kernel/arch/tile/mm/hugetlbpage.c
index 8416240c3..c034dc3fe 100644
--- a/kernel/arch/tile/mm/hugetlbpage.c
+++ b/kernel/arch/tile/mm/hugetlbpage.c
@@ -160,11 +160,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
diff --git a/kernel/arch/tile/mm/init.c b/kernel/arch/tile/mm/init.c
index 5bd252e3f..d4e1fc41d 100644
--- a/kernel/arch/tile/mm/init.c
+++ b/kernel/arch/tile/mm/init.c
@@ -863,7 +863,7 @@ void __init mem_init(void)
* memory to the highmem for now.
*/
#ifndef CONFIG_NEED_MULTIPLE_NODES
-int arch_add_memory(u64 start, u64 size)
+int arch_add_memory(u64 start, u64 size, bool for_device)
{
struct pglist_data *pgdata = &contig_page_data;
struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;