author     Yunhong Jiang <yunhong.jiang@intel.com>	2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>	2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/arch/s390/lib
parent     98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the RT Linux 4.1.3-rt3 as base
Import the RT Linux 4.1.3-rt3 tree as the OPNFV KVM base. It comes from the
linux-4.1.y-rt branch of
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git, and the
base commit is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Importing the tree this way loses all of the git history, which is not good.
We should set up a proper OPNFV project repository in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/arch/s390/lib')
-rw-r--r--  kernel/arch/s390/lib/Makefile   |   9
-rw-r--r--  kernel/arch/s390/lib/delay.c    | 129
-rw-r--r--  kernel/arch/s390/lib/find.c     |  77
-rw-r--r--  kernel/arch/s390/lib/mem.S      |  88
-rw-r--r--  kernel/arch/s390/lib/probes.c   | 159
-rw-r--r--  kernel/arch/s390/lib/spinlock.c | 272
-rw-r--r--  kernel/arch/s390/lib/string.c   | 342
-rw-r--r--  kernel/arch/s390/lib/uaccess.c  | 392
8 files changed, 1468 insertions(+), 0 deletions(-)
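One conversion is worth spelling out because it recurs throughout delay.c below: the s390 TOD clock advances 4096 (2^12) units per microsecond, so __udelay() uses usecs << 12, and __ndelay() computes nsecs * 512 / 125 (equal to nsecs * 4096 / 1000) via its shift-and-do_div pair. A minimal stand-alone sketch of the two conversions (the helper names are hypothetical, not part of the patch):

/* Stand-alone sketch of the TOD-clock conversions used in delay.c below.
 * One microsecond equals 4096 TOD clock units (bit 51 of the TOD value
 * ticks once per microsecond). Helper names are hypothetical. */
#include <stdio.h>

static unsigned long long usecs_to_tod(unsigned long long usecs)
{
	return usecs << 12;		/* matches usecs << 12 in __udelay() */
}

static unsigned long long nsecs_to_tod(unsigned long long nsecs)
{
	return (nsecs << 9) / 125;	/* matches nsecs <<= 9; do_div(nsecs, 125) */
}

int main(void)
{
	/* 1000 ns and 1 us must map to the same number of TOD units (4096). */
	printf("%llu %llu\n", nsecs_to_tod(1000), usecs_to_tod(1));
	return 0;
}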
diff --git a/kernel/arch/s390/lib/Makefile b/kernel/arch/s390/lib/Makefile
new file mode 100644
index 000000000..0e8fefe5b
--- /dev/null
+++ b/kernel/arch/s390/lib/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for s390-specific library files.
+#
+
+lib-y += delay.o string.o uaccess.o find.o
+obj-y += mem.o
+lib-$(CONFIG_SMP) += spinlock.o
+lib-$(CONFIG_KPROBES) += probes.o
+lib-$(CONFIG_UPROBES) += probes.o
diff --git a/kernel/arch/s390/lib/delay.c b/kernel/arch/s390/lib/delay.c
new file mode 100644
index 000000000..16dc42d83
--- /dev/null
+++ b/kernel/arch/s390/lib/delay.c
@@ -0,0 +1,129 @@
+/*
+ * Precise Delay Loops for S390
+ *
+ * Copyright IBM Corp. 1999, 2008
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/module.h>
+#include <linux/irqflags.h>
+#include <linux/interrupt.h>
+#include <asm/vtimer.h>
+#include <asm/div64.h>
+
+void __delay(unsigned long loops)
+{
+	/*
+	 * To end the bloody stupid and useless discussion about the
+	 * BogoMips number I took the liberty to define the __delay
+	 * function in a way that the resulting BogoMips number will
+	 * yield the megahertz number of the cpu. The important function
+	 * is udelay and that is done using the tod clock. -- martin.
+	 */
+	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
+}
+
+static void __udelay_disabled(unsigned long long usecs)
+{
+	unsigned long cr0, cr6, new;
+	u64 clock_saved, end;
+
+	end = get_tod_clock() + (usecs << 12);
+	clock_saved = local_tick_disable();
+	__ctl_store(cr0, 0, 0);
+	__ctl_store(cr6, 6, 6);
+	new = (cr0 & 0xffff00e0) | 0x00000800;
+	__ctl_load(new, 0, 0);
+	new = 0;
+	__ctl_load(new, 6, 6);
+	lockdep_off();
+	do {
+		set_clock_comparator(end);
+		enabled_wait();
+	} while (get_tod_clock_fast() < end);
+	lockdep_on();
+	__ctl_load(cr0, 0, 0);
+	__ctl_load(cr6, 6, 6);
+	local_tick_enable(clock_saved);
+}
+
+static void __udelay_enabled(unsigned long long usecs)
+{
+	u64 clock_saved, end;
+
+	end = get_tod_clock_fast() + (usecs << 12);
+	do {
+		clock_saved = 0;
+		if (end < S390_lowcore.clock_comparator) {
+			clock_saved = local_tick_disable();
+			set_clock_comparator(end);
+		}
+		enabled_wait();
+		if (clock_saved)
+			local_tick_enable(clock_saved);
+	} while (get_tod_clock_fast() < end);
+}
+
+/*
+ * Waits for 'usecs' microseconds using the TOD clock comparator.
+ */
+void __udelay(unsigned long long usecs)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	if (in_irq()) {
+		__udelay_disabled(usecs);
+		goto out;
+	}
+	if (in_softirq()) {
+		if (raw_irqs_disabled_flags(flags))
+			__udelay_disabled(usecs);
+		else
+			__udelay_enabled(usecs);
+		goto out;
+	}
+	if (raw_irqs_disabled_flags(flags)) {
+		local_bh_disable();
+		__udelay_disabled(usecs);
+		_local_bh_enable();
+		goto out;
+	}
+	__udelay_enabled(usecs);
+out:
+	local_irq_restore(flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL(__udelay);
+
+/*
+ * Simple udelay variant. To be used on startup and reboot
+ * when the interrupt handler isn't working.
+ */
+void udelay_simple(unsigned long long usecs)
+{
+	u64 end;
+
+	end = get_tod_clock_fast() + (usecs << 12);
+	while (get_tod_clock_fast() < end)
+		cpu_relax();
+}
+
+void __ndelay(unsigned long long nsecs)
+{
+	u64 end;
+
+	nsecs <<= 9;
+	do_div(nsecs, 125);
+	end = get_tod_clock_fast() + nsecs;
+	if (nsecs & ~0xfffUL)
+		__udelay(nsecs >> 12);
+	while (get_tod_clock_fast() < end)
+		barrier();
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/kernel/arch/s390/lib/find.c b/kernel/arch/s390/lib/find.c
new file mode 100644
index 000000000..922003c1b
--- /dev/null
+++ b/kernel/arch/s390/lib/find.c
@@ -0,0 +1,77 @@
+/*
+ * MSB0 numbered special bitops handling.
+ *
+ * On s390x the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The reason for this bit numbering is the fact that the hardware sets bits
+ * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+ * from the 'wrong end'.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
+
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if ((tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
+	if (!tmp)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found:
+	return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_first_bit_inv);
+
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+				unsigned long offset)
+{
+	const unsigned long *p = addr + (offset / BITS_PER_LONG);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL >> offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+found_first:
+	tmp &= (~0UL << (BITS_PER_LONG - size));
+	if (!tmp)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_next_bit_inv);
diff --git a/kernel/arch/s390/lib/mem.S b/kernel/arch/s390/lib/mem.S
new file mode 100644
index 000000000..c6d553e85
--- /dev/null
+++ b/kernel/arch/s390/lib/mem.S
@@ -0,0 +1,88 @@
+/*
+ * String handling functions.
+ *
+ * Copyright IBM Corp. 2012
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * memset implementation
+ *
+ * This code corresponds to the C construct below. We do distinguish
+ * between clearing (c == 0) and setting a memory array (c != 0) simply
+ * because nearly all memset invocations in the kernel clear memory and
+ * the xc instruction is preferred in such cases.
+ *
+ * void *memset(void *s, int c, size_t n)
+ * {
+ *	if (likely(c == 0))
+ *		return __builtin_memset(s, 0, n);
+ *	return __builtin_memset(s, c, n);
+ * }
+ */
+ENTRY(memset)
+	ltgr	%r4,%r4
+	bzr	%r14
+	ltgr	%r3,%r3
+	jnz	.Lmemset_fill
+	aghi	%r4,-1
+	srlg	%r3,%r4,8
+	ltgr	%r3,%r3
+	lgr	%r1,%r2
+	jz	.Lmemset_clear_rest
+.Lmemset_clear_loop:
+	xc	0(256,%r1),0(%r1)
+	la	%r1,256(%r1)
+	brctg	%r3,.Lmemset_clear_loop
+.Lmemset_clear_rest:
+	larl	%r3,.Lmemset_xc
+	ex	%r4,0(%r3)
+	br	%r14
+.Lmemset_fill:
+	stc	%r3,0(%r2)
+	cghi	%r4,1
+	lgr	%r1,%r2
+	ber	%r14
+	aghi	%r4,-2
+	srlg	%r3,%r4,8
+	ltgr	%r3,%r3
+	jz	.Lmemset_fill_rest
+.Lmemset_fill_loop:
+	mvc	1(256,%r1),0(%r1)
+	la	%r1,256(%r1)
+	brctg	%r3,.Lmemset_fill_loop
+.Lmemset_fill_rest:
+	larl	%r3,.Lmemset_mvc
+	ex	%r4,0(%r3)
+	br	%r14
+.Lmemset_xc:
+	xc	0(1,%r1),0(%r1)
+.Lmemset_mvc:
+	mvc	1(1,%r1),0(%r1)
+
+/*
+ * memcpy implementation
+ *
+ * void *memcpy(void *dest, const void *src, size_t n)
+ */
+ENTRY(memcpy)
+	ltgr	%r4,%r4
+	bzr	%r14
+	aghi	%r4,-1
+	srlg	%r5,%r4,8
+	ltgr	%r5,%r5
+	lgr	%r1,%r2
+	jnz	.Lmemcpy_loop
+.Lmemcpy_rest:
+	larl	%r5,.Lmemcpy_mvc
+	ex	%r4,0(%r5)
+	br	%r14
+.Lmemcpy_loop:
+	mvc	0(256,%r1),0(%r3)
+	la	%r1,256(%r1)
+	la	%r3,256(%r3)
+	brctg	%r5,.Lmemcpy_loop
+	j	.Lmemcpy_rest
+.Lmemcpy_mvc:
+	mvc	0(1,%r1),0(%r3)
diff --git a/kernel/arch/s390/lib/probes.c b/kernel/arch/s390/lib/probes.c
new file mode 100644
index 000000000..ae90e1ae3
--- /dev/null
+++ b/kernel/arch/s390/lib/probes.c
@@ -0,0 +1,159 @@
+/*
+ * Common helper functions for kprobes and uprobes
+ *
+ * Copyright IBM Corp. 2014
+ */
+
+#include <asm/kprobes.h>
+#include <asm/dis.h>
+
+int probe_is_prohibited_opcode(u16 *insn)
+{
+	if (!is_known_insn((unsigned char *)insn))
+		return -EINVAL;
+	switch (insn[0] >> 8) {
+	case 0x0c:	/* bassm */
+	case 0x0b:	/* bsm	 */
+	case 0x83:	/* diag	 */
+	case 0x44:	/* ex	 */
+	case 0xac:	/* stnsm */
+	case 0xad:	/* stosm */
+		return -EINVAL;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00:	/* exrl */
+			return -EINVAL;
+		}
+	}
+	switch (insn[0]) {
+	case 0x0101:	/* pr	 */
+	case 0xb25a:	/* bsa	 */
+	case 0xb240:	/* bakr	 */
+	case 0xb258:	/* bsg	 */
+	case 0xb218:	/* pc	 */
+	case 0xb228:	/* pt	 */
+	case 0xb98d:	/* epsw	 */
+	case 0xe560:	/* tbegin */
+	case 0xe561:	/* tbeginc */
+	case 0xb2f8:	/* tend	 */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int probe_get_fixup_type(u16 *insn)
+{
+	/* default fixup method */
+	int fixup = FIXUP_PSW_NORMAL;
+
+	switch (insn[0] >> 8) {
+	case 0x05:	/* balr	*/
+	case 0x0d:	/* basr	*/
+		fixup = FIXUP_RETURN_REGISTER;
+		/* if r2 = 0, no branch will be taken */
+		if ((insn[0] & 0x0f) == 0)
+			fixup |= FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x06:	/* bctr	*/
+	case 0x07:	/* bcr	*/
+		fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x45:	/* bal	*/
+	case 0x4d:	/* bas	*/
+		fixup = FIXUP_RETURN_REGISTER;
+		break;
+	case 0x47:	/* bc	*/
+	case 0x46:	/* bct	*/
+	case 0x86:	/* bxh	*/
+	case 0x87:	/* bxle	*/
+		fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x82:	/* lpsw	*/
+		fixup = FIXUP_NOT_REQUIRED;
+		break;
+	case 0xb2:	/* lpswe */
+		if ((insn[0] & 0xff) == 0xb2)
+			fixup = FIXUP_NOT_REQUIRED;
+		break;
+	case 0xa7:	/* bras	*/
+		if ((insn[0] & 0x0f) == 0x05)
+			fixup |= FIXUP_RETURN_REGISTER;
+		break;
+	case 0xc0:
+		if ((insn[0] & 0x0f) == 0x05)	/* brasl */
+			fixup |= FIXUP_RETURN_REGISTER;
+		break;
+	case 0xeb:
+		switch (insn[2] & 0xff) {
+		case 0x44:	/* bxhg	 */
+		case 0x45:	/* bxleg */
+			fixup = FIXUP_BRANCH_NOT_TAKEN;
+			break;
+		}
+		break;
+	case 0xe3:	/* bctg	*/
+		if ((insn[2] & 0xff) == 0x46)
+			fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0xec:
+		switch (insn[2] & 0xff) {
+		case 0xe5:	/* clgrb */
+		case 0xe6:	/* cgrb	 */
+		case 0xf6:	/* crb	 */
+		case 0xf7:	/* clrb	 */
+		case 0xfc:	/* cgib	 */
+		case 0xfd:	/* cglib */
+		case 0xfe:	/* cib	 */
+		case 0xff:	/* clib	 */
+			fixup = FIXUP_BRANCH_NOT_TAKEN;
+			break;
+		}
+		break;
+	}
+	return fixup;
+}
+
+int probe_is_insn_relative_long(u16 *insn)
+{
+	/* Check if we have a RIL-b or RIL-c format instruction which
+	 * we need to modify in order to avoid instruction emulation. */
+	switch (insn[0] >> 8) {
+	case 0xc0:
+		if ((insn[0] & 0x0f) == 0x00)	/* larl */
+			return true;
+		break;
+	case 0xc4:
+		switch (insn[0] & 0x0f) {
+		case 0x02:	/* llhrl  */
+		case 0x04:	/* lghrl  */
+		case 0x05:	/* lhrl	  */
+		case 0x06:	/* llghrl */
+		case 0x07:	/* sthrl  */
+		case 0x08:	/* lgrl	  */
+		case 0x0b:	/* stgrl  */
+		case 0x0c:	/* lgfrl  */
+		case 0x0d:	/* lrl	  */
+		case 0x0e:	/* llgfrl */
+		case 0x0f:	/* strl	  */
+			return true;
+		}
+		break;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x02:	/* pfdrl  */
+		case 0x04:	/* cghrl  */
+		case 0x05:	/* chrl	  */
+		case 0x06:	/* clghrl */
+		case 0x07:	/* clhrl  */
+		case 0x08:	/* cgrl	  */
+		case 0x0a:	/* clgrl  */
+		case 0x0c:	/* cgfrl  */
+		case 0x0d:	/* crl	  */
+		case 0x0e:	/* clgfrl */
+		case 0x0f:	/* clrl	  */
+			return true;
+		}
+		break;
+	}
+	return false;
+}
diff --git a/kernel/arch/s390/lib/spinlock.c b/kernel/arch/s390/lib/spinlock.c
new file mode 100644
index 000000000..d6c9991f7
--- /dev/null
+++ b/kernel/arch/s390/lib/spinlock.c
@@ -0,0 +1,272 @@
+/*
+ * Out of line spinlock code.
+ *
+ * Copyright IBM Corp. 2004, 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/io.h>
+
+int spin_retry = -1;
+
+static int __init spin_retry_init(void)
+{
+	if (spin_retry < 0)
+		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
+	return 0;
+}
+early_initcall(spin_retry_init);
+
+/**
+ * spin_retry= parameter
+ */
+static int __init spin_retry_setup(char *str)
+{
+	spin_retry = simple_strtoul(str, &str, 0);
+	return 1;
+}
+__setup("spin_retry=", spin_retry_setup);
+
+static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
+{
+	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
+}
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+	unsigned int cpu = SPINLOCK_LOCKVAL;
+	unsigned int owner;
+	int count;
+
+	while (1) {
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
+		}
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&lp->lock, owner);
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
+	}
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
+
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+{
+	unsigned int cpu = SPINLOCK_LOCKVAL;
+	unsigned int owner;
+	int count;
+
+	local_irq_restore(flags);
+	while (1) {
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
+		}
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&lp->lock, owner);
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
+	}
+}
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
+
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
+{
+	unsigned int cpu = SPINLOCK_LOCKVAL;
+	unsigned int owner;
+	int count;
+
+	for (count = spin_retry; count > 0; count--) {
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return 1;
+		} else if (MACHINE_HAS_CAD)
+			_raw_compare_and_delay(&lp->lock, owner);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(arch_spin_trylock_retry);
+
+void _raw_read_lock_wait(arch_rwlock_t *rw)
+{
+	unsigned int owner, old;
+	int count = spin_retry;
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
+#endif
+	owner = 0;
+	while (1) {
+		if (count-- <= 0) {
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
+			count = spin_retry;
+		}
+		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
+		if ((int) old < 0) {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&rw->lock, old);
+			continue;
+		}
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_read_lock_wait);
+
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
+{
+	unsigned int old;
+	int count = spin_retry;
+
+	while (count-- > 0) {
+		old = ACCESS_ONCE(rw->lock);
+		if ((int) old < 0) {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&rw->lock, old);
+			continue;
+		}
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(_raw_read_trylock_retry);
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+{
+	unsigned int owner, old;
+	int count = spin_retry;
+
+	owner = 0;
+	while (1) {
+		if (count-- <= 0) {
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
+			count = spin_retry;
+		}
+		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
+		smp_rmb();
+		if ((int) old >= 0) {
+			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+			old = prev;
+		}
+		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+			break;
+		if (MACHINE_HAS_CAD)
+			_raw_compare_and_delay(&rw->lock, old);
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+void _raw_write_lock_wait(arch_rwlock_t *rw)
+{
+	unsigned int owner, old, prev;
+	int count = spin_retry;
+
+	prev = 0x80000000;
+	owner = 0;
+	while (1) {
+		if (count-- <= 0) {
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
+			count = spin_retry;
+		}
+		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
+		if ((int) old >= 0 &&
+		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
+			prev = old;
+		else
+			smp_rmb();
+		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+			break;
+		if (MACHINE_HAS_CAD)
+			_raw_compare_and_delay(&rw->lock, old);
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
+{
+	unsigned int old;
+	int count = spin_retry;
+
+	while (count-- > 0) {
+		old = ACCESS_ONCE(rw->lock);
+		if (old) {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&rw->lock, old);
+			continue;
+		}
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+	if (!cpu)
+		return;
+	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+		return;
+	smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
diff --git a/kernel/arch/s390/lib/string.c b/kernel/arch/s390/lib/string.c
new file mode 100644
index 000000000..b647d5ff0
--- /dev/null
+++ b/kernel/arch/s390/lib/string.c
@@ -0,0 +1,342 @@
+/*
+ * Optimized string functions
+ *
+ * S390 version
+ * Copyright IBM Corp. 2004
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define IN_ARCH_STRING_C 1
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+/*
+ * Helper functions to find the end of a string
+ */
+static inline char *__strend(const char *s)
+{
+	register unsigned long r0 asm("0") = 0;
+
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b"
+		      : "+d" (r0), "+a" (s) : : "cc" );
+	return (char *) r0;
+}
+
+static inline char *__strnend(const char *s, size_t n)
+{
+	register unsigned long r0 asm("0") = 0;
+	const char *p = s + n;
+
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b"
+		      : "+d" (p), "+a" (s) : "d" (r0) : "cc" );
+	return (char *) p;
+}
+
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ *
+ * returns the length of @s
+ */
+size_t strlen(const char *s)
+{
+	return __strend(s) - s;
+}
+EXPORT_SYMBOL(strlen);
+
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @n: The maximum number of bytes to search
+ *
+ * returns the minimum of the length of @s and @n
+ */
+size_t strnlen(const char * s, size_t n)
+{
+	return __strnend(s, n) - s;
+}
+EXPORT_SYMBOL(strnlen);
+
+/**
+ * strcpy - Copy a %NUL terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ *
+ * returns a pointer to @dest
+ */
+char *strcpy(char *dest, const char *src)
+{
+	register int r0 asm("0") = 0;
+	char *ret = dest;
+
+	asm volatile ("0: mvst  %0,%1\n"
+		      "   jo    0b"
+		      : "+&a" (dest), "+&a" (src) : "d" (r0)
+		      : "cc", "memory" );
+	return ret;
+}
+EXPORT_SYMBOL(strcpy);
+
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+	size_t ret = __strend(src) - src;
+
+	if (size) {
+		size_t len = (ret >= size) ? size-1 : ret;
+		dest[len] = '\0';
+		memcpy(dest, src, len);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(strlcpy);
+
+/**
+ * strncpy - Copy a length-limited, %NUL-terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @n: The maximum number of bytes to copy
+ *
+ * The result is not %NUL-terminated if the source exceeds
+ * @n bytes.
+ */
+char *strncpy(char *dest, const char *src, size_t n)
+{
+	size_t len = __strnend(src, n) - src;
+	memset(dest + len, 0, n - len);
+	memcpy(dest, src, len);
+	return dest;
+}
+EXPORT_SYMBOL(strncpy);
+
+/**
+ * strcat - Append one %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ *
+ * returns a pointer to @dest
+ */
+char *strcat(char *dest, const char *src)
+{
+	register int r0 asm("0") = 0;
+	unsigned long dummy;
+	char *ret = dest;
+
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b\n"
+		      "1: mvst  %0,%2\n"
+		      "   jo    1b"
+		      : "=&a" (dummy), "+a" (dest), "+a" (src)
+		      : "d" (r0), "0" (0UL) : "cc", "memory" );
+	return ret;
+}
+EXPORT_SYMBOL(strcat);
+
+/**
+ * strlcat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The size of the destination buffer.
+ */
+size_t strlcat(char *dest, const char *src, size_t n)
+{
+	size_t dsize = __strend(dest) - dest;
+	size_t len = __strend(src) - src;
+	size_t res = dsize + len;
+
+	if (dsize < n) {
+		dest += dsize;
+		n -= dsize;
+		if (len >= n)
+			len = n - 1;
+		dest[len] = '\0';
+		memcpy(dest, src, len);
+	}
+	return res;
+}
+EXPORT_SYMBOL(strlcat);
+
+/**
+ * strncat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The maximum number of bytes to copy
+ *
+ * returns a pointer to @dest
+ *
+ * Note that in contrast to strncpy, strncat ensures the result is
+ * terminated.
+ */
+char *strncat(char *dest, const char *src, size_t n)
+{
+	size_t len = __strnend(src, n) - src;
+	char *p = __strend(dest);
+
+	p[len] = '\0';
+	memcpy(p, src, len);
+	return dest;
+}
+EXPORT_SYMBOL(strncat);
+
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ *
+ * returns   0 if @cs and @ct are equal,
+ *         < 0 if @cs is less than @ct
+ *         > 0 if @cs is greater than @ct
+ */
+int strcmp(const char *cs, const char *ct)
+{
+	register int r0 asm("0") = 0;
+	int ret = 0;
+
+	asm volatile ("0: clst %2,%3\n"
+		      "   jo   0b\n"
+		      "   je   1f\n"
+		      "   ic   %0,0(%2)\n"
+		      "   ic   %1,0(%3)\n"
+		      "   sr   %0,%1\n"
+		      "1:"
+		      : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
+		      : : "cc" );
+	return ret;
+}
+EXPORT_SYMBOL(strcmp);
+
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char * strrchr(const char * s, int c)
+{
+	size_t len = __strend(s) - s;
+
+	if (len)
+		do {
+			if (s[len] == (char) c)
+				return (char *) s + len;
+		} while (--len > 0);
+	return NULL;
+}
+EXPORT_SYMBOL(strrchr);
+
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char * strstr(const char * s1,const char * s2)
+{
+	int l1, l2;
+
+	l2 = __strend(s2) - s2;
+	if (!l2)
+		return (char *) s1;
+	l1 = __strend(s1) - s1;
+	while (l1-- >= l2) {
+		register unsigned long r2 asm("2") = (unsigned long) s1;
+		register unsigned long r3 asm("3") = (unsigned long) l2;
+		register unsigned long r4 asm("4") = (unsigned long) s2;
+		register unsigned long r5 asm("5") = (unsigned long) l2;
+		int cc;
+
+		asm volatile ("0: clcle %1,%3,0\n"
+			      "   jo    0b\n"
+			      "   ipm   %0\n"
+			      "   srl   %0,28"
+			      : "=&d" (cc), "+a" (r2), "+a" (r3),
+				"+a" (r4), "+a" (r5) : : "cc" );
+		if (!cc)
+			return (char *) s1;
+		s1++;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(strstr);
+
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+	register int r0 asm("0") = (char) c;
+	const void *ret = s + n;
+
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b\n"
+		      "   jl    1f\n"
+		      "   la    %0,0\n"
+		      "1:"
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+	return (void *) ret;
+}
+EXPORT_SYMBOL(memchr);
+
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @n: The size of the area.
+ */
+int memcmp(const void *cs, const void *ct, size_t n)
+{
+	register unsigned long r2 asm("2") = (unsigned long) cs;
+	register unsigned long r3 asm("3") = (unsigned long) n;
+	register unsigned long r4 asm("4") = (unsigned long) ct;
+	register unsigned long r5 asm("5") = (unsigned long) n;
+	int ret;
+
+	asm volatile ("0: clcle %1,%3,0\n"
+		      "   jo    0b\n"
+		      "   ipm   %0\n"
+		      "   srl   %0,28"
+		      : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5)
+		      : : "cc" );
+	if (ret)
+		ret = *(char *) r2 - *(char *) r4;
+	return ret;
+}
+EXPORT_SYMBOL(memcmp);
+
+/**
+ * memscan - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
+void *memscan(void *s, int c, size_t n)
+{
+	register int r0 asm("0") = (char) c;
+	const void *ret = s + n;
+
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b\n"
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
	return (void *) ret;
+}
+EXPORT_SYMBOL(memscan);
diff --git a/kernel/arch/s390/lib/uaccess.c b/kernel/arch/s390/lib/uaccess.c
new file mode 100644
index 000000000..4614d415b
--- /dev/null
+++ b/kernel/arch/s390/lib/uaccess.c
@@ -0,0 +1,392 @@
+/*
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright IBM Corp. 2006,2014
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/facility.h>
+
+static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+
+static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
+						 unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x81UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+		"9: jz    7f\n"
+		"1: algr  %0,%3\n"
+		"   slgr  %1,%3\n"
+		"   slgr  %2,%3\n"
+		"   j     0b\n"
+		"2: la    %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
+		"   slgr  %4,%1\n"
+		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   4f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+		"10:slgr  %0,%4\n"
+		"   algr  %2,%4\n"
+		"4: lghi  %4,-1\n"
+		"   algr  %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras  %3,6f\n"	/* memset loop */
+		"   xc    0(1,%2),0(%2)\n"
+		"5: xc    0(256,%2),0(%2)\n"
+		"   la    %2,256(%2)\n"
+		"6: aghi  %4,-256\n"
+		"   jnm   5b\n"
+		"   ex    %4,0(%3)\n"
+		"   j     8f\n"
+		"7: slgr  %0,%0\n"
+		"8:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
+						unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	load_kernel_asce();
+	tmp1 = -256UL;
+	asm volatile(
+		"   sacf  0\n"
+		"0: mvcp  0(%0,%2),0(%1),%3\n"
+		"10:jz    8f\n"
+		"1: algr  %0,%3\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"2: mvcp  0(%0,%2),0(%1),%3\n"
+		"11:jnz   1b\n"
+		"   j     8f\n"
+		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"   lghi  %3,-4096\n"
+		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		"   slgr  %4,%1\n"
+		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"4: mvcp  0(%4,%2),0(%1),%3\n"
+		"12:slgr  %0,%4\n"
+		"   algr  %2,%4\n"
+		"5: lghi  %4,-1\n"
+		"   algr  %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras  %3,7f\n"	/* memset loop */
+		"   xc    0(1,%2),0(%2)\n"
+		"6: xc    0(256,%2),0(%2)\n"
+		"   la    %2,256(%2)\n"
+		"7: aghi  %4,-256\n"
+		"   jnm   6b\n"
+		"   ex    %4,0(%3)\n"
+		"   j     9f\n"
+		"8: slgr  %0,%0\n"
+		"9: sacf  768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_from_user_mvcos(to, from, n);
+	return copy_from_user_mvcp(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"6: jz    4f\n"
+		"1: algr  %0,%3\n"
+		"   slgr  %1,%3\n"
+		"   slgr  %2,%3\n"
+		"   j     0b\n"
+		"2: la    %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
+		"   slgr  %4,%1\n"
+		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+		"7: slgr  %0,%4\n"
+		"   j     5f\n"
+		"4: slgr  %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
+					      unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	load_kernel_asce();
+	tmp1 = -256UL;
+	asm volatile(
+		"   sacf  0\n"
+		"0: mvcs  0(%0,%1),0(%2),%3\n"
+		"7: jz    5f\n"
+		"1: algr  %0,%3\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"2: mvcs  0(%0,%1),0(%2),%3\n"
+		"8: jnz   1b\n"
+		"   j     5f\n"
+		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"   lghi  %3,-4096\n"
+		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		"   slgr  %4,%1\n"
+		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   6f\n"
+		"4: mvcs  0(%4,%1),0(%2),%3\n"
+		"9: slgr  %0,%4\n"
+		"   j     6f\n"
+		"5: slgr  %0,%0\n"
+		"6: sacf  768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_to_user_mvcos(to, from, n);
+	return copy_to_user_mvcs(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
+
+static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810081UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	/* FIXME: copy with reduced length. */
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"   jz    2f\n"
+		"1: algr  %0,%3\n"
+		"   slgr  %1,%3\n"
+		"   slgr  %2,%3\n"
+		"   j     0b\n"
+		"2: slgr  %0,%0\n"
+		"3: \n"
+		EX_TABLE(0b,3b)
+		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
+					     unsigned long size)
+{
+	unsigned long tmp1;
+
+	load_kernel_asce();
+	asm volatile(
+		"   sacf  256\n"
+		"   aghi  %0,-1\n"
+		"   jo    5f\n"
+		"   bras  %3,3f\n"
+		"0: aghi  %0,257\n"
+		"1: mvc   0(1,%1),0(%2)\n"
+		"   la    %1,1(%1)\n"
+		"   la    %2,1(%2)\n"
+		"   aghi  %0,-1\n"
+		"   jnz   1b\n"
+		"   j     5f\n"
+		"2: mvc   0(256,%1),0(%2)\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"3: aghi  %0,-256\n"
+		"   jnm   2b\n"
+		"4: ex    %0,1b-0b(%3)\n"
+		"5: slgr  %0,%0\n"
+		"6: sacf  768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_in_user_mvcos(to, from, n);
+	return copy_in_user_mvc(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+		"   jz    4f\n"
+		"1: algr  %0,%2\n"
+		"   slgr  %1,%2\n"
+		"   j     0b\n"
+		"2: la    %3,4095(%1)\n"	/* %3 = to + 4095 */
+		"   nr    %3,%2\n"	/* %3 = (to + 4095) & -4096 */
+		"   slgr  %3,%1\n"
+		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+		"   slgr  %0,%3\n"
+		"   j     5f\n"
+		"4: slgr  %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	load_kernel_asce();
+	asm volatile(
+		"   sacf  256\n"
+		"   aghi  %0,-1\n"
+		"   jo    5f\n"
+		"   bras  %3,3f\n"
+		"   xc    0(1,%1),0(%1)\n"
+		"0: aghi  %0,257\n"
+		"   la    %2,255(%1)\n"	/* %2 = ptr + 255 */
+		"   srl   %2,12\n"
+		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
+		"   slgr  %2,%1\n"
+		"   clgr  %0,%2\n"	/* clear crosses next page boundary? */
+		"   jnh   5f\n"
+		"   aghi  %2,-1\n"
+		"1: ex    %2,0(%3)\n"
+		"   aghi  %2,1\n"
+		"   slgr  %0,%2\n"
+		"   j     5f\n"
+		"2: xc    0(256,%1),0(%1)\n"
+		"   la    %1,256(%1)\n"
+		"3: aghi  %0,-256\n"
+		"   jnm   2b\n"
+		"4: ex    %0,0(%3)\n"
+		"5: slgr  %0,%0\n"
+		"6: sacf  768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __clear_user(void __user *to, unsigned long size)
+{
+	if (static_key_false(&have_mvcos))
+		return clear_user_mvcos(to, size);
+	return clear_user_xc(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline unsigned long strnlen_user_srst(const char __user *src,
+					      unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0;
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		"   la    %2,0(%1)\n"
+		"   la    %3,0(%0,%1)\n"
+		"   slgr  %0,%0\n"
+		"   sacf  256\n"
+		"0: srst  %3,%2\n"
+		"   jo    0b\n"
+		"   la    %0,1(%3)\n"	/* strnlen_user result includes the \0 */
+		"   slgr  %0,%1\n"
+		"1: sacf  768\n"
+		EX_TABLE(0b,1b)
+		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+unsigned long __strnlen_user(const char __user *src, unsigned long size)
+{
+	if (unlikely(!size))
+		return 0;
+	load_kernel_asce();
+	return strnlen_user_srst(src, size);
+}
+EXPORT_SYMBOL(__strnlen_user);
+
+long __strncpy_from_user(char *dst, const char __user *src, long size)
+{
+	size_t done, len, offset, len_str;
+
+	if (unlikely(size <= 0))
+		return 0;
+	done = 0;
+	do {
+		offset = (size_t)src & ~PAGE_MASK;
+		len = min(size - done, PAGE_SIZE - offset);
+		if (copy_from_user(dst, src, len))
+			return -EFAULT;
+		len_str = strnlen(dst, len);
+		done += len_str;
+		src += len_str;
+		dst += len_str;
+	} while ((len_str == len) && (done < size));
+	return done;
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/*
+ * The "old" uaccess variant without mvcos can be enforced with the
+ * uaccess_primary kernel parameter. This is mainly for debugging purposes.
+ */
+static int uaccess_primary __initdata;
+
+static int __init parse_uaccess_pt(char *__unused)
+{
+	uaccess_primary = 1;
+	return 0;
+}
+early_param("uaccess_primary", parse_uaccess_pt);
+
+static int __init uaccess_init(void)
+{
+	if (!uaccess_primary && test_facility(27))
+		static_key_slow_inc(&have_mvcos);
+	return 0;
+}
+early_initcall(uaccess_init);
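For readers unfamiliar with the MSB0 convention used by find.c above: bit 0 is the most significant bit of the first word, the order in which the hardware sets bits, which is why a hit found with __fls(tmp) is mirrored via ^ (BITS_PER_LONG - 1). A naive stand-alone C model of find_first_bit_inv() makes the numbering concrete (the _model name and the check in main() are illustrative only, not part of the patch):

/* Naive user-space model of find_first_bit_inv() from find.c above.
 * MSB0 numbering: bit i of a word w is set iff
 * w & (1UL << (BITS_PER_LONG - 1 - i)). Illustrative only. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long find_first_bit_inv_model(const unsigned long *addr,
					      unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size; i++) {
		unsigned long word = addr[i / BITS_PER_LONG];

		if (word & (1UL << (BITS_PER_LONG - 1 - (i % BITS_PER_LONG))))
			return i;
	}
	return size;	/* no bit set; same convention as the kernel code */
}

int main(void)
{
	/* The second word has its MSB0 bit 2 set (value 1UL << (BITS_PER_LONG - 3)),
	 * so the first set bit of the bitmap is BITS_PER_LONG + 2 (66 on 64-bit). */
	unsigned long map[2] = { 0, 1UL << (BITS_PER_LONG - 3) };

	printf("%lu\n", find_first_bit_inv_model(map, 2 * BITS_PER_LONG));
	return 0;
}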