author    Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/arch/s390/include/asm/spinlock.h
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should
apply another opnfv project repo in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/arch/s390/include/asm/spinlock.h')
-rw-r--r--  kernel/arch/s390/include/asm/spinlock.h  280
1 file changed, 280 insertions(+), 0 deletions(-)
diff --git a/kernel/arch/s390/include/asm/spinlock.h b/kernel/arch/s390/include/asm/spinlock.h
new file mode 100644
index 000000000..0e37cd041
--- /dev/null
+++ b/kernel/arch/s390/include/asm/spinlock.h
@@ -0,0 +1,280 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/spinlock.h"
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <linux/smp.h>
+
+#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+
+extern int spin_retry;
+
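+/*
+ * Wrapper around the gcc atomic builtin __sync_bool_compare_and_swap,
+ * which compiles to a compare-and-swap (CS) on s390 and returns true
+ * if *lock contained 'old' and was atomically replaced by 'new'.
+ */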
+static inline int
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
+{
+ return __sync_bool_compare_and_swap(lock, old, new);
+}
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
+void arch_lock_relax(unsigned int cpu);
+
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+
+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+ arch_lock_relax(lock->lock);
+}
+
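+/*
+ * The lock value for a cpu is ~cpu rather than cpu so that it can
+ * never be 0, not even for cpu 0: a lock word of 0 always means
+ * "unlocked".
+ */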
+static inline u32 arch_spin_lockval(int cpu)
+{
+ return ~cpu;
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.lock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
+{
+ return ACCESS_ONCE(lp->lock) != 0;
+}
+
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
+{
+ barrier();
+ return likely(arch_spin_value_unlocked(*lp) &&
+ _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
+}
+
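+/*
+ * Fast path: a single inline compare-and-swap attempt. On contention
+ * fall through to the out-of-line wait loops, which retry and may
+ * yield to the current lock owner.
+ */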
+static inline void arch_spin_lock(arch_spinlock_t *lp)
+{
+ if (!arch_spin_trylock_once(lp))
+ arch_spin_lock_wait(lp);
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+ unsigned long flags)
+{
+ if (!arch_spin_trylock_once(lp))
+ arch_spin_lock_wait_flags(lp, flags);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+ if (!arch_spin_trylock_once(lp))
+ return arch_spin_trylock_retry(lp);
+ return 1;
+}
+
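+/*
+ * Unlock is a plain store of 0, preceded by __ASM_BARRIER so that all
+ * accesses inside the critical section complete before the lock is
+ * seen as free.
+ */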
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
+{
+ typecheck(unsigned int, lp->lock);
+ asm volatile(
+ __ASM_BARRIER
+ "st %1,%0\n"
+ : "+Q" (lp->lock)
+ : "d" (0)
+ : "cc", "memory");
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ while (arch_spin_is_locked(lock))
+ arch_spin_relax(lock);
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == 0)
+
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
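+/*
+ * Lock word layout: the most significant bit (0x80000000) is the
+ * writer bit, the lower 31 bits count the readers. A negative lock
+ * value therefore means the lock is write-locked.
+ */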
+static inline int arch_read_trylock_once(arch_rwlock_t *rw)
+{
+ unsigned int old = ACCESS_ONCE(rw->lock);
+ return likely((int) old >= 0 &&
+ _raw_compare_and_swap(&rw->lock, old, old + 1));
+}
+
+static inline int arch_write_trylock_once(arch_rwlock_t *rw)
+{
+ unsigned int old = ACCESS_ONCE(rw->lock);
+ return likely(old == 0 &&
+ _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+}
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
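+/*
+ * The z196 interlocked-access facility provides load-and-op
+ * instructions: lao (load and or), lan (load and and) and laa
+ * (load and add). Each applies the operation atomically and returns
+ * the old value of the operand.
+ */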
+#define __RAW_OP_OR "lao"
+#define __RAW_OP_AND "lan"
+#define __RAW_OP_ADD "laa"
+
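+/*
+ * "bcr 14,0" acts as a serialization point: __RAW_LOCK serializes
+ * after the atomic update (acquire semantics), __RAW_UNLOCK before
+ * it (release semantics).
+ */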
+#define __RAW_LOCK(ptr, op_val, op_string) \
+({ \
+ unsigned int old_val; \
+ \
+ typecheck(unsigned int *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ "bcr 14,0\n" \
+ : "=d" (old_val), "+Q" (*ptr) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#define __RAW_UNLOCK(ptr, op_val, op_string) \
+({ \
+ unsigned int old_val; \
+ \
+ typecheck(unsigned int *, ptr); \
+ asm volatile( \
+ "bcr 14,0\n" \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" (*ptr) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+
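+/*
+ * A reader adds 1 to the lock word; if the old value was negative,
+ * the writer bit was set and the reader must wait out of line.
+ */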
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int old;
+
+ old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
+ if ((int) old < 0)
+ _raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+}
+
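+/*
+ * A writer sets the writer bit with load-and-or; any non-zero old
+ * value means readers or another writer were active, so wait out of
+ * line. The owner field is recorded so that contending cpus can
+ * direct their yield at the lock holder.
+ */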
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int old;
+
+ old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+ if (old != 0)
+ _raw_write_lock_wait(rw, old);
+ rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ rw->owner = 0;
+ __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
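+/*
+ * Without the interlocked-access facility all updates of the lock
+ * word fall back to compare-and-swap loops.
+ */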
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ if (!arch_read_trylock_once(rw))
+ _raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int old;
+
+ do {
+ old = ACCESS_ONCE(rw->lock);
+ } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ if (!arch_write_trylock_once(rw))
+ _raw_write_lock_wait(rw);
+ rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ typecheck(unsigned int, rw->lock);
+
+ rw->owner = 0;
+ asm volatile(
+ __ASM_BARRIER
+ "st %1,%0\n"
+ : "+Q" (rw->lock)
+ : "d" (0)
+ : "cc", "memory");
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ if (!arch_read_trylock_once(rw))
+ return _raw_read_trylock_retry(rw);
+ return 1;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+ return 0;
+ rw->owner = SPINLOCK_LOCKVAL;
+ return 1;
+}
+
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+ arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+ arch_lock_relax(rw->owner);
+}
+
+#endif /* __ASM_SPINLOCK_H */
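
For illustration, the CAS-based protocol of arch_spin_trylock_once()
and arch_spin_unlock() can be sketched in portable user-space C. This
is a sketch, not kernel code: the demo_* names and the constant
DEMO_LOCKVAL are invented for the example, sched_yield() stands in for
the kernel's directed-yield logic, and __sync_synchronize() stands in
for __ASM_BARRIER.

#include <sched.h>

typedef struct { volatile unsigned int lock; } demo_spinlock_t;

#define DEMO_LOCKVAL 1u	/* stand-in for the per-cpu SPINLOCK_LOCKVAL (~cpu) */

static inline int demo_trylock(demo_spinlock_t *lp)
{
	/* Succeed only if the lock is free and our CAS installs the value. */
	return lp->lock == 0 &&
	       __sync_bool_compare_and_swap(&lp->lock, 0, DEMO_LOCKVAL);
}

static inline void demo_lock(demo_spinlock_t *lp)
{
	/* Fast path, then spin; the kernel calls arch_spin_lock_wait() instead. */
	while (!demo_trylock(lp))
		sched_yield();
}

static inline void demo_unlock(demo_spinlock_t *lp)
{
	__sync_synchronize();	/* release barrier, like __ASM_BARRIER */
	lp->lock = 0;		/* plain store of 0, like arch_spin_unlock() */
}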