Diffstat (limited to 'kernel/arch/sparc/lib/atomic32.c')
-rw-r--r--  kernel/arch/sparc/lib/atomic32.c  166
1 file changed, 166 insertions(+), 0 deletions(-)
diff --git a/kernel/arch/sparc/lib/atomic32.c b/kernel/arch/sparc/lib/atomic32.c
new file mode 100644
index 000000000..71cd65ab2
--- /dev/null
+++ b/kernel/arch/sparc/lib/atomic32.c
@@ -0,0 +1,166 @@
+/*
+ * atomic32.c: 32-bit atomic_t implementation
+ *
+ * Copyright (C) 2004 Keith M Wesolowski
+ * Copyright (C) 2007 Kyle McMartin
+ *
+ * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
+ */
+
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_SMP
+#define ATOMIC_HASH_SIZE 4
+#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
+
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
+};
+
+#else /* SMP */
+
+static DEFINE_SPINLOCK(dummy);
+#define ATOMIC_HASH_SIZE 1
+#define ATOMIC_HASH(a) (&dummy)
+
+#endif /* SMP */
+
+#define ATOMIC_OP(op, cop) \
+int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int ret; \
+ unsigned long flags; \
+ spin_lock_irqsave(ATOMIC_HASH(v), flags); \
+ \
+ ret = (v->counter cop i); \
+ \
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(atomic_##op##_return);
+
+ATOMIC_OP(add, +=)
+
+#undef ATOMIC_OP
+
+int atomic_xchg(atomic_t *v, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
+ ret = v->counter;
+ v->counter = new;
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+ return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
+int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+ return ret;
+}
+EXPORT_SYMBOL(atomic_cmpxchg);
+
+int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+ return ret;
+}
+EXPORT_SYMBOL(__atomic_add_unless);
+
+/* Atomic operations are already serializing */
+void atomic_set(atomic_t *v, int i)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
+ v->counter = i;
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
+EXPORT_SYMBOL(atomic_set);
+
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+{
+ unsigned long old, flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+ old = *addr;
+ *addr = old | mask;
+ spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+ return old & mask;
+}
+EXPORT_SYMBOL(___set_bit);
+
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+{
+ unsigned long old, flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+ old = *addr;
+ *addr = old & ~mask;
+ spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+ return old & mask;
+}
+EXPORT_SYMBOL(___clear_bit);
+
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+{
+ unsigned long old, flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+ old = *addr;
+ *addr = old ^ mask;
+ spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+ return old & mask;
+}
+EXPORT_SYMBOL(___change_bit);
+
+unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
+{
+ unsigned long flags;
+ u32 prev;
+
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+ if ((prev = *ptr) == old)
+ *ptr = new;
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+ return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__cmpxchg_u32);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+ unsigned long flags;
+ u32 prev;
+
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+ prev = *ptr;
+ *ptr = new;
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+ return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
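
For context: 32-bit SPARC has no compare-and-swap instruction, so this file emulates the kernel's atomic and bitop primitives by taking a spinlock selected by hashing the target address (ATOMIC_HASH), with interrupts disabled across the critical section. Hashing on bits 8 and up of the pointer spreads unrelated addresses across ATOMIC_HASH_SIZE locks on SMP; on UP a single dummy lock suffices, since disabling interrupts already serializes. Below is a minimal userspace sketch of the same hash-locked cmpxchg, separate from the patch itself: pthread mutexes stand in for spinlock_t plus the IRQ disabling, and every demo_* identifier is hypothetical, invented for this illustration.

/* Hash-locked cmpxchg emulation, modelled on __cmpxchg_u32() above. */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HASH_SIZE 4
/* Same idea as ATOMIC_HASH(): index a small lock array by address bits. */
#define DEMO_HASH(a) \
	(&demo_hash[(((unsigned long)(a)) >> 8) & (DEMO_HASH_SIZE - 1)])

static pthread_mutex_t demo_hash[DEMO_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Returns the previous value; the swap happened iff prev == old. */
static uint32_t demo_cmpxchg_u32(volatile uint32_t *ptr, uint32_t old,
				 uint32_t new_val)
{
	uint32_t prev;

	pthread_mutex_lock(DEMO_HASH(ptr));
	prev = *ptr;
	if (prev == old)
		*ptr = new_val;
	pthread_mutex_unlock(DEMO_HASH(ptr));
	return prev;
}

int main(void)
{
	volatile uint32_t v = 5;
	uint32_t prev;

	prev = demo_cmpxchg_u32(&v, 5, 6);	/* succeeds: 5 -> 6 */
	printf("prev=%" PRIu32 " v=%" PRIu32 "\n", prev, v);

	prev = demo_cmpxchg_u32(&v, 5, 7);	/* fails: v is 6, unchanged */
	printf("prev=%" PRIu32 " v=%" PRIu32 "\n", prev, v);
	return 0;
}

The design trade-off is the one the patch itself makes: a single global lock would serialize every atomic in the system, while one lock per word would be wasteful, so a small hashed array recovers most of the concurrency at fixed cost. Correctness depends on every writer of a given word going through the same lock, which is why the file routes the bitops, xchg, and cmpxchg variants through ATOMIC_HASH() as well.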