author    Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/include/linux/percpu_counter.h
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/include/linux/percpu_counter.h')
-rw-r--r--    kernel/include/linux/percpu_counter.h    190
1 file changed, 190 insertions(+), 0 deletions(-)
diff --git a/kernel/include/linux/percpu_counter.h b/kernel/include/linux/percpu_counter.h
new file mode 100644
index 000000000..84a109449
--- /dev/null
+++ b/kernel/include/linux/percpu_counter.h
@@ -0,0 +1,190 @@
+#ifndef _LINUX_PERCPU_COUNTER_H
+#define _LINUX_PERCPU_COUNTER_H
+/*
+ * A simple "approximate counter" for use in ext2 and ext3 superblocks.
+ *
+ * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+
+#ifdef CONFIG_SMP
+
+struct percpu_counter {
+ raw_spinlock_t lock;
+ s64 count;
+#ifdef CONFIG_HOTPLUG_CPU
+ struct list_head list; /* All percpu_counters are on a list */
+#endif
+ s32 __percpu *counters;
+};
+
+extern int percpu_counter_batch;
+
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
+ struct lock_class_key *key);
+
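+/*
+ * The macro below gives each percpu_counter_init() call site its own static
+ * lock class key, so lockdep can distinguish otherwise-identical spinlocks
+ * belonging to unrelated counters.
+ */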
+#define percpu_counter_init(fbc, value, gfp) \
+ ({ \
+ static struct lock_class_key __key; \
+ \
+ __percpu_counter_init(fbc, value, gfp, &__key); \
+ })
+
+void percpu_counter_destroy(struct percpu_counter *fbc);
+void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
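+/*
+ * Adds into a per-CPU delta; the delta is folded into fbc->count once its
+ * magnitude reaches @batch, keeping the common add path off the lock.
+ */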
+void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+ return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
+
+static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+ __percpu_counter_add(fbc, amount, percpu_counter_batch);
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+ s64 ret = __percpu_counter_sum(fbc);
+ return ret < 0 ? 0 : ret;
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc);
+}
+
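+/* Cheap but approximate: reads only the central count, not per-CPU deltas. */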
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+/*
+ * percpu_counter_read() may return a small negative number even for a
+ * counter that should never be negative, because per-CPU deltas have not
+ * yet been folded in; this variant clamps the result at zero.
+ */
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+ s64 ret = fbc->count;
+
+ barrier(); /* Prevent reloads of fbc->count */
+ if (ret >= 0)
+ return ret;
+ return 0;
+}
+
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return (fbc->counters != NULL);
+}
+
+#else /* !CONFIG_SMP */
+
+struct percpu_counter {
+ s64 count;
+};
+
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp)
+{
+ fbc->count = amount;
+ return 0;
+}
+
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+}
+
+static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
+{
+ fbc->count = amount;
+}
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+ if (fbc->count > rhs)
+ return 1;
+ else if (fbc->count < rhs)
+ return -1;
+ else
+ return 0;
+}
+
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+ return percpu_counter_compare(fbc, rhs);
+}
+
+static inline void
+percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+ preempt_disable();
+ fbc->count += amount;
+ preempt_enable();
+}
+
+static inline void
+__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+ percpu_counter_add(fbc, amount);
+}
+
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+/*
+ * percpu_counter is intended to track positive numbers. In the UP case the
+ * number should never be negative.
+ */
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+ return percpu_counter_read_positive(fbc);
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return percpu_counter_read(fbc);
+}
+
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return 1;
+}
+
+#endif /* CONFIG_SMP */
+
+static inline void percpu_counter_inc(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, 1);
+}
+
+static inline void percpu_counter_dec(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, -1);
+}
+
+static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, -amount);
+}
+
+#endif /* _LINUX_PERCPU_COUNTER_H */
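
For reference, a minimal sketch of how a caller might use the API this header
declares, written as a hypothetical kernel module. The module name, the initial
value of 0, and the GFP_KERNEL allocation context are illustrative assumptions,
not part of the imported file:

#include <linux/module.h>
#include <linux/percpu_counter.h>

static struct percpu_counter demo_counter;	/* hypothetical example counter */

static int __init demo_init(void)
{
	int err;

	/* Allocates the per-CPU slots on SMP; just sets count on UP. */
	err = percpu_counter_init(&demo_counter, 0, GFP_KERNEL);
	if (err)
		return err;

	percpu_counter_inc(&demo_counter);	/* count += 1 */
	percpu_counter_add(&demo_counter, 41);	/* batched on SMP */

	/*
	 * percpu_counter_read() is a cheap, approximate snapshot;
	 * percpu_counter_sum() folds in every CPU's pending delta.
	 */
	pr_info("approx=%lld exact=%lld\n",
		(long long)percpu_counter_read(&demo_counter),
		(long long)percpu_counter_sum(&demo_counter));

	return 0;
}

static void __exit demo_exit(void)
{
	percpu_counter_destroy(&demo_counter);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");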