1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
15 #include <linux/gfp.h>
19 struct percpu_counter {
22 #ifdef CONFIG_HOTPLUG_CPU
23 struct list_head list; /* All percpu_counters are on a list */
25 s32 __percpu *counters;
28 extern int percpu_counter_batch;
30 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
31 struct lock_class_key *key);
/*
 * Initialise @fbc to @value. A static lock class key per call site keeps
 * lockdep able to distinguish the counters' internal spinlocks.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
40 void percpu_counter_destroy(struct percpu_counter *fbc);
41 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
42 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
44 s64 __percpu_counter_sum(struct percpu_counter *fbc);
45 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
47 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
49 return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
52 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
54 percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
57 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
59 s64 ret = __percpu_counter_sum(fbc);
60 return ret < 0 ? 0 : ret;
63 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
65 return __percpu_counter_sum(fbc);
68 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
74 * It is possible for the percpu_counter_read() to return a small negative
75 * number for some counter which should never be negative.
78 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
82 barrier(); /* Prevent reloads of fbc->count */
88 static inline int percpu_counter_initialized(struct percpu_counter *fbc)
90 return (fbc->counters != NULL);
93 #else /* !CONFIG_SMP */
95 struct percpu_counter {
99 static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
106 static inline void percpu_counter_destroy(struct percpu_counter *fbc)
110 static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
115 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
117 if (fbc->count > rhs)
119 else if (fbc->count < rhs)
126 __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
128 return percpu_counter_compare(fbc, rhs);
132 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
135 fbc->count += amount;
140 percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
142 percpu_counter_add(fbc, amount);
145 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
154 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
159 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
161 return percpu_counter_read_positive(fbc);
164 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
166 return percpu_counter_read(fbc);
169 static inline int percpu_counter_initialized(struct percpu_counter *fbc)
174 #endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
186 static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
188 percpu_counter_add(fbc, -amount);
191 #endif /* _LINUX_PERCPU_COUNTER_H */