/* Source: karo-tx-linux.git — include/linux/percpu_counter.h */
1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
3 /*
4  * A simple "approximate counter" for use in ext2 and ext3 superblocks.
5  *
6  * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
7  */
8
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
15 #include <linux/gfp.h>
16
17 #ifdef CONFIG_SMP
18
/*
 * SMP flavour: a central 64-bit count plus small per-cpu deltas that are
 * periodically folded into ->count (implementation in lib/percpu_counter.c).
 */
struct percpu_counter {
	raw_spinlock_t lock;	/* NOTE(review): presumably serializes folding into ->count — see lib/percpu_counter.c */
	s64 count;		/* central (approximate) value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-cpu deltas; NULL until init succeeds */
};
27
/* Default fold/batch threshold used by the non-batch wrappers below. */
extern int percpu_counter_batch;

/* Callers should use percpu_counter_init(); it supplies the lockdep key. */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

/*
 * A distinct static lock_class_key per call site gives each counter's
 * spinlock its own lockdep class, so lockdep can tell different users apart.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
45
/*
 * Compare the counter against @rhs using the default batch threshold.
 * Returns -1/0/1 like the UP variant of percpu_counter_compare().
 */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
50
/* Add @amount to the counter, using the default batch threshold. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
55
56 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
57 {
58         s64 ret = __percpu_counter_sum(fbc);
59         return ret < 0 ? 0 : ret;
60 }
61
/* Accurate (but more expensive) sum; may legitimately be negative. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
66
/*
 * Cheap, lockless read of the central count.  Per-cpu deltas that have not
 * been folded in yet are not included, so the value is approximate.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
71
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative (unfolded per-cpu
 * deltas can transiently push ->count below zero).  This variant clamps
 * such reads to 0.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	/*
	 * Prevent reloads of fbc->count: without the barrier the compiler
	 * could re-read the (concurrently changing) field after the test
	 * and return a negative value after all.
	 */
	barrier();
	if (ret >= 0)
		return ret;
	return 0;
}
86
87 static inline int percpu_counter_initialized(struct percpu_counter *fbc)
88 {
89         return (fbc->counters != NULL);
90 }
91
92 #else /* !CONFIG_SMP */
93
/* UP flavour: no per-cpu machinery needed, just the plain 64-bit count. */
struct percpu_counter {
	s64 count;
};
97
/*
 * UP init: no allocation to perform, so @gfp is unused and the call
 * always succeeds (returns 0, matching the SMP interface).
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}
104
/* Nothing was allocated in the UP case, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
108
/* Overwrite the counter with @amount. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
113
114 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
115 {
116         if (fbc->count > rhs)
117                 return 1;
118         else if (fbc->count < rhs)
119                 return -1;
120         else
121                 return 0;
122 }
123
/* UP: the count is exact, so @batch is irrelevant and ignored. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
129
/*
 * Add @amount to the counter.  Preemption is disabled so the
 * read-modify-write of ->count cannot be interleaved with an update
 * from a task that preempts us.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
137
/* UP: @batch is meaningless; forward to the plain add. */
static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
143
/* UP: the count is exact, so a plain read suffices. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
148
/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative, so no clamping is performed here
 * (unlike the SMP variant, which clamps transient negatives to 0).
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
157
/* UP: reading is already exact, so "sum" is just the positive read. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
162
/* UP: reading is already exact, so "sum" is just the plain read. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
167
/* UP counters need no allocation, so they are always "initialized". */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}
172
173 #endif  /* CONFIG_SMP */
174
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
179
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
184
/*
 * Subtract @amount from the counter.
 * NOTE(review): negating @amount is undefined for S64_MIN; callers are
 * presumably never passing that value — confirm if in doubt.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
189
190 #endif /* _LINUX_PERCPU_COUNTER_H */