/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key);

#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
					   &__key);			\
	})


#define percpu_counter_init(fbc, value, gfp)				\
	percpu_counter_init_many(fbc, value, gfp, 1)

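/*
 * Example (illustrative sketch, not taken from an in-tree user): a
 * typical caller initialises the per-CPU storage once at setup time
 * and releases it on teardown.  "nr_widgets" is a made-up name.
 *
 *	struct percpu_counter nr_widgets;
 *
 *	if (percpu_counter_init(&nr_widgets, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	percpu_counter_inc(&nr_widgets);
 *	...
 *	percpu_counter_destroy(&nr_widgets);
 */
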
void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}

void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
				  s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
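
/*
 * Example (illustrative sketch): percpu_counter_compare() answers
 * whether the counter is above or below "rhs" without always paying
 * for a full sum; it falls back to __percpu_counter_sum() only when
 * the approximate fbc->count is within the batch error margin.
 * "nr_widgets" and "max_widgets" are made-up names.
 *
 *	if (percpu_counter_compare(&nr_widgets, max_widgets) >= 0)
 *		return -ENOSPC;
 */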

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	return __percpu_counter_limited_add(fbc, limit, amount,
					    percpu_counter_batch);
}

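/*
 * Example (illustrative sketch): a compare-then-add sequence like the
 * one above can race with concurrent updaters; when a hard cap must
 * hold, percpu_counter_limited_add() checks the limit and applies the
 * delta as a single operation, failing instead of overshooting.
 * Names are made up.
 *
 *	if (!percpu_counter_limited_add(&nr_widgets, max_widgets, 1))
 *		return -ENOSPC;
 */
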
/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and are not folded into
 * fbc->count until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.
 * This makes counter writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}

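/*
 * Example (illustrative sketch): a write-mostly event counter.  The
 * hot path stays CPU-local; the rare reader pays for a full
 * percpu_counter_sum().  "nr_events" and the seq_file "m" are made-up
 * names.
 *
 *	percpu_counter_add_local(&nr_events, 1);
 *	...
 *	seq_printf(m, "events: %lld\n", percpu_counter_sum(&nr_events));
 */
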
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}

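/*
 * Example (illustrative sketch): percpu_counter_read_positive() is a
 * cheap, lock-free approximation; reach for percpu_counter_sum_positive()
 * only when the exact value justifies a walk over all CPUs.
 * "nr_widgets" is a made-up name.
 *
 *	s64 approx = percpu_counter_read_positive(&nr_widgets);
 *	s64 exact = percpu_counter_sum_positive(&nr_widgets);
 */
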
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init_many(struct percpu_counter *fbc,
					   s64 amount, gfp_t gfp,
					   u32 nr_counters)
{
	u32 i;

	for (i = 0; i < nr_counters; i++)
		fbc[i].count = amount;

	return 0;
}

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	return percpu_counter_init_many(fbc, amount, gfp, 1);
}

static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	unsigned long flags;
	bool good = false;
	s64 count;

	if (amount == 0)
		return true;

	local_irq_save(flags);
	count = fbc->count + amount;
	if ((amount > 0 && count <= limit) ||
	    (amount < 0 && count >= limit)) {
		fbc->count = count;
		good = true;
	}
	local_irq_restore(flags);
	return good;
}

/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */