xref: /linux/include/linux/vmstat.h (revision 592329e5e94e26080f4815c6cc6cd0f487a91064)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

#ifdef CONFIG_NUMA
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
	unsigned nr_demoted;
};

/* Stat data for system wide items */
enum vm_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_MEMMAP_PAGES,	/* page metadata allocated through buddy allocator */
	NR_MEMMAP_BOOT_PAGES,	/* page metadata allocated through boot allocator */
	NR_VM_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * VM counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
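
/*
 * Usage sketch (illustrative, not part of the original header): the __
 * variants use raw_cpu ops and tolerate an occasional lost update, while the
 * plain variants use this_cpu ops and are safe against interrupts and
 * preemption. PGFAULT and PGMAJFAULT are existing vm_event_item values, used
 * here only as examples:
 *
 *	count_vm_event(PGFAULT);		// safe default
 *	count_vm_events(PGMAJFAULT, nr);	// batched, safe variant
 *	__count_vm_event(PGFAULT);		// racy but cheaper fast-path form
 */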

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_PER_VMA_LOCK_STATS
#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
#define count_vm_vma_lock_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
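
/*
 * Illustrative expansion (added for clarity, assuming an event with per-zone
 * variants such as PGALLOC_*): for an allocation from the zone at index zid,
 *
 *	__count_zid_vm_events(PGALLOC, zone_idx(zone), 1 << order);
 *
 * resolves to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, ...),
 * which relies on the per-zone event variants being declared contiguously in
 * zone order.
 */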

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * A more accurate version that also considers the currently pending
 * per-CPU deltas. For that we need to loop over all CPUs to read their
 * deltas. There is no synchronization, so the result is still not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
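
/*
 * Usage sketch (illustrative only): zone_page_state() is the cheap read and
 * may lag by up to the per-CPU stat thresholds; the snapshot variant is for
 * slow paths that want a tighter estimate. NR_FREE_PAGES is an existing
 * zone_stat_item, used here purely as an example:
 *
 *	free = zone_page_state(zone, NR_FREE_PAGES);		// fast, approximate
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);	// slower, closer to truth
 */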

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}
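
/*
 * Illustrative usage (added for clarity): the allocator fast path bumps these
 * per-zone NUMA counters where the racy raw_cpu ops are acceptable. NUMA_HIT
 * and NUMA_LOCAL are existing numa_stat_item values, used here only as
 * examples:
 *
 *	__count_numa_event(zone, NUMA_HIT);
 *	__count_numa_events(zone, NUMA_LOCAL, nr_pages);
 */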

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
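
/*
 * Illustrative example (added for clarity): items flagged by
 * vmstat_item_in_bytes(), e.g. NR_SLAB_RECLAIMABLE_B, take a byte delta but
 * are stored in pages, so at this level the delta is expected to be
 * page-aligned:
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *	// accounted internally as one page
 */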

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}


static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}


/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif		/* CONFIG_SMP */

static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}
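
/*
 * Usage sketch (illustrative only): the folio helpers scale the adjustment by
 * folio_nr_pages(), so callers need not special-case large folios. NR_MLOCK
 * is an existing zone_stat_item, used here only as an example:
 *
 *	zone_stat_add_folio(folio, NR_MLOCK);	// charge the whole folio
 *	zone_stat_sub_folio(folio, NR_MLOCK);	// undo on munlock
 */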

extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_"
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
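
/*
 * Layout note with a worked example (added for clarity): vmstat_text[] is
 * assumed to list zone stats first, then NUMA events, node stats, the
 * vm_stat_item entries and finally the vm_event names, which is why each
 * helper above skips the sizes of the preceding groups. For instance,
 * node_stat_name(NR_FILE_PAGES) indexes past the zone and NUMA blocks before
 * reaching "nr_file_pages".
 */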

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __lruvec_stat_mod_folio(struct folio *folio,
			     enum node_stat_item idx, int val);

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__lruvec_stat_mod_folio(folio, idx, val);
	local_irq_restore(flags);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	lruvec_stat_mod_folio(page_folio(page), idx, val);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	__mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
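
/*
 * Usage sketch (illustrative only): with CONFIG_MEMCG the lruvec helpers also
 * charge the folio's memory cgroup; without it they fall back to plain node
 * stats. NR_FILE_PAGES is an existing node_stat_item, used here only as an
 * example:
 *
 *	__lruvec_stat_add_folio(folio, NR_FILE_PAGES);	// e.g. on page cache insert
 *	__lruvec_stat_sub_folio(folio, NR_FILE_PAGES);	// and on removal
 */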

void memmap_boot_pages_add(long delta);
void memmap_pages_add(long delta);
#endif /* _LINUX_VMSTAT_H */