/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
	unsigned nr_demoted;
};

/* Stat data for system wide items */
enum vm_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_MEMMAP_PAGES,	/* page metadata allocated through buddy allocator */
	NR_MEMMAP_BOOT_PAGES,	/* page metadata allocated through boot allocator */
	NR_VM_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
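
/*
 * Usage sketch (illustrative, not part of the API): process-context
 * callers use the irq/preempt-safe wrappers, while paths that already
 * run with preemption or interrupts disabled can use the cheaper raw
 * variants. PGFAULT and PGFREE below are just example vm_event_item
 * values:
 *
 *	count_vm_event(PGFAULT);		// safe from any context
 *
 *	local_irq_disable();
 *	__count_vm_events(PGFREE, 1 << order);	// caller already protected
 *	local_irq_enable();
 */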

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_PER_VMA_LOCK_STATS
#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
#define count_vm_vma_lock_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
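
/*
 * Example (illustrative): for zone-indexed events such as PGALLOC,
 *
 *	__count_zid_vm_events(PGALLOC, zone_idx(zone), 1 << order);
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, delta),
 * which works because the PGALLOC_* items mirror the order of enum
 * zone_type in enum vm_event_item.
 */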

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so even this result is not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
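
/*
 * Example (illustrative): the snapshot variant is worth its cost where
 * per-cpu drift could change a decision, e.g. a watermark check:
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) <
 *			min_wmark_pages(zone))
 *		...
 *
 * whereas plain zone_page_state(zone, NR_FREE_PAGES) may be off by the
 * sum of the pending per-cpu deltas.
 */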

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(const struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
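
/*
 * Usage sketch (illustrative): reclaim can switch a node to the tighter
 * pressure thresholds while it needs accurate counters, then restore
 * the normal ones:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */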
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
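
/*
 * Example (illustrative): byte-counted items (those for which
 * vmstat_item_in_bytes() is true, e.g. NR_SLAB_RECLAIMABLE_B) must
 * still change in whole-page multiples at the node level:
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *
 * A delta that is not page-aligned would trip the VM_WARN_ON_ONCE()
 * above.
 */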

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif		/* CONFIG_SMP */

static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}
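
/*
 * Example (illustrative): the folio helpers scale by folio_nr_pages(),
 * so one call accounts a large folio correctly; NR_FILE_PAGES is just
 * an example node_stat_item:
 *
 *	node_stat_add_folio(folio, NR_FILE_PAGES);
 *	...
 *	node_stat_sub_folio(folio, NR_FILE_PAGES);
 */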

extern const char * const vmstat_text[];
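
/*
 * vmstat_text is laid out in the order the *_name() helpers below
 * assume: zone stat items first, then NUMA event items, then node stat
 * items, then the system wide vm_stat items, and finally the vm event
 * names (when event counters are built in).
 */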

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS + item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_"
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __lruvec_stat_mod_folio(struct folio *folio,
			     enum node_stat_item idx, int val);

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__lruvec_stat_mod_folio(folio, idx, val);
	local_irq_restore(flags);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	lruvec_stat_mod_folio(page_folio(page), idx, val);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
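
/*
 * Usage sketch (illustrative): dirty accounting for a folio goes
 * through the lruvec wrappers so that both the node and, with
 * CONFIG_MEMCG, the owning cgroup are updated; NR_FILE_DIRTY is just
 * an example item:
 *
 *	lruvec_stat_add_folio(folio, NR_FILE_DIRTY);
 *
 * Without CONFIG_MEMCG this falls back to plain node accounting via
 * the stubs above.
 */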

void memmap_boot_pages_add(long delta);
void memmap_pages_add(long delta);
#endif /* _LINUX_VMSTAT_H */