xref: /linux/mm/vmstat.c (revision 9907e1df31c0f4bdcebe16de809121baa754e5b5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/vmstat.c
4  *
5  *  Manages VM statistics
6  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
7  *
8  *  zoned VM statistics
9  *  Copyright (C) 2006 Silicon Graphics, Inc.,
10  *		Christoph Lameter <cl@gentwo.org>
11  *  Copyright (C) 2008-2014 Christoph Lameter
12  */
13 #include <linux/fs.h>
14 #include <linux/mm.h>
15 #include <linux/err.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/cpumask.h>
20 #include <linux/vmstat.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/debugfs.h>
24 #include <linux/sched.h>
25 #include <linux/math64.h>
26 #include <linux/writeback.h>
27 #include <linux/compaction.h>
28 #include <linux/mm_inline.h>
29 #include <linux/page_owner.h>
30 #include <linux/sched/isolation.h>
31 
32 #include "internal.h"
33 
34 #ifdef CONFIG_PROC_FS
35 #ifdef CONFIG_NUMA
36 #define ENABLE_NUMA_STAT 1
37 static int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
38 
39 /* zero numa counters within a zone */
40 static void zero_zone_numa_counters(struct zone *zone)
41 {
42 	int item, cpu;
43 
44 	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
45 		atomic_long_set(&zone->vm_numa_event[item], 0);
46 		for_each_online_cpu(cpu) {
47 			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
48 						= 0;
49 		}
50 	}
51 }
52 
53 /* zero numa counters of all the populated zones */
54 static void zero_zones_numa_counters(void)
55 {
56 	struct zone *zone;
57 
58 	for_each_populated_zone(zone)
59 		zero_zone_numa_counters(zone);
60 }
61 
62 /* zero global numa counters */
63 static void zero_global_numa_counters(void)
64 {
65 	int item;
66 
67 	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
68 		atomic_long_set(&vm_numa_event[item], 0);
69 }
70 
71 static void invalid_numa_statistics(void)
72 {
73 	zero_zones_numa_counters();
74 	zero_global_numa_counters();
75 }
76 
77 static DEFINE_MUTEX(vm_numa_stat_lock);
78 
79 static int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
80 		void *buffer, size_t *length, loff_t *ppos)
81 {
82 	int ret, oldval;
83 
84 	mutex_lock(&vm_numa_stat_lock);
85 	if (write)
86 		oldval = sysctl_vm_numa_stat;
87 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
88 	if (ret || !write)
89 		goto out;
90 
91 	if (oldval == sysctl_vm_numa_stat)
92 		goto out;
93 	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
94 		static_branch_enable(&vm_numa_stat_key);
95 		pr_info("enable numa statistics\n");
96 	} else {
97 		static_branch_disable(&vm_numa_stat_key);
98 		invalid_numa_statistics();
99 		pr_info("disable numa statistics, and clear numa counters\n");
100 	}
101 
102 out:
103 	mutex_unlock(&vm_numa_stat_lock);
104 	return ret;
105 }
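
/*
 * Illustrative usage (an assumption: this handler is registered as the
 * "numa_stat" sysctl, as its name suggests; the registration itself is
 * not part of this section):
 *
 *	echo 0 > /proc/sys/vm/numa_stat		# disable, zero all counters
 *	echo 1 > /proc/sys/vm/numa_stat		# re-enable collection
 *
 * Disabling flips vm_numa_stat_key so the hot allocation paths skip
 * NUMA event accounting; the counters restart from zero when re-enabled.
 */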
106 #endif
107 #endif /* CONFIG_PROC_FS */
108 
109 #ifdef CONFIG_VM_EVENT_COUNTERS
110 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
111 EXPORT_PER_CPU_SYMBOL(vm_event_states);
112 
113 static void sum_vm_events(unsigned long *ret)
114 {
115 	int cpu;
116 	int i;
117 
118 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
119 
120 	for_each_online_cpu(cpu) {
121 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
122 
123 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
124 			ret[i] += this->event[i];
125 	}
126 }
127 
128 /*
129  * Accumulate the vm event counters across all CPUs.
130  * The result is unavoidably approximate - it can change
131  * during and after execution of this function.
132  */
133 void all_vm_events(unsigned long *ret)
134 {
135 	cpus_read_lock();
136 	sum_vm_events(ret);
137 	cpus_read_unlock();
138 }
139 EXPORT_SYMBOL_GPL(all_vm_events);
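
/*
 * Minimal usage sketch (illustrative only): snapshot all event counters
 * and pick out one item by its enum vm_event_item index.
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("page faults so far: %lu\n", events[PGFAULT]);
 */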
140 
141 /*
142  * Fold the foreign cpu events into our own.
143  *
144  * This is adding to the events on one processor
145  * but keeps the global counts constant.
146  */
147 void vm_events_fold_cpu(int cpu)
148 {
149 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
150 	int i;
151 
152 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
153 		count_vm_events(i, fold_state->event[i]);
154 		fold_state->event[i] = 0;
155 	}
156 }
157 
158 #endif /* CONFIG_VM_EVENT_COUNTERS */
159 
160 /*
161  * Manage combined zone based / global counters
162  *
163  * vm_stat contains the global counters
164  */
165 atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
166 atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
167 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
168 EXPORT_SYMBOL(vm_zone_stat);
169 EXPORT_SYMBOL(vm_node_stat);
170 
171 #ifdef CONFIG_NUMA
172 static void fold_vm_zone_numa_events(struct zone *zone)
173 {
174 	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
175 	int cpu;
176 	enum numa_stat_item item;
177 
178 	for_each_online_cpu(cpu) {
179 		struct per_cpu_zonestat *pzstats;
180 
181 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
182 		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
183 			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
184 	}
185 
186 	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
187 		zone_numa_event_add(zone_numa_events[item], zone, item);
188 }
189 
190 void fold_vm_numa_events(void)
191 {
192 	struct zone *zone;
193 
194 	for_each_populated_zone(zone)
195 		fold_vm_zone_numa_events(zone);
196 }
197 #endif
198 
199 #ifdef CONFIG_SMP
200 
201 int calculate_pressure_threshold(struct zone *zone)
202 {
203 	int threshold;
204 	int watermark_distance;
205 
206 	/*
207 	 * As vmstats are not up to date, there is drift between the estimated
208 	 * and real values. For high thresholds and a high number of CPUs, it
209 	 * is possible for the min watermark to be breached while the estimated
210 	 * value looks fine. The pressure threshold is a reduced value such
211 	 * that even the maximum amount of drift will not accidentally breach
212 	 * the min watermark
213 	 */
214 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
215 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
216 
217 	/*
218 	 * Maximum threshold is 125
219 	 */
220 	threshold = min(125, threshold);
221 
222 	return threshold;
223 }
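
/*
 * Worked example (illustrative numbers): with a low-min watermark gap of
 * 1024 pages and 8 online CPUs, the pressure threshold is
 * max(1, 1024 / 8) = 128, clamped to 125. Worst-case aggregate drift is
 * then 8 * 125 = 1000 pages, which still fits inside the 1024-page gap.
 */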
224 
225 int calculate_normal_threshold(struct zone *zone)
226 {
227 	int threshold;
228 	int mem;	/* memory in 128 MB units */
229 
230 	/*
231 	 * The threshold scales with the number of processors and the amount
232 	 * of memory per zone. More memory means that we can defer updates for
233 	 * longer, more processors could lead to more contention.
234 	 * fls() is used as a cheap way of logarithmic scaling.
235 	 *
236 	 * Some sample thresholds:
237 	 *
238 	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
239 	 * ------------------------------------------------------------------
240 	 * 8		1		1	0.9-1 GB	4
241 	 * 16		2		2	0.9-1 GB	4
242 	 * 20 		2		2	1-2 GB		5
243 	 * 24		2		2	2-4 GB		6
244 	 * 28		2		2	4-8 GB		7
245 	 * 32		2		2	8-16 GB		8
246 	 * 4		2		2	<128M		1
247 	 * 30		4		3	2-4 GB		5
248 	 * 48		4		3	8-16 GB		8
249 	 * 32		8		4	1-2 GB		4
250 	 * 32		8		4	0.9-1GB		4
251 	 * 10		16		5	<128M		1
252 	 * 40		16		5	900M		4
253 	 * 70		64		7	2-4 GB		5
254 	 * 84		64		7	4-8 GB		6
255 	 * 108		512		9	4-8 GB		6
256 	 * 125		1024		10	8-16 GB		8
257 	 * 125		1024		10	16-32 GB	9
258 	 */
259 
260 	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
261 
262 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
263 
264 	/*
265 	 * Maximum threshold is 125
266 	 */
267 	threshold = min(125, threshold);
268 
269 	return threshold;
270 }
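
/*
 * Worked example (illustrative): a zone of roughly 1.9 GB yields
 * mem = 15 in 128 MB units, so fls(mem) = 4. With 4 online CPUs,
 * fls(4) = 3 and threshold = 2 * 3 * (1 + 4) = 30, matching the
 * "30 / 4 CPUs / 2-4 GB" row of the table above.
 */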
271 
272 /*
273  * Refresh the thresholds for each zone.
274  */
275 void refresh_zone_stat_thresholds(void)
276 {
277 	struct pglist_data *pgdat;
278 	struct zone *zone;
279 	int cpu;
280 	int threshold;
281 
282 	/* Zero current pgdat thresholds */
283 	for_each_online_pgdat(pgdat) {
284 		for_each_online_cpu(cpu) {
285 			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
286 		}
287 	}
288 
289 	for_each_populated_zone(zone) {
290 		struct pglist_data *pgdat = zone->zone_pgdat;
291 		unsigned long max_drift, tolerate_drift;
292 
293 		threshold = calculate_normal_threshold(zone);
294 
295 		for_each_online_cpu(cpu) {
296 			int pgdat_threshold;
297 
298 			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
299 							= threshold;
300 
301 			/* Base nodestat threshold on the largest populated zone. */
302 			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
303 			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
304 				= max(threshold, pgdat_threshold);
305 		}
306 
307 		/*
308 		 * Only set percpu_drift_mark if there is a danger that
309 		 * NR_FREE_PAGES reports the low watermark is ok when in fact
310 		 * the min watermark could be breached by an allocation
311 		 */
312 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
313 		max_drift = num_online_cpus() * threshold;
314 		if (max_drift > tolerate_drift)
315 			zone->percpu_drift_mark = high_wmark_pages(zone) +
316 					max_drift;
317 	}
318 }
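
/*
 * Worked example (illustrative): with threshold = 125 and 64 online
 * CPUs, up to 64 * 125 = 8000 NR_FREE_PAGES updates can be pending in
 * per-cpu diffs. If the low-min watermark gap is smaller than that,
 * percpu_drift_mark makes readers fall back to an exact per-cpu sum
 * (zone_page_state_snapshot()) before trusting values near the
 * watermarks.
 */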
319 
320 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
321 				int (*calculate_pressure)(struct zone *))
322 {
323 	struct zone *zone;
324 	int cpu;
325 	int threshold;
326 	int i;
327 
328 	for (i = 0; i < pgdat->nr_zones; i++) {
329 		zone = &pgdat->node_zones[i];
330 		if (!zone->percpu_drift_mark)
331 			continue;
332 
333 		threshold = (*calculate_pressure)(zone);
334 		for_each_online_cpu(cpu)
335 			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
336 							= threshold;
337 	}
338 }
339 
340 /*
341  * For use when we know that interrupts are disabled,
342  * or when we know that preemption is disabled and that
343  * particular counter cannot be updated from interrupt context.
344  */
345 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
346 			   long delta)
347 {
348 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
349 	s8 __percpu *p = pcp->vm_stat_diff + item;
350 	long x;
351 	long t;
352 
353 	/*
354 	 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
355 	 * atomicity is provided by IRQs being disabled -- either explicitly
356 	 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
357 	 * CPU migrations and preemption potentially corrupts a counter so
358 	 * disable preemption.
359 	 */
360 	preempt_disable_nested();
361 
362 	x = delta + __this_cpu_read(*p);
363 
364 	t = __this_cpu_read(pcp->stat_threshold);
365 
366 	if (unlikely(abs(x) > t)) {
367 		zone_page_state_add(x, zone, item);
368 		x = 0;
369 	}
370 	__this_cpu_write(*p, x);
371 
372 	preempt_enable_nested();
373 }
374 EXPORT_SYMBOL(__mod_zone_page_state);
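
/*
 * Usage sketch (illustrative): a caller that already runs with IRQs
 * disabled, e.g. a free path holding the zone lock, may use the __
 * variant directly:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * Without such guarantees, use mod_zone_page_state() instead, which
 * provides the required serialization itself.
 */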
375 
376 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
377 				long delta)
378 {
379 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
380 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
381 	long x;
382 	long t;
383 
384 	if (vmstat_item_in_bytes(item)) {
385 		/*
386 		 * Only cgroups use subpage accounting right now; at
387 		 * the global level, these items still change in
388 		 * multiples of whole pages. Store them as pages
389 		 * internally to keep the per-cpu counters compact.
390 		 */
391 		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
392 		delta >>= PAGE_SHIFT;
393 	}
394 
395 	/* See __mod_zone_page_state */
396 	preempt_disable_nested();
397 
398 	x = delta + __this_cpu_read(*p);
399 
400 	t = __this_cpu_read(pcp->stat_threshold);
401 
402 	if (unlikely(abs(x) > t)) {
403 		node_page_state_add(x, pgdat, item);
404 		x = 0;
405 	}
406 	__this_cpu_write(*p, x);
407 
408 	preempt_enable_nested();
409 }
410 EXPORT_SYMBOL(__mod_node_page_state);
411 
412 /*
413  * Optimized increment and decrement functions.
414  *
415  * These are only for a single page and therefore can take a struct page *
416  * argument instead of struct zone *. This allows the inclusion of the code
417  * generated for page_zone(page) into the optimized functions.
418  *
419  * No overflow check is necessary and therefore the differential can be
420  * incremented or decremented in place which may allow the compilers to
421  * generate better code.
422  * The increment or decrement is known and therefore one boundary check can
423  * be omitted.
424  *
425  * NOTE: These functions are very performance sensitive. Change only
426  * with care.
427  *
428  * Some processors have inc/dec instructions that are atomic vs an interrupt.
429  * However, the code must first determine the differential location in a zone
430  * based on the processor number and then inc/dec the counter. There is no
431  * guarantee without disabling preemption that the processor will not change
432  * in between and therefore the atomicity vs. interrupt cannot be exploited
433  * in a useful way here.
434  */
435 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
436 {
437 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
438 	s8 __percpu *p = pcp->vm_stat_diff + item;
439 	s8 v, t;
440 
441 	/* See __mod_zone_page_state */
442 	preempt_disable_nested();
443 
444 	v = __this_cpu_inc_return(*p);
445 	t = __this_cpu_read(pcp->stat_threshold);
446 	if (unlikely(v > t)) {
447 		s8 overstep = t >> 1;
448 
449 		zone_page_state_add(v + overstep, zone, item);
450 		__this_cpu_write(*p, -overstep);
451 	}
452 
453 	preempt_enable_nested();
454 }
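
/*
 * Worked example (illustrative): with threshold t = 32, the increment
 * that takes the per-cpu diff to v = 33 folds 33 + 16 = 49 into the
 * zone counter and resets the diff to -16. Starting from -16 instead
 * of 0, another 48 increments fit below the threshold, so folds (and
 * the cacheline traffic they cause) happen less often.
 */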
455 
456 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
457 {
458 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
459 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
460 	s8 v, t;
461 
462 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
463 
464 	/* See __mod_zone_page_state */
465 	preempt_disable_nested();
466 
467 	v = __this_cpu_inc_return(*p);
468 	t = __this_cpu_read(pcp->stat_threshold);
469 	if (unlikely(v > t)) {
470 		s8 overstep = t >> 1;
471 
472 		node_page_state_add(v + overstep, pgdat, item);
473 		__this_cpu_write(*p, -overstep);
474 	}
475 
476 	preempt_enable_nested();
477 }
478 
479 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
480 {
481 	__inc_zone_state(page_zone(page), item);
482 }
483 EXPORT_SYMBOL(__inc_zone_page_state);
484 
485 void __inc_node_page_state(struct page *page, enum node_stat_item item)
486 {
487 	__inc_node_state(page_pgdat(page), item);
488 }
489 EXPORT_SYMBOL(__inc_node_page_state);
490 
491 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
492 {
493 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
494 	s8 __percpu *p = pcp->vm_stat_diff + item;
495 	s8 v, t;
496 
497 	/* See __mod_zone_page_state */
498 	preempt_disable_nested();
499 
500 	v = __this_cpu_dec_return(*p);
501 	t = __this_cpu_read(pcp->stat_threshold);
502 	if (unlikely(v < -t)) {
503 		s8 overstep = t >> 1;
504 
505 		zone_page_state_add(v - overstep, zone, item);
506 		__this_cpu_write(*p, overstep);
507 	}
508 
509 	preempt_enable_nested();
510 }
511 
512 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
513 {
514 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
515 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
516 	s8 v, t;
517 
518 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
519 
520 	/* See __mod_zone_page_state */
521 	preempt_disable_nested();
522 
523 	v = __this_cpu_dec_return(*p);
524 	t = __this_cpu_read(pcp->stat_threshold);
525 	if (unlikely(v < -t)) {
526 		s8 overstep = t >> 1;
527 
528 		node_page_state_add(v - overstep, pgdat, item);
529 		__this_cpu_write(*p, overstep);
530 	}
531 
532 	preempt_enable_nested();
533 }
534 
535 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
536 {
537 	__dec_zone_state(page_zone(page), item);
538 }
539 EXPORT_SYMBOL(__dec_zone_page_state);
540 
541 void __dec_node_page_state(struct page *page, enum node_stat_item item)
542 {
543 	__dec_node_state(page_pgdat(page), item);
544 }
545 EXPORT_SYMBOL(__dec_node_page_state);
546 
547 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
548 /*
549  * If we have cmpxchg_local support then we do not need to incur the overhead
550  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
551  *
552  * mod_state() modifies the zone counter state through atomic per cpu
553  * operations.
554  *
555  * Overstep mode specifies how overstep should be handled:
556  *     0       No overstepping
557  *     1       Overstepping half of threshold
558  *     -1      Overstepping minus half of threshold
559  */
560 static inline void mod_zone_state(struct zone *zone,
561        enum zone_stat_item item, long delta, int overstep_mode)
562 {
563 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
564 	s8 __percpu *p = pcp->vm_stat_diff + item;
565 	long n, t, z;
566 	s8 o;
567 
568 	o = this_cpu_read(*p);
569 	do {
570 		z = 0;  /* overflow to zone counters */
571 
572 		/*
573 		 * The fetching of the stat_threshold is racy. We may apply
574 	 * a counter threshold to the wrong cpu if we get
575 		 * rescheduled while executing here. However, the next
576 		 * counter update will apply the threshold again and
577 		 * therefore bring the counter under the threshold again.
578 		 *
579 	 * Most of the time the thresholds are the same anyway
580 		 * for all cpus in a zone.
581 		 */
582 		t = this_cpu_read(pcp->stat_threshold);
583 
584 		n = delta + (long)o;
585 
586 		if (abs(n) > t) {
587 			int os = overstep_mode * (t >> 1);
588 
589 			/* Overflow must be added to zone counters */
590 			z = n + os;
591 			n = -os;
592 		}
593 	} while (!this_cpu_try_cmpxchg(*p, &o, n));
594 
595 	if (z)
596 		zone_page_state_add(z, zone, item);
597 }
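
/*
 * Worked example (illustrative): t = 125, overstep_mode = 1, o = 120
 * and delta = 10 give n = 130 > t, so os = 62, z = 192 goes to the
 * zone counter and the per-cpu diff becomes -62; the sum still moves
 * by exactly delta. If another update wins the race first,
 * this_cpu_try_cmpxchg() fails, reloads o and the loop retries.
 */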
598 
599 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
600 			 long delta)
601 {
602 	mod_zone_state(zone, item, delta, 0);
603 }
604 EXPORT_SYMBOL(mod_zone_page_state);
605 
606 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
607 {
608 	mod_zone_state(page_zone(page), item, 1, 1);
609 }
610 EXPORT_SYMBOL(inc_zone_page_state);
611 
612 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
613 {
614 	mod_zone_state(page_zone(page), item, -1, -1);
615 }
616 EXPORT_SYMBOL(dec_zone_page_state);
617 
618 static inline void mod_node_state(struct pglist_data *pgdat,
619        enum node_stat_item item, int delta, int overstep_mode)
620 {
621 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
622 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
623 	long n, t, z;
624 	s8 o;
625 
626 	if (vmstat_item_in_bytes(item)) {
627 		/*
628 		 * Only cgroups use subpage accounting right now; at
629 		 * the global level, these items still change in
630 		 * multiples of whole pages. Store them as pages
631 		 * internally to keep the per-cpu counters compact.
632 		 */
633 		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
634 		delta >>= PAGE_SHIFT;
635 	}
636 
637 	o = this_cpu_read(*p);
638 	do {
639 		z = 0;  /* overflow to node counters */
640 
641 		/*
642 		 * The fetching of the stat_threshold is racy. We may apply
643 	 * a counter threshold to the wrong cpu if we get
644 		 * rescheduled while executing here. However, the next
645 		 * counter update will apply the threshold again and
646 		 * therefore bring the counter under the threshold again.
647 		 *
648 	 * Most of the time the thresholds are the same anyway
649 		 * for all cpus in a node.
650 		 */
651 		t = this_cpu_read(pcp->stat_threshold);
652 
653 		n = delta + (long)o;
654 
655 		if (abs(n) > t) {
656 			int os = overstep_mode * (t >> 1);
657 
658 			/* Overflow must be added to node counters */
659 			z = n + os;
660 			n = -os;
661 		}
662 	} while (!this_cpu_try_cmpxchg(*p, &o, n));
663 
664 	if (z)
665 		node_page_state_add(z, pgdat, item);
666 }
667 
668 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
669 					long delta)
670 {
671 	mod_node_state(pgdat, item, delta, 0);
672 }
673 EXPORT_SYMBOL(mod_node_page_state);
674 
675 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
676 {
677 	mod_node_state(pgdat, item, 1, 1);
678 }
679 
680 void inc_node_page_state(struct page *page, enum node_stat_item item)
681 {
682 	mod_node_state(page_pgdat(page), item, 1, 1);
683 }
684 EXPORT_SYMBOL(inc_node_page_state);
685 
686 void dec_node_page_state(struct page *page, enum node_stat_item item)
687 {
688 	mod_node_state(page_pgdat(page), item, -1, -1);
689 }
690 EXPORT_SYMBOL(dec_node_page_state);
691 #else
692 /*
693  * Use interrupt disable to serialize counter updates
694  */
695 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
696 			 long delta)
697 {
698 	unsigned long flags;
699 
700 	local_irq_save(flags);
701 	__mod_zone_page_state(zone, item, delta);
702 	local_irq_restore(flags);
703 }
704 EXPORT_SYMBOL(mod_zone_page_state);
705 
706 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
707 {
708 	unsigned long flags;
709 	struct zone *zone;
710 
711 	zone = page_zone(page);
712 	local_irq_save(flags);
713 	__inc_zone_state(zone, item);
714 	local_irq_restore(flags);
715 }
716 EXPORT_SYMBOL(inc_zone_page_state);
717 
718 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
719 {
720 	unsigned long flags;
721 
722 	local_irq_save(flags);
723 	__dec_zone_page_state(page, item);
724 	local_irq_restore(flags);
725 }
726 EXPORT_SYMBOL(dec_zone_page_state);
727 
728 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
729 {
730 	unsigned long flags;
731 
732 	local_irq_save(flags);
733 	__inc_node_state(pgdat, item);
734 	local_irq_restore(flags);
735 }
736 EXPORT_SYMBOL(inc_node_state);
737 
738 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
739 					long delta)
740 {
741 	unsigned long flags;
742 
743 	local_irq_save(flags);
744 	__mod_node_page_state(pgdat, item, delta);
745 	local_irq_restore(flags);
746 }
747 EXPORT_SYMBOL(mod_node_page_state);
748 
749 void inc_node_page_state(struct page *page, enum node_stat_item item)
750 {
751 	unsigned long flags;
752 	struct pglist_data *pgdat;
753 
754 	pgdat = page_pgdat(page);
755 	local_irq_save(flags);
756 	__inc_node_state(pgdat, item);
757 	local_irq_restore(flags);
758 }
759 EXPORT_SYMBOL(inc_node_page_state);
760 
761 void dec_node_page_state(struct page *page, enum node_stat_item item)
762 {
763 	unsigned long flags;
764 
765 	local_irq_save(flags);
766 	__dec_node_page_state(page, item);
767 	local_irq_restore(flags);
768 }
769 EXPORT_SYMBOL(dec_node_page_state);
770 #endif
771 
772 /*
773  * Fold a differential into the global counters.
774  * Returns the number of counters updated.
775  */
776 static int fold_diff(int *zone_diff, int *node_diff)
777 {
778 	int i;
779 	int changes = 0;
780 
781 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
782 		if (zone_diff[i]) {
783 			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
784 			changes++;
785 		}
786 
787 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
788 		if (node_diff[i]) {
789 			atomic_long_add(node_diff[i], &vm_node_stat[i]);
790 			changes++;
791 		}
792 	return changes;
793 }
794 
795 /*
796  * Update the zone counters for the current cpu.
797  *
798  * Note that refresh_cpu_vm_stats strives to only access
799  * node local memory. The per cpu pagesets on remote zones are placed
800  * in the memory local to the processor using that pageset. So the
801  * loop over all zones will access a series of cachelines local to
802  * the processor.
803  *
804  * The call to zone_page_state_add updates the cachelines with the
805  * statistics in the remote zone struct as well as the global cachelines
806  * with the global counters. These could cause remote node cache line
807  * bouncing and will have to be only done when necessary.
808  *
809  * The function returns the number of global counters updated.
810  */
811 static int refresh_cpu_vm_stats(bool do_pagesets)
812 {
813 	struct pglist_data *pgdat;
814 	struct zone *zone;
815 	int i;
816 	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
817 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
818 	int changes = 0;
819 
820 	for_each_populated_zone(zone) {
821 		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
822 		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
823 
824 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
825 			int v;
826 
827 			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
828 			if (v) {
830 				atomic_long_add(v, &zone->vm_stat[i]);
831 				global_zone_diff[i] += v;
832 #ifdef CONFIG_NUMA
833 				/* 3 seconds idle till flush */
834 				__this_cpu_write(pcp->expire, 3);
835 #endif
836 			}
837 		}
838 
839 		if (do_pagesets) {
840 			cond_resched();
841 
842 			changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
843 #ifdef CONFIG_NUMA
844 			/*
845 			 * Deal with draining the remote pageset of this
846 			 * processor
847 			 *
848 			 * Check if there are pages remaining in this pageset
849 			 * if not then there is nothing to expire.
850 			 */
851 			if (!__this_cpu_read(pcp->expire) ||
852 			       !__this_cpu_read(pcp->count))
853 				continue;
854 
855 			/*
856 			 * We never drain zones local to this processor.
857 			 */
858 			if (zone_to_nid(zone) == numa_node_id()) {
859 				__this_cpu_write(pcp->expire, 0);
860 				continue;
861 			}
862 
863 			if (__this_cpu_dec_return(pcp->expire)) {
864 				changes++;
865 				continue;
866 			}
867 
868 			if (__this_cpu_read(pcp->count)) {
869 				drain_zone_pages(zone, this_cpu_ptr(pcp));
870 				changes++;
871 			}
872 #endif
873 		}
874 	}
875 
876 	for_each_online_pgdat(pgdat) {
877 		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
878 
879 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
880 			int v;
881 
882 			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
883 			if (v) {
884 				atomic_long_add(v, &pgdat->vm_stat[i]);
885 				global_node_diff[i] += v;
886 			}
887 		}
888 	}
889 
890 	changes += fold_diff(global_zone_diff, global_node_diff);
891 	return changes;
892 }
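
/*
 * Illustrative note: the returned count of updated counters lets the
 * caller (e.g. the periodic vmstat worker later in this file) decide
 * whether another refresh pass needs to be scheduled; 0 means this
 * CPU's diffs were already clean.
 */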
893 
894 /*
895  * Fold the data for an offline cpu into the global array.
896  * There cannot be any access by the offline cpu and therefore
897  * synchronization is simplified.
898  */
899 void cpu_vm_stats_fold(int cpu)
900 {
901 	struct pglist_data *pgdat;
902 	struct zone *zone;
903 	int i;
904 	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
905 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
906 
907 	for_each_populated_zone(zone) {
908 		struct per_cpu_zonestat *pzstats;
909 
910 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
911 
912 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
913 			if (pzstats->vm_stat_diff[i]) {
914 				int v;
915 
916 				v = pzstats->vm_stat_diff[i];
917 				pzstats->vm_stat_diff[i] = 0;
918 				atomic_long_add(v, &zone->vm_stat[i]);
919 				global_zone_diff[i] += v;
920 			}
921 		}
922 #ifdef CONFIG_NUMA
923 		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
924 			if (pzstats->vm_numa_event[i]) {
925 				unsigned long v;
926 
927 				v = pzstats->vm_numa_event[i];
928 				pzstats->vm_numa_event[i] = 0;
929 				zone_numa_event_add(v, zone, i);
930 			}
931 		}
932 #endif
933 	}
934 
935 	for_each_online_pgdat(pgdat) {
936 		struct per_cpu_nodestat *p;
937 
938 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
939 
940 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
941 			if (p->vm_node_stat_diff[i]) {
942 				int v;
943 
944 				v = p->vm_node_stat_diff[i];
945 				p->vm_node_stat_diff[i] = 0;
946 				atomic_long_add(v, &pgdat->vm_stat[i]);
947 				global_node_diff[i] += v;
948 			}
949 	}
950 
951 	fold_diff(global_zone_diff, global_node_diff);
952 }
953 
954 /*
955  * This is only called if !populated_zone(zone), which implies no other users of
956  * pzstats->vm_stat_diff[] exist.
957  */
958 void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
959 {
960 	unsigned long v;
961 	int i;
962 
963 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
964 		if (pzstats->vm_stat_diff[i]) {
965 			v = pzstats->vm_stat_diff[i];
966 			pzstats->vm_stat_diff[i] = 0;
967 			zone_page_state_add(v, zone, i);
968 		}
969 	}
970 
971 #ifdef CONFIG_NUMA
972 	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
973 		if (pzstats->vm_numa_event[i]) {
974 			v = pzstats->vm_numa_event[i];
975 			pzstats->vm_numa_event[i] = 0;
976 			zone_numa_event_add(v, zone, i);
977 		}
978 	}
979 #endif
980 }
981 #endif
982 
983 #ifdef CONFIG_NUMA
984 /*
985  * Determine the per node value of a stat item. This function
986  * is called frequently in a NUMA machine, so try to be as
987  * frugal as possible.
988  */
989 unsigned long sum_zone_node_page_state(int node,
990 				 enum zone_stat_item item)
991 {
992 	struct zone *zones = NODE_DATA(node)->node_zones;
993 	int i;
994 	unsigned long count = 0;
995 
996 	for (i = 0; i < MAX_NR_ZONES; i++)
997 		count += zone_page_state(zones + i, item);
998 
999 	return count;
1000 }
1001 
1002 /* Determine the per node value of a numa stat item. */
1003 unsigned long sum_zone_numa_event_state(int node,
1004 				 enum numa_stat_item item)
1005 {
1006 	struct zone *zones = NODE_DATA(node)->node_zones;
1007 	unsigned long count = 0;
1008 	int i;
1009 
1010 	for (i = 0; i < MAX_NR_ZONES; i++)
1011 		count += zone_numa_event_state(zones + i, item);
1012 
1013 	return count;
1014 }
1015 
1016 /*
1017  * Determine the per node value of a stat item.
1018  */
1019 unsigned long node_page_state_pages(struct pglist_data *pgdat,
1020 				    enum node_stat_item item)
1021 {
1022 	long x = atomic_long_read(&pgdat->vm_stat[item]);
1023 #ifdef CONFIG_SMP
1024 	if (x < 0)
1025 		x = 0;
1026 #endif
1027 	return x;
1028 }
1029 
1030 unsigned long node_page_state(struct pglist_data *pgdat,
1031 			      enum node_stat_item item)
1032 {
1033 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
1034 
1035 	return node_page_state_pages(pgdat, item);
1036 }
1037 #endif
1038 
1039 /*
1040  * Count the number of pages that "struct page" and "struct page_ext" consume.
1041  * nr_memmap_boot_pages: # of pages allocated by the boot allocator
1042  * nr_memmap_pages: # of pages allocated by the buddy allocator
1043  */
1044 static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
1045 static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);
1046 
1047 void memmap_boot_pages_add(long delta)
1048 {
1049 	atomic_long_add(delta, &nr_memmap_boot_pages);
1050 }
1051 
1052 void memmap_pages_add(long delta)
1053 {
1054 	atomic_long_add(delta, &nr_memmap_pages);
1055 }
1056 
1057 #ifdef CONFIG_COMPACTION
1058 
1059 struct contig_page_info {
1060 	unsigned long free_pages;
1061 	unsigned long free_blocks_total;
1062 	unsigned long free_blocks_suitable;
1063 };
1064 
1065 /*
1066  * Calculate the number of free pages in a zone, how many contiguous
1067  * pages are free and how many are large enough to satisfy an allocation of
1068  * the target size. Note that this function makes no attempt to estimate
1069  * how many suitable free blocks there *might* be if MOVABLE pages were
1070  * migrated. Calculating that is possible but expensive, and it can be
1071  * done from userspace.
1072  */
1073 static void fill_contig_page_info(struct zone *zone,
1074 				unsigned int suitable_order,
1075 				struct contig_page_info *info)
1076 {
1077 	unsigned int order;
1078 
1079 	info->free_pages = 0;
1080 	info->free_blocks_total = 0;
1081 	info->free_blocks_suitable = 0;
1082 
1083 	for (order = 0; order < NR_PAGE_ORDERS; order++) {
1084 		unsigned long blocks;
1085 
1086 		/*
1087 		 * Count number of free blocks.
1088 		 *
1089 		 * Access to nr_free is lockless as nr_free is used only for
1090 		 * diagnostic purposes. Use data_race to avoid KCSAN warning.
1091 		 */
1092 		blocks = data_race(zone->free_area[order].nr_free);
1093 		info->free_blocks_total += blocks;
1094 
1095 		/* Count free base pages */
1096 		info->free_pages += blocks << order;
1097 
1098 		/* Count the suitable free blocks */
1099 		if (order >= suitable_order)
1100 			info->free_blocks_suitable += blocks <<
1101 						(order - suitable_order);
1102 	}
1103 }
1104 
1105 /*
1106  * A fragmentation index only makes sense if an allocation of a requested
1107  * size would fail. If that is true, the fragmentation index indicates
1108  * whether external fragmentation or a lack of memory was the problem.
1109  * The value can be used to determine if page reclaim or compaction
1110  * should be used
1111  */
1112 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1113 {
1114 	unsigned long requested = 1UL << order;
1115 
1116 	if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
1117 		return 0;
1118 
1119 	if (!info->free_blocks_total)
1120 		return 0;
1121 
1122 	/* Fragmentation index only makes sense when a request would fail */
1123 	if (info->free_blocks_suitable)
1124 		return -1000;
1125 
1126 	/*
1127 	 * Index is between 0 and 1 so return within 3 decimal places
1128 	 *
1129 	 * 0 => allocation would fail due to lack of memory
1130 	 * 1 => allocation would fail due to fragmentation
1131 	 */
1132 	return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
1133 }
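
/*
 * Worked example (illustrative): order = 4 (requested = 16),
 * free_pages = 1000, free_blocks_total = 900 and no suitable blocks:
 * index = 1000 - (1000 + 1000 * 1000 / 16) / 900 = 1000 - 70 = 930,
 * i.e. 0.930: failure is mostly due to fragmentation, so compaction
 * is more promising than reclaim.
 */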
1134 
1135 /*
1136  * Calculates external fragmentation within a zone wrt the given order.
1137  * It is defined as the percentage of pages found in blocks of size
1138  * less than 1 << order. It returns values in range [0, 100].
1139  */
1140 unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1141 {
1142 	struct contig_page_info info;
1143 
1144 	fill_contig_page_info(zone, order, &info);
1145 	if (info.free_pages == 0)
1146 		return 0;
1147 
1148 	return div_u64((info.free_pages -
1149 			(info.free_blocks_suitable << order)) * 100,
1150 			info.free_pages);
1151 }
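
/*
 * Worked example (illustrative): free_pages = 1000 with
 * free_blocks_suitable = 40 at order = 4 gives
 * (1000 - (40 << 4)) * 100 / 1000 = 36, i.e. 36% of the free pages sit
 * in blocks too small to serve an order-4 request.
 */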
1152 
1153 /* Same as __fragmentation_index() but allocates contig_page_info on the stack */
1154 int fragmentation_index(struct zone *zone, unsigned int order)
1155 {
1156 	struct contig_page_info info;
1157 
1158 	fill_contig_page_info(zone, order, &info);
1159 	return __fragmentation_index(order, &info);
1160 }
1161 #endif
1162 
1163 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1164     defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1165 #ifdef CONFIG_ZONE_DMA
1166 #define TEXT_FOR_DMA(xx, yy) [xx##_DMA] = yy "_dma",
1167 #else
1168 #define TEXT_FOR_DMA(xx, yy)
1169 #endif
1170 
1171 #ifdef CONFIG_ZONE_DMA32
1172 #define TEXT_FOR_DMA32(xx, yy) [xx##_DMA32] = yy "_dma32",
1173 #else
1174 #define TEXT_FOR_DMA32(xx, yy)
1175 #endif
1176 
1177 #ifdef CONFIG_HIGHMEM
1178 #define TEXT_FOR_HIGHMEM(xx, yy) [xx##_HIGH] = yy "_high",
1179 #else
1180 #define TEXT_FOR_HIGHMEM(xx, yy)
1181 #endif
1182 
1183 #ifdef CONFIG_ZONE_DEVICE
1184 #define TEXT_FOR_DEVICE(xx, yy) [xx##_DEVICE] = yy "_device",
1185 #else
1186 #define TEXT_FOR_DEVICE(xx, yy)
1187 #endif
1188 
1189 #define TEXTS_FOR_ZONES(xx, yy)			\
1190 	TEXT_FOR_DMA(xx, yy)			\
1191 	TEXT_FOR_DMA32(xx, yy)			\
1192 	[xx##_NORMAL] = yy "_normal",		\
1193 	TEXT_FOR_HIGHMEM(xx, yy)		\
1194 	[xx##_MOVABLE] = yy "_movable",		\
1195 	TEXT_FOR_DEVICE(xx, yy)
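
/*
 * Illustrative expansion (for the macro in isolation, ignoring the
 * offset arithmetic used at the call sites below):
 * TEXTS_FOR_ZONES(PGALLOC, "pgalloc") emits designated initializers
 * such as [PGALLOC_NORMAL] = "pgalloc_normal" and
 * [PGALLOC_MOVABLE] = "pgalloc_movable", plus entries like
 * [PGALLOC_DMA32] = "pgalloc_dma32" when the matching zone is
 * configured.
 */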
1196 
1197 const char * const vmstat_text[] = {
1198 	/* enum zone_stat_item counters */
1199 #define I(x) (x)
1200 	[I(NR_FREE_PAGES)]			= "nr_free_pages",
1201 	[I(NR_FREE_PAGES_BLOCKS)]		= "nr_free_pages_blocks",
1202 	[I(NR_ZONE_INACTIVE_ANON)]		= "nr_zone_inactive_anon",
1203 	[I(NR_ZONE_ACTIVE_ANON)]		= "nr_zone_active_anon",
1204 	[I(NR_ZONE_INACTIVE_FILE)]		= "nr_zone_inactive_file",
1205 	[I(NR_ZONE_ACTIVE_FILE)]		= "nr_zone_active_file",
1206 	[I(NR_ZONE_UNEVICTABLE)]		= "nr_zone_unevictable",
1207 	[I(NR_ZONE_WRITE_PENDING)]		= "nr_zone_write_pending",
1208 	[I(NR_MLOCK)]				= "nr_mlock",
1209 #if IS_ENABLED(CONFIG_ZSMALLOC)
1210 	[I(NR_ZSPAGES)]				= "nr_zspages",
1211 #endif
1212 	[I(NR_FREE_CMA_PAGES)]			= "nr_free_cma",
1213 #ifdef CONFIG_UNACCEPTED_MEMORY
1214 	[I(NR_UNACCEPTED)]			= "nr_unaccepted",
1215 #endif
1216 #undef I
1217 
1218 	/* enum numa_stat_item counters */
1219 #define I(x) (NR_VM_ZONE_STAT_ITEMS + x)
1220 #ifdef CONFIG_NUMA
1221 	[I(NUMA_HIT)]				= "numa_hit",
1222 	[I(NUMA_MISS)]				= "numa_miss",
1223 	[I(NUMA_FOREIGN)]			= "numa_foreign",
1224 	[I(NUMA_INTERLEAVE_HIT)]		= "numa_interleave",
1225 	[I(NUMA_LOCAL)]				= "numa_local",
1226 	[I(NUMA_OTHER)]				= "numa_other",
1227 #endif
1228 #undef I
1229 
1230 	/* enum node_stat_item counters */
1231 #define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + x)
1232 	[I(NR_INACTIVE_ANON)]			= "nr_inactive_anon",
1233 	[I(NR_ACTIVE_ANON)]			= "nr_active_anon",
1234 	[I(NR_INACTIVE_FILE)]			= "nr_inactive_file",
1235 	[I(NR_ACTIVE_FILE)]			= "nr_active_file",
1236 	[I(NR_UNEVICTABLE)]			= "nr_unevictable",
1237 	[I(NR_SLAB_RECLAIMABLE_B)]		= "nr_slab_reclaimable",
1238 	[I(NR_SLAB_UNRECLAIMABLE_B)]		= "nr_slab_unreclaimable",
1239 	[I(NR_ISOLATED_ANON)]			= "nr_isolated_anon",
1240 	[I(NR_ISOLATED_FILE)]			= "nr_isolated_file",
1241 	[I(WORKINGSET_NODES)]			= "workingset_nodes",
1242 	[I(WORKINGSET_REFAULT_ANON)]		= "workingset_refault_anon",
1243 	[I(WORKINGSET_REFAULT_FILE)]		= "workingset_refault_file",
1244 	[I(WORKINGSET_ACTIVATE_ANON)]		= "workingset_activate_anon",
1245 	[I(WORKINGSET_ACTIVATE_FILE)]		= "workingset_activate_file",
1246 	[I(WORKINGSET_RESTORE_ANON)]		= "workingset_restore_anon",
1247 	[I(WORKINGSET_RESTORE_FILE)]		= "workingset_restore_file",
1248 	[I(WORKINGSET_NODERECLAIM)]		= "workingset_nodereclaim",
1249 	[I(NR_ANON_MAPPED)]			= "nr_anon_pages",
1250 	[I(NR_FILE_MAPPED)]			= "nr_mapped",
1251 	[I(NR_FILE_PAGES)]			= "nr_file_pages",
1252 	[I(NR_FILE_DIRTY)]			= "nr_dirty",
1253 	[I(NR_WRITEBACK)]			= "nr_writeback",
1254 	[I(NR_SHMEM)]				= "nr_shmem",
1255 	[I(NR_SHMEM_THPS)]			= "nr_shmem_hugepages",
1256 	[I(NR_SHMEM_PMDMAPPED)]			= "nr_shmem_pmdmapped",
1257 	[I(NR_FILE_THPS)]			= "nr_file_hugepages",
1258 	[I(NR_FILE_PMDMAPPED)]			= "nr_file_pmdmapped",
1259 	[I(NR_ANON_THPS)]			= "nr_anon_transparent_hugepages",
1260 	[I(NR_VMSCAN_WRITE)]			= "nr_vmscan_write",
1261 	[I(NR_VMSCAN_IMMEDIATE)]		= "nr_vmscan_immediate_reclaim",
1262 	[I(NR_DIRTIED)]				= "nr_dirtied",
1263 	[I(NR_WRITTEN)]				= "nr_written",
1264 	[I(NR_THROTTLED_WRITTEN)]		= "nr_throttled_written",
1265 	[I(NR_KERNEL_MISC_RECLAIMABLE)]		= "nr_kernel_misc_reclaimable",
1266 	[I(NR_FOLL_PIN_ACQUIRED)]		= "nr_foll_pin_acquired",
1267 	[I(NR_FOLL_PIN_RELEASED)]		= "nr_foll_pin_released",
1268 	[I(NR_KERNEL_STACK_KB)]			= "nr_kernel_stack",
1269 #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1270 	[I(NR_KERNEL_SCS_KB)]			= "nr_shadow_call_stack",
1271 #endif
1272 	[I(NR_PAGETABLE)]			= "nr_page_table_pages",
1273 	[I(NR_SECONDARY_PAGETABLE)]		= "nr_sec_page_table_pages",
1274 #ifdef CONFIG_IOMMU_SUPPORT
1275 	[I(NR_IOMMU_PAGES)]			= "nr_iommu_pages",
1276 #endif
1277 #ifdef CONFIG_SWAP
1278 	[I(NR_SWAPCACHE)]			= "nr_swapcached",
1279 #endif
1280 #ifdef CONFIG_NUMA_BALANCING
1281 	[I(PGPROMOTE_SUCCESS)]			= "pgpromote_success",
1282 	[I(PGPROMOTE_CANDIDATE)]		= "pgpromote_candidate",
1283 	[I(PGPROMOTE_CANDIDATE_NRL)]		= "pgpromote_candidate_nrl",
1284 #endif
1285 	[I(PGDEMOTE_KSWAPD)]			= "pgdemote_kswapd",
1286 	[I(PGDEMOTE_DIRECT)]			= "pgdemote_direct",
1287 	[I(PGDEMOTE_KHUGEPAGED)]		= "pgdemote_khugepaged",
1288 	[I(PGDEMOTE_PROACTIVE)]			= "pgdemote_proactive",
1289 #ifdef CONFIG_HUGETLB_PAGE
1290 	[I(NR_HUGETLB)]				= "nr_hugetlb",
1291 #endif
1292 	[I(NR_BALLOON_PAGES)]			= "nr_balloon_pages",
1293 #undef I
1294 
1295 	/* system-wide enum vm_stat_item counters */
1296 #define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
1297 	     NR_VM_NODE_STAT_ITEMS + x)
1298 	[I(NR_DIRTY_THRESHOLD)]			= "nr_dirty_threshold",
1299 	[I(NR_DIRTY_BG_THRESHOLD)]		= "nr_dirty_background_threshold",
1300 	[I(NR_MEMMAP_PAGES)]			= "nr_memmap_pages",
1301 	[I(NR_MEMMAP_BOOT_PAGES)]		= "nr_memmap_boot_pages",
1302 #undef I
1303 
1304 #if defined(CONFIG_VM_EVENT_COUNTERS)
1305 	/* enum vm_event_item counters */
1306 #define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
1307 	     NR_VM_NODE_STAT_ITEMS + NR_VM_STAT_ITEMS + x)
1308 
1309 	[I(PGPGIN)]				= "pgpgin",
1310 	[I(PGPGOUT)]				= "pgpgout",
1311 	[I(PSWPIN)]				= "pswpin",
1312 	[I(PSWPOUT)]				= "pswpout",
1313 
1314 #define OFF (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
1315 	     NR_VM_NODE_STAT_ITEMS + NR_VM_STAT_ITEMS)
1316 	TEXTS_FOR_ZONES(OFF+PGALLOC, "pgalloc")
1317 	TEXTS_FOR_ZONES(OFF+ALLOCSTALL, "allocstall")
1318 	TEXTS_FOR_ZONES(OFF+PGSCAN_SKIP, "pgskip")
1319 #undef OFF
1320 
1321 	[I(PGFREE)]				= "pgfree",
1322 	[I(PGACTIVATE)]				= "pgactivate",
1323 	[I(PGDEACTIVATE)]			= "pgdeactivate",
1324 	[I(PGLAZYFREE)]				= "pglazyfree",
1325 
1326 	[I(PGFAULT)]				= "pgfault",
1327 	[I(PGMAJFAULT)]				= "pgmajfault",
1328 	[I(PGLAZYFREED)]			= "pglazyfreed",
1329 
1330 	[I(PGREFILL)]				= "pgrefill",
1331 	[I(PGREUSE)]				= "pgreuse",
1332 	[I(PGSTEAL_KSWAPD)]			= "pgsteal_kswapd",
1333 	[I(PGSTEAL_DIRECT)]			= "pgsteal_direct",
1334 	[I(PGSTEAL_KHUGEPAGED)]			= "pgsteal_khugepaged",
1335 	[I(PGSTEAL_PROACTIVE)]			= "pgsteal_proactive",
1336 	[I(PGSCAN_KSWAPD)]			= "pgscan_kswapd",
1337 	[I(PGSCAN_DIRECT)]			= "pgscan_direct",
1338 	[I(PGSCAN_KHUGEPAGED)]			= "pgscan_khugepaged",
1339 	[I(PGSCAN_PROACTIVE)]			= "pgscan_proactive",
1340 	[I(PGSCAN_DIRECT_THROTTLE)]		= "pgscan_direct_throttle",
1341 	[I(PGSCAN_ANON)]			= "pgscan_anon",
1342 	[I(PGSCAN_FILE)]			= "pgscan_file",
1343 	[I(PGSTEAL_ANON)]			= "pgsteal_anon",
1344 	[I(PGSTEAL_FILE)]			= "pgsteal_file",
1345 
1346 #ifdef CONFIG_NUMA
1347 	[I(PGSCAN_ZONE_RECLAIM_SUCCESS)]	= "zone_reclaim_success",
1348 	[I(PGSCAN_ZONE_RECLAIM_FAILED)]		= "zone_reclaim_failed",
1349 #endif
1350 	[I(PGINODESTEAL)]			= "pginodesteal",
1351 	[I(SLABS_SCANNED)]			= "slabs_scanned",
1352 	[I(KSWAPD_INODESTEAL)]			= "kswapd_inodesteal",
1353 	[I(KSWAPD_LOW_WMARK_HIT_QUICKLY)]	= "kswapd_low_wmark_hit_quickly",
1354 	[I(KSWAPD_HIGH_WMARK_HIT_QUICKLY)]	= "kswapd_high_wmark_hit_quickly",
1355 	[I(PAGEOUTRUN)]				= "pageoutrun",
1356 
1357 	[I(PGROTATED)]				= "pgrotated",
1358 
1359 	[I(DROP_PAGECACHE)]			= "drop_pagecache",
1360 	[I(DROP_SLAB)]				= "drop_slab",
1361 	[I(OOM_KILL)]				= "oom_kill",
1362 
1363 #ifdef CONFIG_NUMA_BALANCING
1364 	[I(NUMA_PTE_UPDATES)]			= "numa_pte_updates",
1365 	[I(NUMA_HUGE_PTE_UPDATES)]		= "numa_huge_pte_updates",
1366 	[I(NUMA_HINT_FAULTS)]			= "numa_hint_faults",
1367 	[I(NUMA_HINT_FAULTS_LOCAL)]		= "numa_hint_faults_local",
1368 	[I(NUMA_PAGE_MIGRATE)]			= "numa_pages_migrated",
1369 #endif
1370 #ifdef CONFIG_MIGRATION
1371 	[I(PGMIGRATE_SUCCESS)]			= "pgmigrate_success",
1372 	[I(PGMIGRATE_FAIL)]			= "pgmigrate_fail",
1373 	[I(THP_MIGRATION_SUCCESS)]		= "thp_migration_success",
1374 	[I(THP_MIGRATION_FAIL)]			= "thp_migration_fail",
1375 	[I(THP_MIGRATION_SPLIT)]		= "thp_migration_split",
1376 #endif
1377 #ifdef CONFIG_COMPACTION
1378 	[I(COMPACTMIGRATE_SCANNED)]		= "compact_migrate_scanned",
1379 	[I(COMPACTFREE_SCANNED)]		= "compact_free_scanned",
1380 	[I(COMPACTISOLATED)]			= "compact_isolated",
1381 	[I(COMPACTSTALL)]			= "compact_stall",
1382 	[I(COMPACTFAIL)]			= "compact_fail",
1383 	[I(COMPACTSUCCESS)]			= "compact_success",
1384 	[I(KCOMPACTD_WAKE)]			= "compact_daemon_wake",
1385 	[I(KCOMPACTD_MIGRATE_SCANNED)]		= "compact_daemon_migrate_scanned",
1386 	[I(KCOMPACTD_FREE_SCANNED)]		= "compact_daemon_free_scanned",
1387 #endif
1388 
1389 #ifdef CONFIG_HUGETLB_PAGE
1390 	[I(HTLB_BUDDY_PGALLOC)]			= "htlb_buddy_alloc_success",
1391 	[I(HTLB_BUDDY_PGALLOC_FAIL)]		= "htlb_buddy_alloc_fail",
1392 #endif
1393 #ifdef CONFIG_CMA
1394 	[I(CMA_ALLOC_SUCCESS)]			= "cma_alloc_success",
1395 	[I(CMA_ALLOC_FAIL)]			= "cma_alloc_fail",
1396 #endif
1397 	[I(UNEVICTABLE_PGCULLED)]		= "unevictable_pgs_culled",
1398 	[I(UNEVICTABLE_PGSCANNED)]		= "unevictable_pgs_scanned",
1399 	[I(UNEVICTABLE_PGRESCUED)]		= "unevictable_pgs_rescued",
1400 	[I(UNEVICTABLE_PGMLOCKED)]		= "unevictable_pgs_mlocked",
1401 	[I(UNEVICTABLE_PGMUNLOCKED)]		= "unevictable_pgs_munlocked",
1402 	[I(UNEVICTABLE_PGCLEARED)]		= "unevictable_pgs_cleared",
1403 	[I(UNEVICTABLE_PGSTRANDED)]		= "unevictable_pgs_stranded",
1404 
1405 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1406 	[I(THP_FAULT_ALLOC)]			= "thp_fault_alloc",
1407 	[I(THP_FAULT_FALLBACK)]			= "thp_fault_fallback",
1408 	[I(THP_FAULT_FALLBACK_CHARGE)]		= "thp_fault_fallback_charge",
1409 	[I(THP_COLLAPSE_ALLOC)]			= "thp_collapse_alloc",
1410 	[I(THP_COLLAPSE_ALLOC_FAILED)]		= "thp_collapse_alloc_failed",
1411 	[I(THP_FILE_ALLOC)]			= "thp_file_alloc",
1412 	[I(THP_FILE_FALLBACK)]			= "thp_file_fallback",
1413 	[I(THP_FILE_FALLBACK_CHARGE)]		= "thp_file_fallback_charge",
1414 	[I(THP_FILE_MAPPED)]			= "thp_file_mapped",
1415 	[I(THP_SPLIT_PAGE)]			= "thp_split_page",
1416 	[I(THP_SPLIT_PAGE_FAILED)]		= "thp_split_page_failed",
1417 	[I(THP_DEFERRED_SPLIT_PAGE)]		= "thp_deferred_split_page",
1418 	[I(THP_UNDERUSED_SPLIT_PAGE)]		= "thp_underused_split_page",
1419 	[I(THP_SPLIT_PMD)]			= "thp_split_pmd",
1420 	[I(THP_SCAN_EXCEED_NONE_PTE)]		= "thp_scan_exceed_none_pte",
1421 	[I(THP_SCAN_EXCEED_SWAP_PTE)]		= "thp_scan_exceed_swap_pte",
1422 	[I(THP_SCAN_EXCEED_SHARED_PTE)]		= "thp_scan_exceed_share_pte",
1423 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1424 	[I(THP_SPLIT_PUD)]			= "thp_split_pud",
1425 #endif
1426 	[I(THP_ZERO_PAGE_ALLOC)]		= "thp_zero_page_alloc",
1427 	[I(THP_ZERO_PAGE_ALLOC_FAILED)]		= "thp_zero_page_alloc_failed",
1428 	[I(THP_SWPOUT)]				= "thp_swpout",
1429 	[I(THP_SWPOUT_FALLBACK)]		= "thp_swpout_fallback",
1430 #endif
1431 #ifdef CONFIG_MEMORY_BALLOON
1432 	[I(BALLOON_INFLATE)]			= "balloon_inflate",
1433 	[I(BALLOON_DEFLATE)]			= "balloon_deflate",
1434 #ifdef CONFIG_BALLOON_COMPACTION
1435 	[I(BALLOON_MIGRATE)]			= "balloon_migrate",
1436 #endif
1437 #endif /* CONFIG_MEMORY_BALLOON */
1438 #ifdef CONFIG_DEBUG_TLBFLUSH
1439 	[I(NR_TLB_REMOTE_FLUSH)]		= "nr_tlb_remote_flush",
1440 	[I(NR_TLB_REMOTE_FLUSH_RECEIVED)]	= "nr_tlb_remote_flush_received",
1441 	[I(NR_TLB_LOCAL_FLUSH_ALL)]		= "nr_tlb_local_flush_all",
1442 	[I(NR_TLB_LOCAL_FLUSH_ONE)]		= "nr_tlb_local_flush_one",
1443 #endif /* CONFIG_DEBUG_TLBFLUSH */
1444 
1445 #ifdef CONFIG_SWAP
1446 	[I(SWAP_RA)]				= "swap_ra",
1447 	[I(SWAP_RA_HIT)]			= "swap_ra_hit",
1448 	[I(SWPIN_ZERO)]				= "swpin_zero",
1449 	[I(SWPOUT_ZERO)]			= "swpout_zero",
1450 #ifdef CONFIG_KSM
1451 	[I(KSM_SWPIN_COPY)]			= "ksm_swpin_copy",
1452 #endif
1453 #endif
1454 #ifdef CONFIG_KSM
1455 	[I(COW_KSM)]				= "cow_ksm",
1456 #endif
1457 #ifdef CONFIG_ZSWAP
1458 	[I(ZSWPIN)]				= "zswpin",
1459 	[I(ZSWPOUT)]				= "zswpout",
1460 	[I(ZSWPWB)]				= "zswpwb",
1461 #endif
1462 #ifdef CONFIG_X86
1463 	[I(DIRECT_MAP_LEVEL2_SPLIT)]		= "direct_map_level2_splits",
1464 	[I(DIRECT_MAP_LEVEL3_SPLIT)]		= "direct_map_level3_splits",
1465 	[I(DIRECT_MAP_LEVEL2_COLLAPSE)]		= "direct_map_level2_collapses",
1466 	[I(DIRECT_MAP_LEVEL3_COLLAPSE)]		= "direct_map_level3_collapses",
1467 #endif
1468 #ifdef CONFIG_PER_VMA_LOCK_STATS
1469 	[I(VMA_LOCK_SUCCESS)]			= "vma_lock_success",
1470 	[I(VMA_LOCK_ABORT)]			= "vma_lock_abort",
1471 	[I(VMA_LOCK_RETRY)]			= "vma_lock_retry",
1472 	[I(VMA_LOCK_MISS)]			= "vma_lock_miss",
1473 #endif
1474 #ifdef CONFIG_DEBUG_STACK_USAGE
1475 	[I(KSTACK_1K)]				= "kstack_1k",
1476 #if THREAD_SIZE > 1024
1477 	[I(KSTACK_2K)]				= "kstack_2k",
1478 #endif
1479 #if THREAD_SIZE > 2048
1480 	[I(KSTACK_4K)]				= "kstack_4k",
1481 #endif
1482 #if THREAD_SIZE > 4096
1483 	[I(KSTACK_8K)]				= "kstack_8k",
1484 #endif
1485 #if THREAD_SIZE > 8192
1486 	[I(KSTACK_16K)]				= "kstack_16k",
1487 #endif
1488 #if THREAD_SIZE > 16384
1489 	[I(KSTACK_32K)]				= "kstack_32k",
1490 #endif
1491 #if THREAD_SIZE > 32768
1492 	[I(KSTACK_64K)]				= "kstack_64k",
1493 #endif
1494 #if THREAD_SIZE > 65536
1495 	[I(KSTACK_REST)]			= "kstack_rest",
1496 #endif
1497 #endif
1498 #undef I
1499 #endif /* CONFIG_VM_EVENT_COUNTERS */
1500 };
1501 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1502 
1503 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1504      defined(CONFIG_PROC_FS)
1505 static void *frag_start(struct seq_file *m, loff_t *pos)
1506 {
1507 	pg_data_t *pgdat;
1508 	loff_t node = *pos;
1509 
1510 	for (pgdat = first_online_pgdat();
1511 	     pgdat && node;
1512 	     pgdat = next_online_pgdat(pgdat))
1513 		--node;
1514 
1515 	return pgdat;
1516 }
1517 
1518 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1519 {
1520 	pg_data_t *pgdat = (pg_data_t *)arg;
1521 
1522 	(*pos)++;
1523 	return next_online_pgdat(pgdat);
1524 }
1525 
1526 static void frag_stop(struct seq_file *m, void *arg)
1527 {
1528 }
1529 
1530 /*
1531  * Walk zones in a node and print using a callback.
1532  * If @assert_populated is true, only use callback for zones that are populated.
1533  */
1534 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1535 		bool assert_populated, bool nolock,
1536 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1537 {
1538 	struct zone *zone;
1539 	struct zone *node_zones = pgdat->node_zones;
1540 	unsigned long flags;
1541 
1542 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1543 		if (assert_populated && !populated_zone(zone))
1544 			continue;
1545 
1546 		if (!nolock)
1547 			spin_lock_irqsave(&zone->lock, flags);
1548 		print(m, pgdat, zone);
1549 		if (!nolock)
1550 			spin_unlock_irqrestore(&zone->lock, flags);
1551 	}
1552 }
1553 #endif
1554 
1555 #ifdef CONFIG_PROC_FS
1556 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1557 						struct zone *zone)
1558 {
1559 	int order;
1560 
1561 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1562 	for (order = 0; order < NR_PAGE_ORDERS; ++order)
1563 		/*
1564 		 * Access to nr_free is lockless as nr_free is used only for
1565 		 * printing purposes. Use data_race to avoid KCSAN warning.
1566 		 */
1567 		seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
1568 	seq_putc(m, '\n');
1569 }
1570 
1571 /*
1572  * This walks the free areas for each zone.
1573  */
1574 static int frag_show(struct seq_file *m, void *arg)
1575 {
1576 	pg_data_t *pgdat = (pg_data_t *)arg;
1577 	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1578 	return 0;
1579 }
1580 
1581 static void pagetypeinfo_showfree_print(struct seq_file *m,
1582 					pg_data_t *pgdat, struct zone *zone)
1583 {
1584 	int order, mtype;
1585 
1586 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1587 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1588 					pgdat->node_id,
1589 					zone->name,
1590 					migratetype_names[mtype]);
1591 		for (order = 0; order < NR_PAGE_ORDERS; ++order) {
1592 			unsigned long freecount = 0;
1593 			struct free_area *area;
1594 			struct list_head *curr;
1595 			bool overflow = false;
1596 
1597 			area = &(zone->free_area[order]);
1598 
1599 			list_for_each(curr, &area->free_list[mtype]) {
1600 				/*
1601 				 * Cap the free_list iteration because it might
1602 				 * be really large and we are under a spinlock
1603 				 * so a long time spent here could trigger a
1604 				 * hard lockup detector. Anyway this is a
1605 				 * debugging tool so knowing there is a handful
1606 				 * of pages of this order should be more than
1607 				 * sufficient.
1608 				 */
1609 				if (++freecount >= 100000) {
1610 					overflow = true;
1611 					break;
1612 				}
1613 			}
1614 			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1615 			spin_unlock_irq(&zone->lock);
1616 			cond_resched();
1617 			spin_lock_irq(&zone->lock);
1618 		}
1619 		seq_putc(m, '\n');
1620 	}
1621 }
1622 
1623 /* Print out the free pages at each order for each migratetype */
1624 static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
1625 {
1626 	int order;
1627 	pg_data_t *pgdat = (pg_data_t *)arg;
1628 
1629 	/* Print header */
1630 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1631 	for (order = 0; order < NR_PAGE_ORDERS; ++order)
1632 		seq_printf(m, "%6d ", order);
1633 	seq_putc(m, '\n');
1634 
1635 	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1636 }
1637 
1638 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1639 					pg_data_t *pgdat, struct zone *zone)
1640 {
1641 	int mtype;
1642 	unsigned long pfn;
1643 	unsigned long start_pfn = zone->zone_start_pfn;
1644 	unsigned long end_pfn = zone_end_pfn(zone);
1645 	unsigned long count[MIGRATE_TYPES] = { 0, };
1646 
1647 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1648 		struct page *page;
1649 
1650 		page = pfn_to_online_page(pfn);
1651 		if (!page)
1652 			continue;
1653 
1654 		if (page_zone(page) != zone)
1655 			continue;
1656 
1657 		mtype = get_pageblock_migratetype(page);
1658 
1659 		if (mtype < MIGRATE_TYPES)
1660 			count[mtype]++;
1661 	}
1662 
1663 	/* Print counts */
1664 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1665 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1666 		seq_printf(m, "%12lu ", count[mtype]);
1667 	seq_putc(m, '\n');
1668 }
1669 
1670 /* Print out the number of pageblocks for each migratetype */
1671 static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1672 {
1673 	int mtype;
1674 	pg_data_t *pgdat = (pg_data_t *)arg;
1675 
1676 	seq_printf(m, "\n%-23s", "Number of blocks type ");
1677 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1678 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1679 	seq_putc(m, '\n');
1680 	walk_zones_in_node(m, pgdat, true, false,
1681 		pagetypeinfo_showblockcount_print);
1682 }
1683 
1684 /*
1685  * Print out the number of pageblocks for each migratetype that contain pages
1686  * of other types. This gives an indication of how well fallbacks are being
1687  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1688  * to determine what is going on
1689  */
1690 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1691 {
1692 #ifdef CONFIG_PAGE_OWNER
1693 	int mtype;
1694 
1695 	if (!static_branch_unlikely(&page_owner_inited))
1696 		return;
1697 
1698 	drain_all_pages(NULL);
1699 
1700 	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1701 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1702 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1703 	seq_putc(m, '\n');
1704 
1705 	walk_zones_in_node(m, pgdat, true, true,
1706 		pagetypeinfo_showmixedcount_print);
1707 #endif /* CONFIG_PAGE_OWNER */
1708 }
1709 
1710 /*
1711  * This prints out statistics in relation to grouping pages by mobility.
1712  * It is expensive to collect so do not constantly read the file.
1713  */
1714 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1715 {
1716 	pg_data_t *pgdat = (pg_data_t *)arg;
1717 
1718 	/* check memoryless node */
1719 	if (!node_state(pgdat->node_id, N_MEMORY))
1720 		return 0;
1721 
1722 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1723 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1724 	seq_putc(m, '\n');
1725 	pagetypeinfo_showfree(m, pgdat);
1726 	pagetypeinfo_showblockcount(m, pgdat);
1727 	pagetypeinfo_showmixedcount(m, pgdat);
1728 
1729 	return 0;
1730 }
1731 
1732 static const struct seq_operations fragmentation_op = {
1733 	.start	= frag_start,
1734 	.next	= frag_next,
1735 	.stop	= frag_stop,
1736 	.show	= frag_show,
1737 };
1738 
1739 static const struct seq_operations pagetypeinfo_op = {
1740 	.start	= frag_start,
1741 	.next	= frag_next,
1742 	.stop	= frag_stop,
1743 	.show	= pagetypeinfo_show,
1744 };
1745 
1746 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1747 {
1748 	int zid;
1749 
1750 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1751 		struct zone *compare = &pgdat->node_zones[zid];
1752 
1753 		if (populated_zone(compare))
1754 			return zone == compare;
1755 	}
1756 
1757 	return false;
1758 }
1759 
1760 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1761 							struct zone *zone)
1762 {
1763 	int i;
1764 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1765 	if (is_zone_first_populated(pgdat, zone)) {
1766 		seq_printf(m, "\n  per-node stats");
1767 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1768 			unsigned long pages = node_page_state_pages(pgdat, i);
1769 
1770 			if (vmstat_item_print_in_thp(i))
1771 				pages /= HPAGE_PMD_NR;
1772 			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1773 				   pages);
1774 		}
1775 	}
1776 	seq_printf(m,
1777 		   "\n  pages free     %lu"
1778 		   "\n        boost    %lu"
1779 		   "\n        min      %lu"
1780 		   "\n        low      %lu"
1781 		   "\n        high     %lu"
1782 		   "\n        promo    %lu"
1783 		   "\n        spanned  %lu"
1784 		   "\n        present  %lu"
1785 		   "\n        managed  %lu"
1786 		   "\n        cma      %lu",
1787 		   zone_page_state(zone, NR_FREE_PAGES),
1788 		   zone->watermark_boost,
1789 		   min_wmark_pages(zone),
1790 		   low_wmark_pages(zone),
1791 		   high_wmark_pages(zone),
1792 		   promo_wmark_pages(zone),
1793 		   zone->spanned_pages,
1794 		   zone->present_pages,
1795 		   zone_managed_pages(zone),
1796 		   zone_cma_pages(zone));
1797 
1798 	seq_printf(m,
1799 		   "\n        protection: (%ld",
1800 		   zone->lowmem_reserve[0]);
1801 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1802 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1803 	seq_putc(m, ')');
1804 
1805 	/* If unpopulated, no other information is useful */
1806 	if (!populated_zone(zone)) {
1807 		seq_putc(m, '\n');
1808 		return;
1809 	}
1810 
1811 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1812 		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1813 			   zone_page_state(zone, i));
1814 
1815 #ifdef CONFIG_NUMA
1816 	fold_vm_zone_numa_events(zone);
1817 	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1818 		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1819 			   zone_numa_event_state(zone, i));
1820 #endif
1821 
1822 	seq_puts(m, "\n  pagesets");
1823 	for_each_online_cpu(i) {
1824 		struct per_cpu_pages *pcp;
1825 		struct per_cpu_zonestat __maybe_unused *pzstats;
1826 
1827 		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
1828 		seq_printf(m,
1829 			   "\n    cpu: %i"
1830 			   "\n              count:    %i"
1831 			   "\n              high:     %i"
1832 			   "\n              batch:    %i"
1833 			   "\n              high_min: %i"
1834 			   "\n              high_max: %i",
1835 			   i,
1836 			   pcp->count,
1837 			   pcp->high,
1838 			   pcp->batch,
1839 			   pcp->high_min,
1840 			   pcp->high_max);
1841 #ifdef CONFIG_SMP
1842 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
1843 		seq_printf(m, "\n  vm stats threshold: %d",
1844 				pzstats->stat_threshold);
1845 #endif
1846 	}
1847 	seq_printf(m,
1848 		   "\n  node_unreclaimable:  %u"
1849 		   "\n  start_pfn:           %lu",
1850 		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1851 		   zone->zone_start_pfn);
1852 	seq_putc(m, '\n');
1853 }
1854 
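/*
 * A rough sketch of one per-zone block emitted above (values illustrative,
 * stat lists abridged):
 *
 *	Node 0, zone   Normal
 *	  per-node stats
 *	      nr_inactive_anon 11362
 *	      ...
 *	  pages free     81120
 *	        boost    0
 *	        min      11256
 *	        ...
 *	        protection: (0, 0, 0, 0)
 *	      nr_free_pages 81120
 *	      ...
 *	  pagesets
 *	    cpu: 0
 *	              count:    42
 *	              ...
 *	  node_unreclaimable:  0
 *	  start_pfn:           1048576
 */
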
1855 /*
1856  * Output information about zones in @pgdat.  All zones are printed regardless
1857  * of whether they are populated or not: lowmem_reserve_ratio operates on the
1858  * set of all zones and userspace would not be aware of such zones if they are
1859  * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1860  */
1861 static int zoneinfo_show(struct seq_file *m, void *arg)
1862 {
1863 	pg_data_t *pgdat = (pg_data_t *)arg;
1864 	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1865 	return 0;
1866 }
1867 
1868 static const struct seq_operations zoneinfo_op = {
1869 	.start	= frag_start, /* iterate over all zones. The same as in
1870 			       * fragmentation. */
1871 	.next	= frag_next,
1872 	.stop	= frag_stop,
1873 	.show	= zoneinfo_show,
1874 };
1875 
1876 #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1877 			 NR_VM_NUMA_EVENT_ITEMS + \
1878 			 NR_VM_NODE_STAT_ITEMS + \
1879 			 NR_VM_STAT_ITEMS + \
1880 			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1881 			  NR_VM_EVENT_ITEMS : 0))
1882 
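/*
 * vmstat_start() below fills a single array in exactly this order: zone
 * stats, NUMA event counters, node stats, the remaining VM stats and, if
 * configured, the event counters. vmstat_text must name the items in the
 * same order; the BUILD_BUG_ON() in vmstat_start() at least keeps the two
 * lengths in sync.
 */
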
1883 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1884 {
1885 	unsigned long *v;
1886 	int i;
1887 
1888 	if (*pos >= NR_VMSTAT_ITEMS)
1889 		return NULL;
1890 
1891 	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) != NR_VMSTAT_ITEMS);
1892 	fold_vm_numa_events();
1893 	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1894 	m->private = v;
1895 	if (!v)
1896 		return ERR_PTR(-ENOMEM);
1897 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1898 		v[i] = global_zone_page_state(i);
1899 	v += NR_VM_ZONE_STAT_ITEMS;
1900 
1901 #ifdef CONFIG_NUMA
1902 	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1903 		v[i] = global_numa_event_state(i);
1904 	v += NR_VM_NUMA_EVENT_ITEMS;
1905 #endif
1906 
1907 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1908 		v[i] = global_node_page_state_pages(i);
1909 		if (vmstat_item_print_in_thp(i))
1910 			v[i] /= HPAGE_PMD_NR;
1911 	}
1912 	v += NR_VM_NODE_STAT_ITEMS;
1913 
1914 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1915 			    v + NR_DIRTY_THRESHOLD);
1916 	v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
1917 	v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
1918 	v += NR_VM_STAT_ITEMS;
1919 
1920 #ifdef CONFIG_VM_EVENT_COUNTERS
1921 	all_vm_events(v);
1922 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1923 	v[PGPGOUT] /= 2;
1924 #endif
1925 	return (unsigned long *)m->private + *pos;
1926 }
1927 
1928 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1929 {
1930 	(*pos)++;
1931 	if (*pos >= NR_VMSTAT_ITEMS)
1932 		return NULL;
1933 	return (unsigned long *)m->private + *pos;
1934 }
1935 
1936 static int vmstat_show(struct seq_file *m, void *arg)
1937 {
1938 	unsigned long *l = arg;
1939 	unsigned long off = l - (unsigned long *)m->private;
1940 
1941 	seq_puts(m, vmstat_text[off]);
1942 	seq_put_decimal_ull(m, " ", *l);
1943 	seq_putc(m, '\n');
1944 
1945 	if (off == NR_VMSTAT_ITEMS - 1) {
1946 		/*
1947 		 * We've come to the end - add any deprecated counters to avoid
1948 		 * breaking userspace which might depend on them being present.
1949 		 */
1950 		seq_puts(m, "nr_unstable 0\n");
1951 	}
1952 	return 0;
1953 }
1954 
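/*
 * Each /proc/vmstat line pairs a name from vmstat_text with its value,
 * e.g. (values illustrative):
 *
 *	nr_free_pages 81120
 *	nr_zone_inactive_anon 11362
 *	...
 *	nr_unstable 0
 */
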
1955 static void vmstat_stop(struct seq_file *m, void *arg)
1956 {
1957 	kfree(m->private);
1958 	m->private = NULL;
1959 }
1960 
1961 static const struct seq_operations vmstat_op = {
1962 	.start	= vmstat_start,
1963 	.next	= vmstat_next,
1964 	.stop	= vmstat_stop,
1965 	.show	= vmstat_show,
1966 };
1967 #endif /* CONFIG_PROC_FS */
1968 
1969 #ifdef CONFIG_SMP
1970 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1971 static int sysctl_stat_interval __read_mostly = HZ;
1972 static int vmstat_late_init_done;
1973 
1974 #ifdef CONFIG_PROC_FS
1975 static void refresh_vm_stats(struct work_struct *work)
1976 {
1977 	refresh_cpu_vm_stats(true);
1978 }
1979 
1980 static int vmstat_refresh(const struct ctl_table *table, int write,
1981 		   void *buffer, size_t *lenp, loff_t *ppos)
1982 {
1983 	long val;
1984 	int err;
1985 	int i;
1986 
1987 	/*
1988 	 * The regular update, every sysctl_stat_interval, may come later
1989 	 * than expected: leaving a significant amount in per_cpu buckets.
1990 	 * This is particularly misleading when checking a quantity of HUGE
1991 	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1992 	 * which can equally be echo'ed to or cat'ted from (by root),
1993 	 * can be used to update the stats just before reading them.
1994 	 *
1995 	 * Oh, and since global_zone_page_state() etc. are so careful to hide
1996 	 * transiently negative values, warn here if any of the stats is
1997 	 * negative, so we know to go looking for imbalance.
1998 	 */
1999 	err = schedule_on_each_cpu(refresh_vm_stats);
2000 	if (err)
2001 		return err;
2002 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
2003 		/*
2004 		 * Skip checking stats known to go negative occasionally.
2005 		 */
2006 		switch (i) {
2007 		case NR_ZONE_WRITE_PENDING:
2008 		case NR_FREE_CMA_PAGES:
2009 			continue;
2010 		}
2011 		val = atomic_long_read(&vm_zone_stat[i]);
2012 		if (val < 0) {
2013 			pr_warn("%s: %s %ld\n",
2014 				__func__, zone_stat_name(i), val);
2015 		}
2016 	}
2017 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
2018 		/*
2019 		 * Skip checking stats known to go negative occasionally.
2020 		 */
2021 		switch (i) {
2022 		case NR_WRITEBACK:
2023 			continue;
2024 		}
2025 		val = atomic_long_read(&vm_node_stat[i]);
2026 		if (val < 0) {
2027 			pr_warn("%s: %s %ld\n",
2028 				__func__, node_stat_name(i), val);
2029 		}
2030 	}
2031 	if (write)
2032 		*ppos += *lenp;
2033 	else
2034 		*lenp = 0;
2035 	return 0;
2036 }
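
/*
 * An imbalance found above is reported like this (stat name and value are
 * illustrative):
 *
 *	vmstat_refresh: nr_isolated_anon -2
 */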
2037 #endif /* CONFIG_PROC_FS */
2038 
2039 static void vmstat_update(struct work_struct *w)
2040 {
2041 	if (refresh_cpu_vm_stats(true)) {
2042 		/*
2043 		 * Counters were updated so we expect more updates
2044 		 * to occur in the future. Keep on running the
2045 		 * update worker thread.
2046 		 */
2047 		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
2048 				this_cpu_ptr(&vmstat_work),
2049 				round_jiffies_relative(sysctl_stat_interval));
2050 	}
2051 }
2052 
2053 /*
2054  * Check if the diffs for a certain cpu indicate that
2055  * an update is needed.
2056  */
2057 static bool need_update(int cpu)
2058 {
2059 	pg_data_t *last_pgdat = NULL;
2060 	struct zone *zone;
2061 
2062 	for_each_populated_zone(zone) {
2063 		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
2064 		struct per_cpu_nodestat *n;
2065 
2066 		/*
2067 		 * The fast way of checking if there are any vmstat diffs.
2068 		 */
2069 		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
2070 			return true;
2071 
2072 		if (last_pgdat == zone->zone_pgdat)
2073 			continue;
2074 		last_pgdat = zone->zone_pgdat;
2075 		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
2076 		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
2077 			return true;
2078 	}
2079 	return false;
2080 }
2081 
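/*
 * memchr_inv(buf, c, n) returns NULL only when all n bytes equal c, so a
 * non-NULL result above means some counter diff is pending. A minimal
 * sketch of the idiom, with made-up values:
 *
 *	s8 diff[4] = { 0, 0, 3, 0 };
 *	memchr_inv(diff, 0, sizeof(diff));	// returns &diff[2]: update needed
 */
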
2082 /*
2083  * Switch off vmstat processing and then fold all the remaining differentials
2084  * until the diffs stay at zero. The function is used by NOHZ and can only be
2085  * invoked when tick processing is not active.
2086  */
2087 void quiet_vmstat(void)
2088 {
2089 	if (system_state != SYSTEM_RUNNING)
2090 		return;
2091 
2092 	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
2093 		return;
2094 
2095 	if (!need_update(smp_processor_id()))
2096 		return;
2097 
2098 	/*
2099 	 * Just refresh counters and do not care about the pending delayed
2100 	 * vmstat_update. It doesn't fire often enough to matter, and canceling
2101 	 * it would be too expensive from this path.
2102 	 * vmstat_shepherd will take care of that for us.
2103 	 */
2104 	refresh_cpu_vm_stats(false);
2105 }
2106 
2107 /*
2108  * Shepherd worker thread that checks the
2109  * differentials of processors that have their worker
2110  * threads for vm statistics updates disabled because of
2111  * inactivity.
2112  */
2113 static void vmstat_shepherd(struct work_struct *w);
2114 
2115 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
2116 
2117 static void vmstat_shepherd(struct work_struct *w)
2118 {
2119 	int cpu;
2120 
2121 	cpus_read_lock();
2122 	/* Check processors whose vmstat worker threads have been disabled */
2123 	for_each_online_cpu(cpu) {
2124 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
2125 
2126 		/*
2127 		 * In-kernel users of vmstat counters either require the precise
2128 		 * value, in which case they use the zone_page_state_snapshot()
2129 		 * interface, or they can live with imprecision, as regular
2130 		 * flushing can happen at an arbitrary time and the cumulative
2131 		 * error can grow (see calculate_normal_threshold()).
2132 		 *
2133 		 * From that POV, regular flushing can be postponed for CPUs isolated
2134 		 * from kernel interference without critical infrastructure ever noticing;
2135 		 * skip them in vmstat_shepherd to avoid disturbing the isolated workload.
2136 		 */
2137 		if (cpu_is_isolated(cpu))
2138 			continue;
2139 
2140 		if (!delayed_work_pending(dw) && need_update(cpu))
2141 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
2142 
2143 		cond_resched();
2144 	}
2145 	cpus_read_unlock();
2146 
2147 	schedule_delayed_work(&shepherd,
2148 		round_jiffies_relative(sysctl_stat_interval));
2149 }
2150 
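/*
 * Net effect: one deferrable timer re-arms itself every sysctl_stat_interval
 * and kicks the per-cpu vmstat_work only on CPUs whose diffs are non-zero;
 * CPUs with nothing pending (and all isolated CPUs) are left alone.
 */
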
2151 static void __init start_shepherd_timer(void)
2152 {
2153 	int cpu;
2154 
2155 	for_each_possible_cpu(cpu) {
2156 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
2157 			vmstat_update);
2158 
2159 		/*
2160 		 * For secondary CPUs brought up later, vmstat_cpu_online()
2161 		 * will enable the work: the mm/vmstat:online hotplug state
2162 		 * enables and disables vmstat_work symmetrically across
2163 		 * CPU hotplug events.
2164 		 */
2165 		if (!cpu_online(cpu))
2166 			disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2167 	}
2168 
2169 	schedule_delayed_work(&shepherd,
2170 		round_jiffies_relative(sysctl_stat_interval));
2171 }
2172 
2173 static void __init init_cpu_node_state(void)
2174 {
2175 	int node;
2176 
2177 	for_each_online_node(node) {
2178 		if (!cpumask_empty(cpumask_of_node(node)))
2179 			node_set_state(node, N_CPU);
2180 	}
2181 }
2182 
2183 static int vmstat_cpu_online(unsigned int cpu)
2184 {
2185 	if (vmstat_late_init_done)
2186 		refresh_zone_stat_thresholds();
2187 
2188 	if (!node_state(cpu_to_node(cpu), N_CPU))
2189 		node_set_state(cpu_to_node(cpu), N_CPU);
2191 	enable_delayed_work(&per_cpu(vmstat_work, cpu));
2192 
2193 	return 0;
2194 }
2195 
2196 static int vmstat_cpu_down_prep(unsigned int cpu)
2197 {
2198 	disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2199 	return 0;
2200 }
2201 
2202 static int vmstat_cpu_dead(unsigned int cpu)
2203 {
2204 	const struct cpumask *node_cpus;
2205 	int node;
2206 
2207 	node = cpu_to_node(cpu);
2208 
2209 	refresh_zone_stat_thresholds();
2210 	node_cpus = cpumask_of_node(node);
2211 	if (!cpumask_empty(node_cpus))
2212 		return 0;
2213 
2214 	node_clear_state(node, N_CPU);
2215 
2216 	return 0;
2217 }
2218 
2219 static int __init vmstat_late_init(void)
2220 {
2221 	refresh_zone_stat_thresholds();
2222 	vmstat_late_init_done = 1;
2223 
2224 	return 0;
2225 }
2226 late_initcall(vmstat_late_init);
2227 #endif
2228 
2229 #ifdef CONFIG_PROC_FS
2230 static const struct ctl_table vmstat_table[] = {
2231 #ifdef CONFIG_SMP
2232 	{
2233 		.procname	= "stat_interval",
2234 		.data		= &sysctl_stat_interval,
2235 		.maxlen		= sizeof(sysctl_stat_interval),
2236 		.mode		= 0644,
2237 		.proc_handler	= proc_dointvec_jiffies,
2238 	},
2239 	{
2240 		.procname	= "stat_refresh",
2241 		.data		= NULL,
2242 		.maxlen		= 0,
2243 		.mode		= 0600,
2244 		.proc_handler	= vmstat_refresh,
2245 	},
2246 #endif
2247 #ifdef CONFIG_NUMA
2248 	{
2249 		.procname	= "numa_stat",
2250 		.data		= &sysctl_vm_numa_stat,
2251 		.maxlen		= sizeof(int),
2252 		.mode		= 0644,
2253 		.proc_handler	= sysctl_vm_numa_stat_handler,
2254 		.extra1		= SYSCTL_ZERO,
2255 		.extra2		= SYSCTL_ONE,
2256 	},
2257 #endif
2258 };
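
/*
 * Registered under /proc/sys/vm by register_sysctl_init() below; typical
 * (root) usage:
 *
 *	echo 1 > /proc/sys/vm/stat_refresh	# fold per-cpu diffs now
 *	cat /proc/sys/vm/stat_interval		# update interval, in seconds
 */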
2259 #endif
2260 
2261 struct workqueue_struct *mm_percpu_wq;
2262 
2263 void __init init_mm_internals(void)
2264 {
2265 	int ret __maybe_unused;
2266 
2267 	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2268 
2269 #ifdef CONFIG_SMP
2270 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2271 					NULL, vmstat_cpu_dead);
2272 	if (ret < 0)
2273 		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2274 
2275 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2276 					vmstat_cpu_online,
2277 					vmstat_cpu_down_prep);
2278 	if (ret < 0)
2279 		pr_err("vmstat: failed to register 'online' hotplug state\n");
2280 
2281 	cpus_read_lock();
2282 	init_cpu_node_state();
2283 	cpus_read_unlock();
2284 
2285 	start_shepherd_timer();
2286 #endif
2287 #ifdef CONFIG_PROC_FS
2288 	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2289 	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2290 	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2291 	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2292 	register_sysctl_init("vm", vmstat_table);
2293 #endif
2294 }
2295 
2296 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2297 
2298 /*
2299  * Return an index indicating how much of the available free memory is
2300  * unusable for an allocation of the requested size.
2301  */
2302 static int unusable_free_index(unsigned int order,
2303 				struct contig_page_info *info)
2304 {
2305 	/* No free memory is interpreted as all free memory is unusable */
2306 	if (info->free_pages == 0)
2307 		return 1000;
2308 
2309 	/*
2310 	 * Index should be a value between 0 and 1. Return a value to 3
2311 	 * decimal places.
2312 	 *
2313 	 * 0 => no fragmentation
2314 	 * 1 => high fragmentation
2315 	 */
2316 	return div_u64((info->free_pages -
2317 			(info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2318 }
2319 
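/*
 * Worked example with made-up numbers: free_pages = 1000, order = 2 and
 * free_blocks_suitable = 200. The suitable blocks cover 200 << 2 = 800
 * pages, so the index is (1000 - 800) * 1000 / 1000 = 200, printed by the
 * callers below as 0.200 (20% of free memory unusable at this order).
 */
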
2320 static void unusable_show_print(struct seq_file *m,
2321 					pg_data_t *pgdat, struct zone *zone)
2322 {
2323 	unsigned int order;
2324 	int index;
2325 	struct contig_page_info info;
2326 
2327 	seq_printf(m, "Node %d, zone %8s ",
2328 				pgdat->node_id,
2329 				zone->name);
2330 	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2331 		fill_contig_page_info(zone, order, &info);
2332 		index = unusable_free_index(order, &info);
2333 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2334 	}
2335 
2336 	seq_putc(m, '\n');
2337 }
2338 
2339 /*
2340  * Display unusable free space index
2341  *
2342  * The unusable free space index measures how much of the available free
2343  * memory cannot be used to satisfy an allocation of a given size and is a
2344  * value between 0 and 1. The higher the value, the more of the free memory
2345  * is unusable and, by implication, the worse the external fragmentation is.
2346  * This can be expressed as a percentage by multiplying by 100.
2347  */
2348 static int unusable_show(struct seq_file *m, void *arg)
2349 {
2350 	pg_data_t *pgdat = (pg_data_t *)arg;
2351 
2352 	/* check memoryless node */
2353 	if (!node_state(pgdat->node_id, N_MEMORY))
2354 		return 0;
2355 
2356 	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2357 
2358 	return 0;
2359 }
2360 
2361 static const struct seq_operations unusable_sops = {
2362 	.start	= frag_start,
2363 	.next	= frag_next,
2364 	.stop	= frag_stop,
2365 	.show	= unusable_show,
2366 };
2367 
2368 DEFINE_SEQ_ATTRIBUTE(unusable);
2369 
2370 static void extfrag_show_print(struct seq_file *m,
2371 					pg_data_t *pgdat, struct zone *zone)
2372 {
2373 	unsigned int order;
2374 	int index;
2375 
2376 	/* Alloc on stack as interrupts are disabled for zone walk */
2377 	struct contig_page_info info;
2378 
2379 	seq_printf(m, "Node %d, zone %8s ",
2380 				pgdat->node_id,
2381 				zone->name);
2382 	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2383 		fill_contig_page_info(zone, order, &info);
2384 		index = __fragmentation_index(order, &info);
2385 		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
2386 	}
2387 
2388 	seq_putc(m, '\n');
2389 }
2390 
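/*
 * One line per zone, one %2d.%03d column per order. A value of -1.000
 * means a free block of that order is currently available, so a failure
 * at that order would not be due to external fragmentation. Illustrative:
 *
 *	Node 0, zone   Normal -1.000 -1.000 -1.000  0.605  0.831  0.929 ...
 */
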
2391 /*
2392  * Display the fragmentation index for orders at which allocations would fail
2393  */
2394 static int extfrag_show(struct seq_file *m, void *arg)
2395 {
2396 	pg_data_t *pgdat = (pg_data_t *)arg;
2397 
2398 	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2399 
2400 	return 0;
2401 }
2402 
2403 static const struct seq_operations extfrag_sops = {
2404 	.start	= frag_start,
2405 	.next	= frag_next,
2406 	.stop	= frag_stop,
2407 	.show	= extfrag_show,
2408 };
2409 
2410 DEFINE_SEQ_ATTRIBUTE(extfrag);
2411 
2412 static int __init extfrag_debug_init(void)
2413 {
2414 	struct dentry *extfrag_debug_root;
2415 
2416 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2417 
2418 	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2419 			    &unusable_fops);
2420 
2421 	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2422 			    &extfrag_fops);
2423 
2424 	return 0;
2425 }
2426 
2427 module_init(extfrag_debug_init);
2428 
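/*
 * With debugfs mounted in the usual place, the two files created above
 * appear as /sys/kernel/debug/extfrag/unusable_index and
 * /sys/kernel/debug/extfrag/extfrag_index, both read-only snapshots that
 * walk the zones on each read.
 */
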
2429 #endif
2430