xref: /linux/mm/vmstat.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /*
2  *  linux/mm/vmstat.c
3  *
4  *  Manages VM statistics
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  zoned VM statistics
8  *  Copyright (C) 2006 Silicon Graphics, Inc.,
9  *		Christoph Lameter <christoph@lameter.com>
10  *  Copyright (C) 2008-2014 Christoph Lameter
11  */
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/err.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/cpu.h>
18 #include <linux/cpumask.h>
19 #include <linux/vmstat.h>
20 #include <linux/proc_fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/debugfs.h>
23 #include <linux/sched.h>
24 #include <linux/math64.h>
25 #include <linux/writeback.h>
26 #include <linux/compaction.h>
27 #include <linux/mm_inline.h>
28 #include <linux/page_ext.h>
29 #include <linux/page_owner.h>
30 
31 #include "internal.h"
32 
33 #ifdef CONFIG_VM_EVENT_COUNTERS
34 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
35 EXPORT_PER_CPU_SYMBOL(vm_event_states);
36 
37 static void sum_vm_events(unsigned long *ret)
38 {
39 	int cpu;
40 	int i;
41 
42 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
43 
44 	for_each_online_cpu(cpu) {
45 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
46 
47 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
48 			ret[i] += this->event[i];
49 	}
50 }
51 
52 /*
53  * Accumulate the vm event counters across all CPUs.
54  * The result is unavoidably approximate - it can change
55  * during and after execution of this function.
56  */
57 void all_vm_events(unsigned long *ret)
58 {
59 	get_online_cpus();
60 	sum_vm_events(ret);
61 	put_online_cpus();
62 }
63 EXPORT_SYMBOL_GPL(all_vm_events);
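/*
 * Illustrative (hypothetical) caller: snapshot the event counters into a
 * local array and report one of them. PGFAULT is a real vm_event_item;
 * the snippet itself is only a sketch, not code from this file:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("page faults so far: %lu\n", events[PGFAULT]);
 */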
64 
65 /*
66  * Fold the foreign cpu events into our own.
67  *
68  * This is adding to the events on one processor
69  * but keeps the global counts constant.
70  */
71 void vm_events_fold_cpu(int cpu)
72 {
73 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
74 	int i;
75 
76 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
77 		count_vm_events(i, fold_state->event[i]);
78 		fold_state->event[i] = 0;
79 	}
80 }
81 
82 #endif /* CONFIG_VM_EVENT_COUNTERS */
83 
84 /*
85  * Manage combined zone based / global counters
86  *
87  * vm_zone_stat and vm_node_stat contain the global counters
88  */
89 atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90 atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
91 EXPORT_SYMBOL(vm_zone_stat);
92 EXPORT_SYMBOL(vm_node_stat);
93 
94 #ifdef CONFIG_SMP
95 
96 int calculate_pressure_threshold(struct zone *zone)
97 {
98 	int threshold;
99 	int watermark_distance;
100 
101 	/*
102 	 * As vmstats are not up to date, there is drift between the estimated
103 	 * and real values. For high thresholds and a high number of CPUs, it
104 	 * is possible for the min watermark to be breached while the estimated
105 	 * value looks fine. The pressure threshold is a reduced value such
106 	 * that even the maximum amount of drift will not accidentally breach
107 	 * the min watermark.
108 	 */
109 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
110 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
111 
112 	/*
113 	 * Maximum threshold is 125
114 	 */
115 	threshold = min(125, threshold);
116 
117 	return threshold;
118 }
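/*
 * Worked example with illustrative numbers: if low_wmark_pages() is 1280
 * and min_wmark_pages() is 1024, the watermark distance is 256 pages. On
 * a 16 CPU machine the pressure threshold becomes max(1, 256 / 16) = 16,
 * so even if every CPU holds a full differential the accumulated drift
 * (16 * 16 = 256) cannot silently push the zone below the min watermark.
 */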
119 
120 int calculate_normal_threshold(struct zone *zone)
121 {
122 	int threshold;
123 	int mem;	/* memory in 128 MB units */
124 
125 	/*
126 	 * The threshold scales with the number of processors and the amount
127 	 * of memory per zone. More memory means that we can defer updates for
128 	 * longer, more processors could lead to more contention.
129 	 * fls() is used to have a cheap way of logarithmic scaling.
130 	 *
131 	 * Some sample thresholds:
132 	 *
133 	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
134 	 * ------------------------------------------------------------------
135 	 * 8		1		1	0.9-1 GB	4
136 	 * 16		2		2	0.9-1 GB	4
137 	 * 20 		2		2	1-2 GB		5
138 	 * 24		2		2	2-4 GB		6
139 	 * 28		2		2	4-8 GB		7
140 	 * 32		2		2	8-16 GB		8
141 	 * 4		2		2	<128M		1
142 	 * 30		4		3	2-4 GB		5
143 	 * 48		4		3	8-16 GB		8
144 	 * 32		8		4	1-2 GB		4
145 	 * 32		8		4	0.9-1GB		4
146 	 * 10		16		5	<128M		1
147 	 * 40		16		5	900M		4
148 	 * 70		64		7	2-4 GB		5
149 	 * 84		64		7	4-8 GB		6
150 	 * 108		512		9	4-8 GB		6
151 	 * 125		1024		10	8-16 GB		8
152 	 * 125		1024		10	16-32 GB	9
153 	 */
154 
155 	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
156 
157 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
158 
159 	/*
160 	 * Maximum threshold is 125
161 	 */
162 	threshold = min(125, threshold);
163 
164 	return threshold;
165 }
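/*
 * Worked example matching the table above: with 2 online CPUs (fls(2) = 2)
 * and a 1GB zone, mem = 1GB / 128MB = 8 and fls(8 + 1) = 4, giving
 * threshold = 2 * 2 * 4 = 16.
 */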
166 
167 /*
168  * Refresh the thresholds for each zone.
169  */
170 void refresh_zone_stat_thresholds(void)
171 {
172 	struct pglist_data *pgdat;
173 	struct zone *zone;
174 	int cpu;
175 	int threshold;
176 
177 	/* Zero current pgdat thresholds */
178 	for_each_online_pgdat(pgdat) {
179 		for_each_online_cpu(cpu) {
180 			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
181 		}
182 	}
183 
184 	for_each_populated_zone(zone) {
185 		struct pglist_data *pgdat = zone->zone_pgdat;
186 		unsigned long max_drift, tolerate_drift;
187 
188 		threshold = calculate_normal_threshold(zone);
189 
190 		for_each_online_cpu(cpu) {
191 			int pgdat_threshold;
192 
193 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
194 							= threshold;
195 
196 			/* Base nodestat threshold on the largest populated zone. */
197 			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
198 			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
199 				= max(threshold, pgdat_threshold);
200 		}
201 
202 		/*
203 		 * Only set percpu_drift_mark if there is a danger that
204 		 * NR_FREE_PAGES reports that the low watermark is fine when in fact
205 		 * the min watermark could be breached by an allocation.
206 		 */
207 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
208 		max_drift = num_online_cpus() * threshold;
209 		if (max_drift > tolerate_drift)
210 			zone->percpu_drift_mark = high_wmark_pages(zone) +
211 					max_drift;
212 	}
213 }
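/*
 * Worked example with illustrative numbers: with 64 online CPUs and a
 * normal threshold of 125, the per-cpu diffs can hide up to
 * 64 * 125 = 8000 pages of drift. If the gap between the low and min
 * watermarks is only 512 pages, that drift could mask a watermark breach,
 * so percpu_drift_mark is set to high_wmark_pages(zone) + 8000.
 */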
214 
215 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
216 				int (*calculate_pressure)(struct zone *))
217 {
218 	struct zone *zone;
219 	int cpu;
220 	int threshold;
221 	int i;
222 
223 	for (i = 0; i < pgdat->nr_zones; i++) {
224 		zone = &pgdat->node_zones[i];
225 		if (!zone->percpu_drift_mark)
226 			continue;
227 
228 		threshold = (*calculate_pressure)(zone);
229 		for_each_online_cpu(cpu)
230 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
231 							= threshold;
232 	}
233 }
234 
235 /*
236  * For use when we know that interrupts are disabled,
237  * or when we know that preemption is disabled and that
238  * particular counter cannot be updated from interrupt context.
239  */
240 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
241 			   long delta)
242 {
243 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
244 	s8 __percpu *p = pcp->vm_stat_diff + item;
245 	long x;
246 	long t;
247 
248 	x = delta + __this_cpu_read(*p);
249 
250 	t = __this_cpu_read(pcp->stat_threshold);
251 
252 	if (unlikely(x > t || x < -t)) {
253 		zone_page_state_add(x, zone, item);
254 		x = 0;
255 	}
256 	__this_cpu_write(*p, x);
257 }
258 EXPORT_SYMBOL(__mod_zone_page_state);
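/*
 * Worked example with illustrative numbers: with stat_threshold = 32, a
 * per-cpu diff of 30 and delta = +5, x becomes 35, which exceeds the
 * threshold, so all 35 pages are folded into the zone and global counters
 * via zone_page_state_add() and the per-cpu diff is reset to 0. A delta
 * of +1 would instead simply leave 31 in the per-cpu diff.
 */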
259 
260 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
261 				long delta)
262 {
263 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
264 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
265 	long x;
266 	long t;
267 
268 	x = delta + __this_cpu_read(*p);
269 
270 	t = __this_cpu_read(pcp->stat_threshold);
271 
272 	if (unlikely(x > t || x < -t)) {
273 		node_page_state_add(x, pgdat, item);
274 		x = 0;
275 	}
276 	__this_cpu_write(*p, x);
277 }
278 EXPORT_SYMBOL(__mod_node_page_state);
279 
280 /*
281  * Optimized increment and decrement functions.
282  *
283  * These are only for a single page and therefore can take a struct page *
284  * argument instead of struct zone *. This allows the inclusion of the code
285  * generated for page_zone(page) into the optimized functions.
286  *
287  * No overflow check is necessary and therefore the differential can be
288  * incremented or decremented in place which may allow the compilers to
289  * generate better code.
290  * The increment or decrement is known and therefore one boundary check can
291  * be omitted.
292  *
293  * NOTE: These functions are very performance sensitive. Change only
294  * with care.
295  *
296  * Some processors have inc/dec instructions that are atomic vs an interrupt.
297  * However, the code must first determine the differential location in a zone
298  * based on the processor number and then inc/dec the counter. There is no
299  * guarantee without disabling preemption that the processor will not change
300  * in between and therefore the atomicity vs. interrupt cannot be exploited
301  * in a useful way here.
302  */
303 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
304 {
305 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
306 	s8 __percpu *p = pcp->vm_stat_diff + item;
307 	s8 v, t;
308 
309 	v = __this_cpu_inc_return(*p);
310 	t = __this_cpu_read(pcp->stat_threshold);
311 	if (unlikely(v > t)) {
312 		s8 overstep = t >> 1;
313 
314 		zone_page_state_add(v + overstep, zone, item);
315 		__this_cpu_write(*p, -overstep);
316 	}
317 }
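/*
 * Worked example of the overstep logic, with illustrative numbers: if
 * stat_threshold is 32 and the per-cpu diff reaches 33, overstep is
 * 32 >> 1 = 16, so 33 + 16 = 49 is added to the zone counter and the
 * per-cpu diff restarts at -16, leaving extra headroom before the global
 * counter has to be touched again.
 */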
318 
319 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
320 {
321 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
322 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
323 	s8 v, t;
324 
325 	v = __this_cpu_inc_return(*p);
326 	t = __this_cpu_read(pcp->stat_threshold);
327 	if (unlikely(v > t)) {
328 		s8 overstep = t >> 1;
329 
330 		node_page_state_add(v + overstep, pgdat, item);
331 		__this_cpu_write(*p, -overstep);
332 	}
333 }
334 
335 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
336 {
337 	__inc_zone_state(page_zone(page), item);
338 }
339 EXPORT_SYMBOL(__inc_zone_page_state);
340 
341 void __inc_node_page_state(struct page *page, enum node_stat_item item)
342 {
343 	__inc_node_state(page_pgdat(page), item);
344 }
345 EXPORT_SYMBOL(__inc_node_page_state);
346 
347 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
348 {
349 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
350 	s8 __percpu *p = pcp->vm_stat_diff + item;
351 	s8 v, t;
352 
353 	v = __this_cpu_dec_return(*p);
354 	t = __this_cpu_read(pcp->stat_threshold);
355 	if (unlikely(v < -t)) {
356 		s8 overstep = t >> 1;
357 
358 		zone_page_state_add(v - overstep, zone, item);
359 		__this_cpu_write(*p, overstep);
360 	}
361 }
362 
363 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
364 {
365 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
366 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
367 	s8 v, t;
368 
369 	v = __this_cpu_dec_return(*p);
370 	t = __this_cpu_read(pcp->stat_threshold);
371 	if (unlikely(v < -t)) {
372 		s8 overstep = t >> 1;
373 
374 		node_page_state_add(v - overstep, pgdat, item);
375 		__this_cpu_write(*p, overstep);
376 	}
377 }
378 
379 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
380 {
381 	__dec_zone_state(page_zone(page), item);
382 }
383 EXPORT_SYMBOL(__dec_zone_page_state);
384 
385 void __dec_node_page_state(struct page *page, enum node_stat_item item)
386 {
387 	__dec_node_state(page_pgdat(page), item);
388 }
389 EXPORT_SYMBOL(__dec_node_page_state);
390 
391 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
392 /*
393  * If we have cmpxchg_local support then we can avoid the overhead that comes
394  * with local_irq_save/restore by using this_cpu_cmpxchg.
395  *
396  * mod_state() modifies the zone counter state through atomic per cpu
397  * operations.
398  *
399  * Overstep mode specifies how overstepping should be handled:
400  *     0       No overstepping
401  *     1       Overstepping half of threshold
402  *     -1      Overstepping minus half of threshold
403  */
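/*
 * Worked example with illustrative numbers: with stat_threshold t = 125
 * and overstep_mode = 1 (an increment), a per-cpu diff of 125 plus a
 * delta of +1 gives n = 126 > t, so os = 62, z = 126 + 62 = 188 is added
 * to the zone counter and the per-cpu diff restarts at -62. If the
 * this_cpu_cmpxchg() below sees that a concurrent update changed the diff
 * in the meantime, the loop simply retries with the fresh value.
 */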
404 static inline void mod_zone_state(struct zone *zone,
405        enum zone_stat_item item, long delta, int overstep_mode)
406 {
407 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
408 	s8 __percpu *p = pcp->vm_stat_diff + item;
409 	long o, n, t, z;
410 
411 	do {
412 		z = 0;  /* overflow to zone counters */
413 
414 		/*
415 		 * The fetching of the stat_threshold is racy. We may apply
416 		 * a counter threshold to the wrong cpu if we get
417 		 * rescheduled while executing here. However, the next
418 		 * counter update will apply the threshold again and
419 		 * therefore bring the counter under the threshold again.
420 		 *
421 		 * Most of the time the thresholds are the same anyways
422 		 * for all cpus in a zone.
423 		 */
424 		t = this_cpu_read(pcp->stat_threshold);
425 
426 		o = this_cpu_read(*p);
427 		n = delta + o;
428 
429 		if (n > t || n < -t) {
430 			int os = overstep_mode * (t >> 1);
431 
432 			/* Overflow must be added to zone counters */
433 			z = n + os;
434 			n = -os;
435 		}
436 	} while (this_cpu_cmpxchg(*p, o, n) != o);
437 
438 	if (z)
439 		zone_page_state_add(z, zone, item);
440 }
441 
442 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
443 			 long delta)
444 {
445 	mod_zone_state(zone, item, delta, 0);
446 }
447 EXPORT_SYMBOL(mod_zone_page_state);
448 
449 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
450 {
451 	mod_zone_state(page_zone(page), item, 1, 1);
452 }
453 EXPORT_SYMBOL(inc_zone_page_state);
454 
455 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
456 {
457 	mod_zone_state(page_zone(page), item, -1, -1);
458 }
459 EXPORT_SYMBOL(dec_zone_page_state);
460 
461 static inline void mod_node_state(struct pglist_data *pgdat,
462        enum node_stat_item item, long delta, int overstep_mode)
463 {
464 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
465 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
466 	long o, n, t, z;
467 
468 	do {
469 		z = 0;  /* overflow to node counters */
470 
471 		/*
472 		 * The fetching of the stat_threshold is racy. We may apply
473 		 * a counter threshold to the wrong cpu if we get
474 		 * rescheduled while executing here. However, the next
475 		 * counter update will apply the threshold again and
476 		 * therefore bring the counter under the threshold again.
477 		 *
478 		 * Most of the time the thresholds are the same anyways
479 		 * for all cpus in a node.
480 		 */
481 		t = this_cpu_read(pcp->stat_threshold);
482 
483 		o = this_cpu_read(*p);
484 		n = delta + o;
485 
486 		if (n > t || n < -t) {
487 			int os = overstep_mode * (t >> 1);
488 
489 			/* Overflow must be added to node counters */
490 			z = n + os;
491 			n = -os;
492 		}
493 	} while (this_cpu_cmpxchg(*p, o, n) != o);
494 
495 	if (z)
496 		node_page_state_add(z, pgdat, item);
497 }
498 
499 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
500 					long delta)
501 {
502 	mod_node_state(pgdat, item, delta, 0);
503 }
504 EXPORT_SYMBOL(mod_node_page_state);
505 
506 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
507 {
508 	mod_node_state(pgdat, item, 1, 1);
509 }
510 
511 void inc_node_page_state(struct page *page, enum node_stat_item item)
512 {
513 	mod_node_state(page_pgdat(page), item, 1, 1);
514 }
515 EXPORT_SYMBOL(inc_node_page_state);
516 
517 void dec_node_page_state(struct page *page, enum node_stat_item item)
518 {
519 	mod_node_state(page_pgdat(page), item, -1, -1);
520 }
521 EXPORT_SYMBOL(dec_node_page_state);
522 #else
523 /*
524  * Use interrupt disable to serialize counter updates
525  */
526 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
527 			 long delta)
528 {
529 	unsigned long flags;
530 
531 	local_irq_save(flags);
532 	__mod_zone_page_state(zone, item, delta);
533 	local_irq_restore(flags);
534 }
535 EXPORT_SYMBOL(mod_zone_page_state);
536 
537 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
538 {
539 	unsigned long flags;
540 	struct zone *zone;
541 
542 	zone = page_zone(page);
543 	local_irq_save(flags);
544 	__inc_zone_state(zone, item);
545 	local_irq_restore(flags);
546 }
547 EXPORT_SYMBOL(inc_zone_page_state);
548 
549 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
550 {
551 	unsigned long flags;
552 
553 	local_irq_save(flags);
554 	__dec_zone_page_state(page, item);
555 	local_irq_restore(flags);
556 }
557 EXPORT_SYMBOL(dec_zone_page_state);
558 
559 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
560 {
561 	unsigned long flags;
562 
563 	local_irq_save(flags);
564 	__inc_node_state(pgdat, item);
565 	local_irq_restore(flags);
566 }
567 EXPORT_SYMBOL(inc_node_state);
568 
569 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
570 					long delta)
571 {
572 	unsigned long flags;
573 
574 	local_irq_save(flags);
575 	__mod_node_page_state(pgdat, item, delta);
576 	local_irq_restore(flags);
577 }
578 EXPORT_SYMBOL(mod_node_page_state);
579 
580 void inc_node_page_state(struct page *page, enum node_stat_item item)
581 {
582 	unsigned long flags;
583 	struct pglist_data *pgdat;
584 
585 	pgdat = page_pgdat(page);
586 	local_irq_save(flags);
587 	__inc_node_state(pgdat, item);
588 	local_irq_restore(flags);
589 }
590 EXPORT_SYMBOL(inc_node_page_state);
591 
592 void dec_node_page_state(struct page *page, enum node_stat_item item)
593 {
594 	unsigned long flags;
595 
596 	local_irq_save(flags);
597 	__dec_node_page_state(page, item);
598 	local_irq_restore(flags);
599 }
600 EXPORT_SYMBOL(dec_node_page_state);
601 #endif
602 
603 /*
604  * Fold a differential into the global counters.
605  * Returns the number of counters updated.
606  */
607 static int fold_diff(int *zone_diff, int *node_diff)
608 {
609 	int i;
610 	int changes = 0;
611 
612 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
613 		if (zone_diff[i]) {
614 			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
615 			changes++;
616 	}
617 
618 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
619 		if (node_diff[i]) {
620 			atomic_long_add(node_diff[i], &vm_node_stat[i]);
621 			changes++;
622 	}
623 	return changes;
624 }
625 
626 /*
627  * Update the zone counters for the current cpu.
628  *
629  * Note that refresh_cpu_vm_stats strives to only access
630  * node local memory. The per cpu pagesets on remote zones are placed
631  * in the memory local to the processor using that pageset. So the
632  * loop over all zones will access a series of cachelines local to
633  * the processor.
634  *
635  * The call to zone_page_state_add updates the cachelines with the
636  * statistics in the remote zone struct as well as the global cachelines
637  * with the global counters. This can cause remote node cache line
638  * bouncing and should only be done when necessary.
639  *
640  * The function returns the number of global counters updated.
641  */
642 static int refresh_cpu_vm_stats(bool do_pagesets)
643 {
644 	struct pglist_data *pgdat;
645 	struct zone *zone;
646 	int i;
647 	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
648 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
649 	int changes = 0;
650 
651 	for_each_populated_zone(zone) {
652 		struct per_cpu_pageset __percpu *p = zone->pageset;
653 
654 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
655 			int v;
656 
657 			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
658 			if (v) {
659 
660 				atomic_long_add(v, &zone->vm_stat[i]);
661 				global_zone_diff[i] += v;
662 #ifdef CONFIG_NUMA
663 				/* 3 seconds idle till flush */
664 				__this_cpu_write(p->expire, 3);
665 #endif
666 			}
667 		}
668 #ifdef CONFIG_NUMA
669 		if (do_pagesets) {
670 			cond_resched();
671 			/*
672 			 * Deal with draining the remote pageset of this
673 			 * processor
674 			 *
675 			 * Check if there are pages remaining in this pageset;
676 			 * if not, there is nothing to expire.
677 			 */
678 			if (!__this_cpu_read(p->expire) ||
679 			       !__this_cpu_read(p->pcp.count))
680 				continue;
681 
682 			/*
683 			 * We never drain zones local to this processor.
684 			 */
685 			if (zone_to_nid(zone) == numa_node_id()) {
686 				__this_cpu_write(p->expire, 0);
687 				continue;
688 			}
689 
690 			if (__this_cpu_dec_return(p->expire))
691 				continue;
692 
693 			if (__this_cpu_read(p->pcp.count)) {
694 				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
695 				changes++;
696 			}
697 		}
698 #endif
699 	}
700 
701 	for_each_online_pgdat(pgdat) {
702 		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
703 
704 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
705 			int v;
706 
707 			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
708 			if (v) {
709 				atomic_long_add(v, &pgdat->vm_stat[i]);
710 				global_node_diff[i] += v;
711 			}
712 		}
713 	}
714 
715 	changes += fold_diff(global_zone_diff, global_node_diff);
716 	return changes;
717 }
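/*
 * Illustrative timeline for the expire logic above, assuming the default
 * sysctl_stat_interval of HZ: an update against a remote zone sets
 * p->expire to 3; if the next three roughly one second refresh passes see
 * no further stat activity for that zone, expire counts down to zero and
 * any pages still sitting in the remote pcp list are drained back to the
 * buddy allocator.
 */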
718 
719 /*
720  * Fold the data for an offline cpu into the global array.
721  * There cannot be any access by the offline cpu and therefore
722  * synchronization is simplified.
723  */
724 void cpu_vm_stats_fold(int cpu)
725 {
726 	struct pglist_data *pgdat;
727 	struct zone *zone;
728 	int i;
729 	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
730 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
731 
732 	for_each_populated_zone(zone) {
733 		struct per_cpu_pageset *p;
734 
735 		p = per_cpu_ptr(zone->pageset, cpu);
736 
737 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
738 			if (p->vm_stat_diff[i]) {
739 				int v;
740 
741 				v = p->vm_stat_diff[i];
742 				p->vm_stat_diff[i] = 0;
743 				atomic_long_add(v, &zone->vm_stat[i]);
744 				global_zone_diff[i] += v;
745 			}
746 	}
747 
748 	for_each_online_pgdat(pgdat) {
749 		struct per_cpu_nodestat *p;
750 
751 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
752 
753 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
754 			if (p->vm_node_stat_diff[i]) {
755 				int v;
756 
757 				v = p->vm_node_stat_diff[i];
758 				p->vm_node_stat_diff[i] = 0;
759 				atomic_long_add(v, &pgdat->vm_stat[i]);
760 				global_node_diff[i] += v;
761 			}
762 	}
763 
764 	fold_diff(global_zone_diff, global_node_diff);
765 }
766 
767 /*
768  * This is only called if !populated_zone(zone), which implies no other users of
769  * pset->vm_stat_diff[] exist.
770  */
771 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
772 {
773 	int i;
774 
775 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
776 		if (pset->vm_stat_diff[i]) {
777 			int v = pset->vm_stat_diff[i];
778 			pset->vm_stat_diff[i] = 0;
779 			atomic_long_add(v, &zone->vm_stat[i]);
780 			atomic_long_add(v, &vm_zone_stat[i]);
781 		}
782 }
783 #endif
784 
785 #ifdef CONFIG_NUMA
786 /*
787  * Determine the per node value of a stat item. This function
788  * is called frequently in a NUMA machine, so try to be as
789  * frugal as possible.
790  */
791 unsigned long sum_zone_node_page_state(int node,
792 				 enum zone_stat_item item)
793 {
794 	struct zone *zones = NODE_DATA(node)->node_zones;
795 	int i;
796 	unsigned long count = 0;
797 
798 	for (i = 0; i < MAX_NR_ZONES; i++)
799 		count += zone_page_state(zones + i, item);
800 
801 	return count;
802 }
803 
804 /*
805  * Determine the per node value of a stat item.
806  */
807 unsigned long node_page_state(struct pglist_data *pgdat,
808 				enum node_stat_item item)
809 {
810 	long x = atomic_long_read(&pgdat->vm_stat[item]);
811 #ifdef CONFIG_SMP
812 	if (x < 0)
813 		x = 0;
814 #endif
815 	return x;
816 }
817 #endif
818 
819 #ifdef CONFIG_COMPACTION
820 
821 struct contig_page_info {
822 	unsigned long free_pages;
823 	unsigned long free_blocks_total;
824 	unsigned long free_blocks_suitable;
825 };
826 
827 /*
828  * Calculate the number of free pages in a zone, how many contiguous
829  * pages are free and how many are large enough to satisfy an allocation of
830  * the target size. Note that this function makes no attempt to estimate
831  * how many suitable free blocks there *might* be if MOVABLE pages were
832  * migrated. Calculating that is possible, but expensive and can be
833  * figured out from userspace.
834  */
835 static void fill_contig_page_info(struct zone *zone,
836 				unsigned int suitable_order,
837 				struct contig_page_info *info)
838 {
839 	unsigned int order;
840 
841 	info->free_pages = 0;
842 	info->free_blocks_total = 0;
843 	info->free_blocks_suitable = 0;
844 
845 	for (order = 0; order < MAX_ORDER; order++) {
846 		unsigned long blocks;
847 
848 		/* Count number of free blocks */
849 		blocks = zone->free_area[order].nr_free;
850 		info->free_blocks_total += blocks;
851 
852 		/* Count free base pages */
853 		info->free_pages += blocks << order;
854 
855 		/* Count the suitable free blocks */
856 		if (order >= suitable_order)
857 			info->free_blocks_suitable += blocks <<
858 						(order - suitable_order);
859 	}
860 }
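/*
 * Worked example with illustrative numbers: a zone whose free lists hold
 * 10 order-0 blocks and 4 order-2 blocks, examined for suitable_order = 1,
 * yields free_blocks_total = 14, free_pages = 10 + 4 * 4 = 26 and
 * free_blocks_suitable = 4 << (2 - 1) = 8, since each order-2 block can
 * be split into two order-1 blocks.
 */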
861 
862 /*
863  * A fragmentation index only makes sense if an allocation of a requested
864  * size would fail. If that is true, the fragmentation index indicates
865  * whether external fragmentation or a lack of memory was the problem.
866  * The value can be used to determine if page reclaim or compaction
867  * should be used.
868  */
869 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
870 {
871 	unsigned long requested = 1UL << order;
872 
873 	if (!info->free_blocks_total)
874 		return 0;
875 
876 	/* Fragmentation index only makes sense when a request would fail */
877 	if (info->free_blocks_suitable)
878 		return -1000;
879 
880 	/*
881 	 * Index is between 0 and 1 so return within 3 decimal places
882 	 *
883 	 * 0 => allocation would fail due to lack of memory
884 	 * 1 => allocation would fail due to fragmentation
885 	 */
886 	return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
887 }
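/*
 * Worked example with illustrative numbers: for an order-4 request
 * (requested = 16) against 240 free pages that are all isolated order-0
 * blocks, free_blocks_total = 240 and nothing is suitable, so the index
 * is 1000 - (1000 + 240 * 1000 / 16) / 240 = 1000 - 66 = 934, i.e. 0.934:
 * the failure is due to fragmentation rather than lack of memory.
 */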
888 
889 /* Same as __fragmentation index but allocs contig_page_info on stack */
890 int fragmentation_index(struct zone *zone, unsigned int order)
891 {
892 	struct contig_page_info info;
893 
894 	fill_contig_page_info(zone, order, &info);
895 	return __fragmentation_index(order, &info);
896 }
897 #endif
898 
899 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
900 #ifdef CONFIG_ZONE_DMA
901 #define TEXT_FOR_DMA(xx) xx "_dma",
902 #else
903 #define TEXT_FOR_DMA(xx)
904 #endif
905 
906 #ifdef CONFIG_ZONE_DMA32
907 #define TEXT_FOR_DMA32(xx) xx "_dma32",
908 #else
909 #define TEXT_FOR_DMA32(xx)
910 #endif
911 
912 #ifdef CONFIG_HIGHMEM
913 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
914 #else
915 #define TEXT_FOR_HIGHMEM(xx)
916 #endif
917 
918 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
919 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
920 
921 const char * const vmstat_text[] = {
922 	/* enum zone_stat_item counters */
923 	"nr_free_pages",
924 	"nr_zone_inactive_anon",
925 	"nr_zone_active_anon",
926 	"nr_zone_inactive_file",
927 	"nr_zone_active_file",
928 	"nr_zone_unevictable",
929 	"nr_zone_write_pending",
930 	"nr_mlock",
931 	"nr_slab_reclaimable",
932 	"nr_slab_unreclaimable",
933 	"nr_page_table_pages",
934 	"nr_kernel_stack",
935 	"nr_bounce",
936 #if IS_ENABLED(CONFIG_ZSMALLOC)
937 	"nr_zspages",
938 #endif
939 #ifdef CONFIG_NUMA
940 	"numa_hit",
941 	"numa_miss",
942 	"numa_foreign",
943 	"numa_interleave",
944 	"numa_local",
945 	"numa_other",
946 #endif
947 	"nr_free_cma",
948 
949 	/* Node-based counters */
950 	"nr_inactive_anon",
951 	"nr_active_anon",
952 	"nr_inactive_file",
953 	"nr_active_file",
954 	"nr_unevictable",
955 	"nr_isolated_anon",
956 	"nr_isolated_file",
957 	"workingset_refault",
958 	"workingset_activate",
959 	"workingset_nodereclaim",
960 	"nr_anon_pages",
961 	"nr_mapped",
962 	"nr_file_pages",
963 	"nr_dirty",
964 	"nr_writeback",
965 	"nr_writeback_temp",
966 	"nr_shmem",
967 	"nr_shmem_hugepages",
968 	"nr_shmem_pmdmapped",
969 	"nr_anon_transparent_hugepages",
970 	"nr_unstable",
971 	"nr_vmscan_write",
972 	"nr_vmscan_immediate_reclaim",
973 	"nr_dirtied",
974 	"nr_written",
975 
976 	/* enum writeback_stat_item counters */
977 	"nr_dirty_threshold",
978 	"nr_dirty_background_threshold",
979 
980 #ifdef CONFIG_VM_EVENT_COUNTERS
981 	/* enum vm_event_item counters */
982 	"pgpgin",
983 	"pgpgout",
984 	"pswpin",
985 	"pswpout",
986 
987 	TEXTS_FOR_ZONES("pgalloc")
988 	TEXTS_FOR_ZONES("allocstall")
989 	TEXTS_FOR_ZONES("pgskip")
990 
991 	"pgfree",
992 	"pgactivate",
993 	"pgdeactivate",
994 	"pglazyfree",
995 
996 	"pgfault",
997 	"pgmajfault",
998 	"pglazyfreed",
999 
1000 	"pgrefill",
1001 	"pgsteal_kswapd",
1002 	"pgsteal_direct",
1003 	"pgscan_kswapd",
1004 	"pgscan_direct",
1005 	"pgscan_direct_throttle",
1006 
1007 #ifdef CONFIG_NUMA
1008 	"zone_reclaim_failed",
1009 #endif
1010 	"pginodesteal",
1011 	"slabs_scanned",
1012 	"kswapd_inodesteal",
1013 	"kswapd_low_wmark_hit_quickly",
1014 	"kswapd_high_wmark_hit_quickly",
1015 	"pageoutrun",
1016 
1017 	"pgrotated",
1018 
1019 	"drop_pagecache",
1020 	"drop_slab",
1021 
1022 #ifdef CONFIG_NUMA_BALANCING
1023 	"numa_pte_updates",
1024 	"numa_huge_pte_updates",
1025 	"numa_hint_faults",
1026 	"numa_hint_faults_local",
1027 	"numa_pages_migrated",
1028 #endif
1029 #ifdef CONFIG_MIGRATION
1030 	"pgmigrate_success",
1031 	"pgmigrate_fail",
1032 #endif
1033 #ifdef CONFIG_COMPACTION
1034 	"compact_migrate_scanned",
1035 	"compact_free_scanned",
1036 	"compact_isolated",
1037 	"compact_stall",
1038 	"compact_fail",
1039 	"compact_success",
1040 	"compact_daemon_wake",
1041 	"compact_daemon_migrate_scanned",
1042 	"compact_daemon_free_scanned",
1043 #endif
1044 
1045 #ifdef CONFIG_HUGETLB_PAGE
1046 	"htlb_buddy_alloc_success",
1047 	"htlb_buddy_alloc_fail",
1048 #endif
1049 	"unevictable_pgs_culled",
1050 	"unevictable_pgs_scanned",
1051 	"unevictable_pgs_rescued",
1052 	"unevictable_pgs_mlocked",
1053 	"unevictable_pgs_munlocked",
1054 	"unevictable_pgs_cleared",
1055 	"unevictable_pgs_stranded",
1056 
1057 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1058 	"thp_fault_alloc",
1059 	"thp_fault_fallback",
1060 	"thp_collapse_alloc",
1061 	"thp_collapse_alloc_failed",
1062 	"thp_file_alloc",
1063 	"thp_file_mapped",
1064 	"thp_split_page",
1065 	"thp_split_page_failed",
1066 	"thp_deferred_split_page",
1067 	"thp_split_pmd",
1068 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1069 	"thp_split_pud",
1070 #endif
1071 	"thp_zero_page_alloc",
1072 	"thp_zero_page_alloc_failed",
1073 #endif
1074 #ifdef CONFIG_MEMORY_BALLOON
1075 	"balloon_inflate",
1076 	"balloon_deflate",
1077 #ifdef CONFIG_BALLOON_COMPACTION
1078 	"balloon_migrate",
1079 #endif
1080 #endif /* CONFIG_MEMORY_BALLOON */
1081 #ifdef CONFIG_DEBUG_TLBFLUSH
1082 #ifdef CONFIG_SMP
1083 	"nr_tlb_remote_flush",
1084 	"nr_tlb_remote_flush_received",
1085 #endif /* CONFIG_SMP */
1086 	"nr_tlb_local_flush_all",
1087 	"nr_tlb_local_flush_one",
1088 #endif /* CONFIG_DEBUG_TLBFLUSH */
1089 
1090 #ifdef CONFIG_DEBUG_VM_VMACACHE
1091 	"vmacache_find_calls",
1092 	"vmacache_find_hits",
1093 	"vmacache_full_flushes",
1094 #endif
1095 #endif /* CONFIG_VM_EVENT_COUNTERS */
1096 };
1097 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
1098 
1099 
1100 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1101      defined(CONFIG_PROC_FS)
1102 static void *frag_start(struct seq_file *m, loff_t *pos)
1103 {
1104 	pg_data_t *pgdat;
1105 	loff_t node = *pos;
1106 
1107 	for (pgdat = first_online_pgdat();
1108 	     pgdat && node;
1109 	     pgdat = next_online_pgdat(pgdat))
1110 		--node;
1111 
1112 	return pgdat;
1113 }
1114 
1115 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1116 {
1117 	pg_data_t *pgdat = (pg_data_t *)arg;
1118 
1119 	(*pos)++;
1120 	return next_online_pgdat(pgdat);
1121 }
1122 
1123 static void frag_stop(struct seq_file *m, void *arg)
1124 {
1125 }
1126 
1127 /*
1128  * Walk zones in a node and print using a callback.
1129  * If @assert_populated is true, only use callback for zones that are populated.
1130  */
1131 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1132 		bool assert_populated,
1133 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1134 {
1135 	struct zone *zone;
1136 	struct zone *node_zones = pgdat->node_zones;
1137 	unsigned long flags;
1138 
1139 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1140 		if (assert_populated && !populated_zone(zone))
1141 			continue;
1142 
1143 		spin_lock_irqsave(&zone->lock, flags);
1144 		print(m, pgdat, zone);
1145 		spin_unlock_irqrestore(&zone->lock, flags);
1146 	}
1147 }
1148 #endif
1149 
1150 #ifdef CONFIG_PROC_FS
1151 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1152 						struct zone *zone)
1153 {
1154 	int order;
1155 
1156 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1157 	for (order = 0; order < MAX_ORDER; ++order)
1158 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1159 	seq_putc(m, '\n');
1160 }
1161 
1162 /*
1163  * This walks the free areas for each zone.
1164  */
1165 static int frag_show(struct seq_file *m, void *arg)
1166 {
1167 	pg_data_t *pgdat = (pg_data_t *)arg;
1168 	walk_zones_in_node(m, pgdat, true, frag_show_print);
1169 	return 0;
1170 }
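/*
 * The resulting /proc/buddyinfo lines look roughly like this (the free
 * block counts are illustrative only), one column per order up to
 * MAX_ORDER:
 *
 *	Node 0, zone   Normal    145     62     31     12      4      2 ...
 */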
1171 
1172 static void pagetypeinfo_showfree_print(struct seq_file *m,
1173 					pg_data_t *pgdat, struct zone *zone)
1174 {
1175 	int order, mtype;
1176 
1177 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1178 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1179 					pgdat->node_id,
1180 					zone->name,
1181 					migratetype_names[mtype]);
1182 		for (order = 0; order < MAX_ORDER; ++order) {
1183 			unsigned long freecount = 0;
1184 			struct free_area *area;
1185 			struct list_head *curr;
1186 
1187 			area = &(zone->free_area[order]);
1188 
1189 			list_for_each(curr, &area->free_list[mtype])
1190 				freecount++;
1191 			seq_printf(m, "%6lu ", freecount);
1192 		}
1193 		seq_putc(m, '\n');
1194 	}
1195 }
1196 
1197 /* Print out the free pages at each order for each migratetype */
1198 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1199 {
1200 	int order;
1201 	pg_data_t *pgdat = (pg_data_t *)arg;
1202 
1203 	/* Print header */
1204 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1205 	for (order = 0; order < MAX_ORDER; ++order)
1206 		seq_printf(m, "%6d ", order);
1207 	seq_putc(m, '\n');
1208 
1209 	walk_zones_in_node(m, pgdat, true, pagetypeinfo_showfree_print);
1210 
1211 	return 0;
1212 }
1213 
1214 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1215 					pg_data_t *pgdat, struct zone *zone)
1216 {
1217 	int mtype;
1218 	unsigned long pfn;
1219 	unsigned long start_pfn = zone->zone_start_pfn;
1220 	unsigned long end_pfn = zone_end_pfn(zone);
1221 	unsigned long count[MIGRATE_TYPES] = { 0, };
1222 
1223 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1224 		struct page *page;
1225 
1226 		if (!pfn_valid(pfn))
1227 			continue;
1228 
1229 		page = pfn_to_page(pfn);
1230 
1231 		/* Watch for unexpected holes punched in the memmap */
1232 		if (!memmap_valid_within(pfn, page, zone))
1233 			continue;
1234 
1235 		if (page_zone(page) != zone)
1236 			continue;
1237 
1238 		mtype = get_pageblock_migratetype(page);
1239 
1240 		if (mtype < MIGRATE_TYPES)
1241 			count[mtype]++;
1242 	}
1243 
1244 	/* Print counts */
1245 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1246 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1247 		seq_printf(m, "%12lu ", count[mtype]);
1248 	seq_putc(m, '\n');
1249 }
1250 
1251 /* Print out the number of pageblocks for each migratetype */
1252 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1253 {
1254 	int mtype;
1255 	pg_data_t *pgdat = (pg_data_t *)arg;
1256 
1257 	seq_printf(m, "\n%-23s", "Number of blocks type ");
1258 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1259 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1260 	seq_putc(m, '\n');
1261 	walk_zones_in_node(m, pgdat, true, pagetypeinfo_showblockcount_print);
1262 
1263 	return 0;
1264 }
1265 
1266 /*
1267  * Print out the number of pageblocks for each migratetype that contain pages
1268  * of other types. This gives an indication of how well fallbacks are being
1269  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1270  * to determine what is going on.
1271  */
1272 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1273 {
1274 #ifdef CONFIG_PAGE_OWNER
1275 	int mtype;
1276 
1277 	if (!static_branch_unlikely(&page_owner_inited))
1278 		return;
1279 
1280 	drain_all_pages(NULL);
1281 
1282 	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1283 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1284 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1285 	seq_putc(m, '\n');
1286 
1287 	walk_zones_in_node(m, pgdat, true, pagetypeinfo_showmixedcount_print);
1288 #endif /* CONFIG_PAGE_OWNER */
1289 }
1290 
1291 /*
1292  * This prints out statistics in relation to grouping pages by mobility.
1293  * It is expensive to collect so do not constantly read the file.
1294  */
1295 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1296 {
1297 	pg_data_t *pgdat = (pg_data_t *)arg;
1298 
1299 	/* check memoryless node */
1300 	if (!node_state(pgdat->node_id, N_MEMORY))
1301 		return 0;
1302 
1303 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1304 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1305 	seq_putc(m, '\n');
1306 	pagetypeinfo_showfree(m, pgdat);
1307 	pagetypeinfo_showblockcount(m, pgdat);
1308 	pagetypeinfo_showmixedcount(m, pgdat);
1309 
1310 	return 0;
1311 }
1312 
1313 static const struct seq_operations fragmentation_op = {
1314 	.start	= frag_start,
1315 	.next	= frag_next,
1316 	.stop	= frag_stop,
1317 	.show	= frag_show,
1318 };
1319 
1320 static int fragmentation_open(struct inode *inode, struct file *file)
1321 {
1322 	return seq_open(file, &fragmentation_op);
1323 }
1324 
1325 static const struct file_operations fragmentation_file_operations = {
1326 	.open		= fragmentation_open,
1327 	.read		= seq_read,
1328 	.llseek		= seq_lseek,
1329 	.release	= seq_release,
1330 };
1331 
1332 static const struct seq_operations pagetypeinfo_op = {
1333 	.start	= frag_start,
1334 	.next	= frag_next,
1335 	.stop	= frag_stop,
1336 	.show	= pagetypeinfo_show,
1337 };
1338 
1339 static int pagetypeinfo_open(struct inode *inode, struct file *file)
1340 {
1341 	return seq_open(file, &pagetypeinfo_op);
1342 }
1343 
1344 static const struct file_operations pagetypeinfo_file_ops = {
1345 	.open		= pagetypeinfo_open,
1346 	.read		= seq_read,
1347 	.llseek		= seq_lseek,
1348 	.release	= seq_release,
1349 };
1350 
1351 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1352 {
1353 	int zid;
1354 
1355 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1356 		struct zone *compare = &pgdat->node_zones[zid];
1357 
1358 		if (populated_zone(compare))
1359 			return zone == compare;
1360 	}
1361 
1362 	return false;
1363 }
1364 
1365 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1366 							struct zone *zone)
1367 {
1368 	int i;
1369 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1370 	if (is_zone_first_populated(pgdat, zone)) {
1371 		seq_printf(m, "\n  per-node stats");
1372 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1373 			seq_printf(m, "\n      %-12s %lu",
1374 				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
1375 				node_page_state(pgdat, i));
1376 		}
1377 	}
1378 	seq_printf(m,
1379 		   "\n  pages free     %lu"
1380 		   "\n        min      %lu"
1381 		   "\n        low      %lu"
1382 		   "\n        high     %lu"
1383 		   "\n        spanned  %lu"
1384 		   "\n        present  %lu"
1385 		   "\n        managed  %lu",
1386 		   zone_page_state(zone, NR_FREE_PAGES),
1387 		   min_wmark_pages(zone),
1388 		   low_wmark_pages(zone),
1389 		   high_wmark_pages(zone),
1390 		   zone->spanned_pages,
1391 		   zone->present_pages,
1392 		   zone->managed_pages);
1393 
1394 	seq_printf(m,
1395 		   "\n        protection: (%ld",
1396 		   zone->lowmem_reserve[0]);
1397 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1398 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1399 	seq_putc(m, ')');
1400 
1401 	/* If unpopulated, no other information is useful */
1402 	if (!populated_zone(zone)) {
1403 		seq_putc(m, '\n');
1404 		return;
1405 	}
1406 
1407 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1408 		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
1409 				zone_page_state(zone, i));
1410 
1411 	seq_printf(m, "\n  pagesets");
1412 	for_each_online_cpu(i) {
1413 		struct per_cpu_pageset *pageset;
1414 
1415 		pageset = per_cpu_ptr(zone->pageset, i);
1416 		seq_printf(m,
1417 			   "\n    cpu: %i"
1418 			   "\n              count: %i"
1419 			   "\n              high:  %i"
1420 			   "\n              batch: %i",
1421 			   i,
1422 			   pageset->pcp.count,
1423 			   pageset->pcp.high,
1424 			   pageset->pcp.batch);
1425 #ifdef CONFIG_SMP
1426 		seq_printf(m, "\n  vm stats threshold: %d",
1427 				pageset->stat_threshold);
1428 #endif
1429 	}
1430 	seq_printf(m,
1431 		   "\n  node_unreclaimable:  %u"
1432 		   "\n  start_pfn:           %lu"
1433 		   "\n  node_inactive_ratio: %u",
1434 		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1435 		   zone->zone_start_pfn,
1436 		   zone->zone_pgdat->inactive_ratio);
1437 	seq_putc(m, '\n');
1438 }
1439 
1440 /*
1441  * Output information about zones in @pgdat.  All zones are printed regardless
1442  * of whether they are populated or not: lowmem_reserve_ratio operates on the
1443  * set of all zones and userspace would not be aware of such zones if they are
1444  * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1445  */
1446 static int zoneinfo_show(struct seq_file *m, void *arg)
1447 {
1448 	pg_data_t *pgdat = (pg_data_t *)arg;
1449 	walk_zones_in_node(m, pgdat, false, zoneinfo_show_print);
1450 	return 0;
1451 }
1452 
1453 static const struct seq_operations zoneinfo_op = {
1454 	.start	= frag_start, /* iterate over all zones. The same as in
1455 			       * fragmentation. */
1456 	.next	= frag_next,
1457 	.stop	= frag_stop,
1458 	.show	= zoneinfo_show,
1459 };
1460 
1461 static int zoneinfo_open(struct inode *inode, struct file *file)
1462 {
1463 	return seq_open(file, &zoneinfo_op);
1464 }
1465 
1466 static const struct file_operations proc_zoneinfo_file_operations = {
1467 	.open		= zoneinfo_open,
1468 	.read		= seq_read,
1469 	.llseek		= seq_lseek,
1470 	.release	= seq_release,
1471 };
1472 
1473 enum writeback_stat_item {
1474 	NR_DIRTY_THRESHOLD,
1475 	NR_DIRTY_BG_THRESHOLD,
1476 	NR_VM_WRITEBACK_STAT_ITEMS,
1477 };
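/*
 * vmstat_start() below builds one flat array whose layout mirrors
 * vmstat_text[]: first the NR_VM_ZONE_STAT_ITEMS zone counters, then the
 * NR_VM_NODE_STAT_ITEMS node counters, then the two writeback thresholds
 * and finally, when configured, the vm event counters. *pos indexes
 * straight into this array, one element per line of /proc/vmstat.
 */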
1478 
1479 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1480 {
1481 	unsigned long *v;
1482 	int i, stat_items_size;
1483 
1484 	if (*pos >= ARRAY_SIZE(vmstat_text))
1485 		return NULL;
1486 	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1487 			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
1488 			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1489 
1490 #ifdef CONFIG_VM_EVENT_COUNTERS
1491 	stat_items_size += sizeof(struct vm_event_state);
1492 #endif
1493 
1494 	v = kmalloc(stat_items_size, GFP_KERNEL);
1495 	m->private = v;
1496 	if (!v)
1497 		return ERR_PTR(-ENOMEM);
1498 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1499 		v[i] = global_page_state(i);
1500 	v += NR_VM_ZONE_STAT_ITEMS;
1501 
1502 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1503 		v[i] = global_node_page_state(i);
1504 	v += NR_VM_NODE_STAT_ITEMS;
1505 
1506 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1507 			    v + NR_DIRTY_THRESHOLD);
1508 	v += NR_VM_WRITEBACK_STAT_ITEMS;
1509 
1510 #ifdef CONFIG_VM_EVENT_COUNTERS
1511 	all_vm_events(v);
1512 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1513 	v[PGPGOUT] /= 2;
1514 #endif
1515 	return (unsigned long *)m->private + *pos;
1516 }
1517 
1518 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1519 {
1520 	(*pos)++;
1521 	if (*pos >= ARRAY_SIZE(vmstat_text))
1522 		return NULL;
1523 	return (unsigned long *)m->private + *pos;
1524 }
1525 
1526 static int vmstat_show(struct seq_file *m, void *arg)
1527 {
1528 	unsigned long *l = arg;
1529 	unsigned long off = l - (unsigned long *)m->private;
1530 
1531 	seq_puts(m, vmstat_text[off]);
1532 	seq_put_decimal_ull(m, " ", *l);
1533 	seq_putc(m, '\n');
1534 	return 0;
1535 }
1536 
1537 static void vmstat_stop(struct seq_file *m, void *arg)
1538 {
1539 	kfree(m->private);
1540 	m->private = NULL;
1541 }
1542 
1543 static const struct seq_operations vmstat_op = {
1544 	.start	= vmstat_start,
1545 	.next	= vmstat_next,
1546 	.stop	= vmstat_stop,
1547 	.show	= vmstat_show,
1548 };
1549 
1550 static int vmstat_open(struct inode *inode, struct file *file)
1551 {
1552 	return seq_open(file, &vmstat_op);
1553 }
1554 
1555 static const struct file_operations proc_vmstat_file_operations = {
1556 	.open		= vmstat_open,
1557 	.read		= seq_read,
1558 	.llseek		= seq_lseek,
1559 	.release	= seq_release,
1560 };
1561 #endif /* CONFIG_PROC_FS */
1562 
1563 #ifdef CONFIG_SMP
1564 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1565 int sysctl_stat_interval __read_mostly = HZ;
1566 
1567 #ifdef CONFIG_PROC_FS
1568 static void refresh_vm_stats(struct work_struct *work)
1569 {
1570 	refresh_cpu_vm_stats(true);
1571 }
1572 
1573 int vmstat_refresh(struct ctl_table *table, int write,
1574 		   void __user *buffer, size_t *lenp, loff_t *ppos)
1575 {
1576 	long val;
1577 	int err;
1578 	int i;
1579 
1580 	/*
1581 	 * The regular update, every sysctl_stat_interval, may come later
1582 	 * than expected: leaving a significant amount in per_cpu buckets.
1583 	 * This is particularly misleading when checking a quantity of HUGE
1584 	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1585 	 * which can equally be echo'ed to or cat'ted from (by root),
1586 	 * can be used to update the stats just before reading them.
1587 	 *
1588 	 * Oh, and since global_page_state() etc. are so careful to hide
1589 	 * transiently negative values, report an error here if any of
1590 	 * the stats is negative, so we know to go looking for imbalance.
1591 	 */
1592 	err = schedule_on_each_cpu(refresh_vm_stats);
1593 	if (err)
1594 		return err;
1595 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1596 		val = atomic_long_read(&vm_zone_stat[i]);
1597 		if (val < 0) {
1598 			pr_warn("%s: %s %ld\n",
1599 				__func__, vmstat_text[i], val);
1600 			err = -EINVAL;
1601 		}
1602 	}
1603 	if (err)
1604 		return err;
1605 	if (write)
1606 		*ppos += *lenp;
1607 	else
1608 		*lenp = 0;
1609 	return 0;
1610 }
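/*
 * Illustrative usage from the shell (as root), per the comment above:
 * either "echo 1 > /proc/sys/vm/stat_refresh" or
 * "cat /proc/sys/vm/stat_refresh" folds the per-cpu differentials into
 * the global counters, e.g. just before sampling /proc/vmstat in a test.
 */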
1611 #endif /* CONFIG_PROC_FS */
1612 
1613 static void vmstat_update(struct work_struct *w)
1614 {
1615 	if (refresh_cpu_vm_stats(true)) {
1616 		/*
1617 		 * Counters were updated so we expect more updates
1618 		 * to occur in the future. Keep on running the
1619 		 * update worker thread.
1620 		 */
1621 		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1622 				this_cpu_ptr(&vmstat_work),
1623 				round_jiffies_relative(sysctl_stat_interval));
1624 	}
1625 }
1626 
1632 /*
1633  * Check if the diffs for a certain cpu indicate that
1634  * an update is needed.
1635  */
1636 static bool need_update(int cpu)
1637 {
1638 	struct zone *zone;
1639 
1640 	for_each_populated_zone(zone) {
1641 		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1642 
1643 		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1644 		/*
1645 		 * The fast way of checking if there are any vmstat diffs.
1646 		 * This works because the diffs are byte sized items.
1647 		 */
1648 		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1649 			return true;
1650 
1651 	}
1652 	return false;
1653 }
1654 
1655 /*
1656  * Switch off vmstat processing and then fold all the remaining differentials
1657  * until the diffs stay at zero. The function is used by NOHZ and can only be
1658  * invoked when tick processing is not active.
1659  */
1660 void quiet_vmstat(void)
1661 {
1662 	if (system_state != SYSTEM_RUNNING)
1663 		return;
1664 
1665 	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1666 		return;
1667 
1668 	if (!need_update(smp_processor_id()))
1669 		return;
1670 
1671 	/*
1672 	 * Just refresh counters and do not care about the pending delayed
1673 	 * vmstat_update. It doesn't fire often enough to matter and canceling
1674 	 * it would be too expensive from this path.
1675 	 * vmstat_shepherd will take care of that for us.
1676 	 */
1677 	refresh_cpu_vm_stats(false);
1678 }
1679 
1680 /*
1681  * Shepherd worker that checks the differentials of processors
1682  * whose vmstat update workers have been disabled because of
1683  * inactivity.
1685  */
1686 static void vmstat_shepherd(struct work_struct *w);
1687 
1688 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1689 
1690 static void vmstat_shepherd(struct work_struct *w)
1691 {
1692 	int cpu;
1693 
1694 	get_online_cpus();
1695 	/* Check processors whose vmstat worker threads have been disabled */
1696 	for_each_online_cpu(cpu) {
1697 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1698 
1699 		if (!delayed_work_pending(dw) && need_update(cpu))
1700 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1701 	}
1702 	put_online_cpus();
1703 
1704 	schedule_delayed_work(&shepherd,
1705 		round_jiffies_relative(sysctl_stat_interval));
1706 }
1707 
1708 static void __init start_shepherd_timer(void)
1709 {
1710 	int cpu;
1711 
1712 	for_each_possible_cpu(cpu)
1713 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1714 			vmstat_update);
1715 
1716 	schedule_delayed_work(&shepherd,
1717 		round_jiffies_relative(sysctl_stat_interval));
1718 }
1719 
1720 static void __init init_cpu_node_state(void)
1721 {
1722 	int node;
1723 
1724 	for_each_online_node(node) {
1725 		if (cpumask_weight(cpumask_of_node(node)) > 0)
1726 			node_set_state(node, N_CPU);
1727 	}
1728 }
1729 
1730 static int vmstat_cpu_online(unsigned int cpu)
1731 {
1732 	refresh_zone_stat_thresholds();
1733 	node_set_state(cpu_to_node(cpu), N_CPU);
1734 	return 0;
1735 }
1736 
1737 static int vmstat_cpu_down_prep(unsigned int cpu)
1738 {
1739 	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1740 	return 0;
1741 }
1742 
1743 static int vmstat_cpu_dead(unsigned int cpu)
1744 {
1745 	const struct cpumask *node_cpus;
1746 	int node;
1747 
1748 	node = cpu_to_node(cpu);
1749 
1750 	refresh_zone_stat_thresholds();
1751 	node_cpus = cpumask_of_node(node);
1752 	if (cpumask_weight(node_cpus) > 0)
1753 		return 0;
1754 
1755 	node_clear_state(node, N_CPU);
1756 	return 0;
1757 }
1758 
1759 #endif
1760 
1761 struct workqueue_struct *mm_percpu_wq;
1762 
1763 void __init init_mm_internals(void)
1764 {
1765 	int ret __maybe_unused;
1766 
1767 	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
1768 
1769 #ifdef CONFIG_SMP
1770 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
1771 					NULL, vmstat_cpu_dead);
1772 	if (ret < 0)
1773 		pr_err("vmstat: failed to register 'dead' hotplug state\n");
1774 
1775 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
1776 					vmstat_cpu_online,
1777 					vmstat_cpu_down_prep);
1778 	if (ret < 0)
1779 		pr_err("vmstat: failed to register 'online' hotplug state\n");
1780 
1781 	get_online_cpus();
1782 	init_cpu_node_state();
1783 	put_online_cpus();
1784 
1785 	start_shepherd_timer();
1786 #endif
1787 #ifdef CONFIG_PROC_FS
1788 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1789 	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1790 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1791 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1792 #endif
1793 }
1794 
1795 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1796 
1797 /*
1798  * Return an index indicating how much of the available free memory is
1799  * unusable for an allocation of the requested size.
1800  */
1801 static int unusable_free_index(unsigned int order,
1802 				struct contig_page_info *info)
1803 {
1804 	/* No free memory is interpreted as all free memory is unusable */
1805 	if (info->free_pages == 0)
1806 		return 1000;
1807 
1808 	/*
1809 	 * Index should be a value between 0 and 1. Return a value to 3
1810 	 * decimal places.
1811 	 *
1812 	 * 0 => no fragmentation
1813 	 * 1 => high fragmentation
1814 	 */
1815 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1816 
1817 }
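/*
 * Worked example with illustrative numbers: for an order-2 request
 * (4 pages) against 100 free pages, of which 10 free blocks are at least
 * order 2, free_blocks_suitable << order = 40 pages are usable, so the
 * index is (100 - 40) * 1000 / 100 = 600, i.e. 0.600 of the free memory
 * cannot service the request.
 */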
1818 
1819 static void unusable_show_print(struct seq_file *m,
1820 					pg_data_t *pgdat, struct zone *zone)
1821 {
1822 	unsigned int order;
1823 	int index;
1824 	struct contig_page_info info;
1825 
1826 	seq_printf(m, "Node %d, zone %8s ",
1827 				pgdat->node_id,
1828 				zone->name);
1829 	for (order = 0; order < MAX_ORDER; ++order) {
1830 		fill_contig_page_info(zone, order, &info);
1831 		index = unusable_free_index(order, &info);
1832 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1833 	}
1834 
1835 	seq_putc(m, '\n');
1836 }
1837 
1838 /*
1839  * Display unusable free space index
1840  *
1841  * The unusable free space index measures how much of the available free
1842  * memory cannot be used to satisfy an allocation of a given size and is a
1843  * value between 0 and 1. The higher the value, the more of the free memory is
1844  * unusable and, by implication, the worse the external fragmentation is. This
1845  * can be expressed as a percentage by multiplying by 100.
1846  */
1847 static int unusable_show(struct seq_file *m, void *arg)
1848 {
1849 	pg_data_t *pgdat = (pg_data_t *)arg;
1850 
1851 	/* check memoryless node */
1852 	if (!node_state(pgdat->node_id, N_MEMORY))
1853 		return 0;
1854 
1855 	walk_zones_in_node(m, pgdat, true, unusable_show_print);
1856 
1857 	return 0;
1858 }
1859 
1860 static const struct seq_operations unusable_op = {
1861 	.start	= frag_start,
1862 	.next	= frag_next,
1863 	.stop	= frag_stop,
1864 	.show	= unusable_show,
1865 };
1866 
1867 static int unusable_open(struct inode *inode, struct file *file)
1868 {
1869 	return seq_open(file, &unusable_op);
1870 }
1871 
1872 static const struct file_operations unusable_file_ops = {
1873 	.open		= unusable_open,
1874 	.read		= seq_read,
1875 	.llseek		= seq_lseek,
1876 	.release	= seq_release,
1877 };
1878 
1879 static void extfrag_show_print(struct seq_file *m,
1880 					pg_data_t *pgdat, struct zone *zone)
1881 {
1882 	unsigned int order;
1883 	int index;
1884 
1885 	/* Alloc on stack as interrupts are disabled for zone walk */
1886 	struct contig_page_info info;
1887 
1888 	seq_printf(m, "Node %d, zone %8s ",
1889 				pgdat->node_id,
1890 				zone->name);
1891 	for (order = 0; order < MAX_ORDER; ++order) {
1892 		fill_contig_page_info(zone, order, &info);
1893 		index = __fragmentation_index(order, &info);
1894 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1895 	}
1896 
1897 	seq_putc(m, '\n');
1898 }
1899 
1900 /*
1901  * Display fragmentation index for orders that allocations would fail for
1902  */
1903 static int extfrag_show(struct seq_file *m, void *arg)
1904 {
1905 	pg_data_t *pgdat = (pg_data_t *)arg;
1906 
1907 	walk_zones_in_node(m, pgdat, true, extfrag_show_print);
1908 
1909 	return 0;
1910 }
1911 
1912 static const struct seq_operations extfrag_op = {
1913 	.start	= frag_start,
1914 	.next	= frag_next,
1915 	.stop	= frag_stop,
1916 	.show	= extfrag_show,
1917 };
1918 
1919 static int extfrag_open(struct inode *inode, struct file *file)
1920 {
1921 	return seq_open(file, &extfrag_op);
1922 }
1923 
1924 static const struct file_operations extfrag_file_ops = {
1925 	.open		= extfrag_open,
1926 	.read		= seq_read,
1927 	.llseek		= seq_lseek,
1928 	.release	= seq_release,
1929 };
1930 
1931 static int __init extfrag_debug_init(void)
1932 {
1933 	struct dentry *extfrag_debug_root;
1934 
1935 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1936 	if (!extfrag_debug_root)
1937 		return -ENOMEM;
1938 
1939 	if (!debugfs_create_file("unusable_index", 0444,
1940 			extfrag_debug_root, NULL, &unusable_file_ops))
1941 		goto fail;
1942 
1943 	if (!debugfs_create_file("extfrag_index", 0444,
1944 			extfrag_debug_root, NULL, &extfrag_file_ops))
1945 		goto fail;
1946 
1947 	return 0;
1948 fail:
1949 	debugfs_remove_recursive(extfrag_debug_root);
1950 	return -ENOMEM;
1951 }
1952 
1953 module_init(extfrag_debug_init);
1954 #endif
1955