xref: /linux/mm/vmstat.c (revision 09316c09dde33aae14f34489d9e3d243ec0d5938)
1f6ac2354SChristoph Lameter /*
2f6ac2354SChristoph Lameter  *  linux/mm/vmstat.c
3f6ac2354SChristoph Lameter  *
4f6ac2354SChristoph Lameter  *  Manages VM statistics
5f6ac2354SChristoph Lameter  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
62244b95aSChristoph Lameter  *
72244b95aSChristoph Lameter  *  zoned VM statistics
82244b95aSChristoph Lameter  *  Copyright (C) 2006 Silicon Graphics, Inc.,
92244b95aSChristoph Lameter  *		Christoph Lameter <christoph@lameter.com>
10f6ac2354SChristoph Lameter  */
118f32f7e5SAlexey Dobriyan #include <linux/fs.h>
12f6ac2354SChristoph Lameter #include <linux/mm.h>
134e950f6fSAlexey Dobriyan #include <linux/err.h>
142244b95aSChristoph Lameter #include <linux/module.h>
155a0e3ad6STejun Heo #include <linux/slab.h>
16df9ecabaSChristoph Lameter #include <linux/cpu.h>
17c748e134SAdrian Bunk #include <linux/vmstat.h>
18e8edc6e0SAlexey Dobriyan #include <linux/sched.h>
19f1a5ab12SMel Gorman #include <linux/math64.h>
2079da826aSMichael Rubin #include <linux/writeback.h>
2136deb0beSNamhyung Kim #include <linux/compaction.h>
226e543d57SLisa Du #include <linux/mm_inline.h>
236e543d57SLisa Du 
246e543d57SLisa Du #include "internal.h"
25f6ac2354SChristoph Lameter 
26f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
27f8891e5eSChristoph Lameter DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
28f8891e5eSChristoph Lameter EXPORT_PER_CPU_SYMBOL(vm_event_states);
29f8891e5eSChristoph Lameter 
3031f961a8SMinchan Kim static void sum_vm_events(unsigned long *ret)
31f8891e5eSChristoph Lameter {
329eccf2a8SChristoph Lameter 	int cpu;
33f8891e5eSChristoph Lameter 	int i;
34f8891e5eSChristoph Lameter 
35f8891e5eSChristoph Lameter 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
36f8891e5eSChristoph Lameter 
3731f961a8SMinchan Kim 	for_each_online_cpu(cpu) {
38f8891e5eSChristoph Lameter 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
39f8891e5eSChristoph Lameter 
40f8891e5eSChristoph Lameter 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
41f8891e5eSChristoph Lameter 			ret[i] += this->event[i];
42f8891e5eSChristoph Lameter 	}
43f8891e5eSChristoph Lameter }
44f8891e5eSChristoph Lameter 
45f8891e5eSChristoph Lameter /*
46f8891e5eSChristoph Lameter  * Accumulate the vm event counters across all CPUs.
47f8891e5eSChristoph Lameter  * The result is unavoidably approximate - it can change
48f8891e5eSChristoph Lameter  * during and after execution of this function.
49f8891e5eSChristoph Lameter  */
50f8891e5eSChristoph Lameter void all_vm_events(unsigned long *ret)
51f8891e5eSChristoph Lameter {
52b5be1132SKOSAKI Motohiro 	get_online_cpus();
5331f961a8SMinchan Kim 	sum_vm_events(ret);
54b5be1132SKOSAKI Motohiro 	put_online_cpus();
55f8891e5eSChristoph Lameter }
5632dd66fcSHeiko Carstens EXPORT_SYMBOL_GPL(all_vm_events);
57f8891e5eSChristoph Lameter 
58f8891e5eSChristoph Lameter /*
59f8891e5eSChristoph Lameter  * Fold the foreign cpu events into our own.
60f8891e5eSChristoph Lameter  *
61f8891e5eSChristoph Lameter  * This is adding to the events on one processor
62f8891e5eSChristoph Lameter  * but keeps the global counts constant.
63f8891e5eSChristoph Lameter  */
64f8891e5eSChristoph Lameter void vm_events_fold_cpu(int cpu)
65f8891e5eSChristoph Lameter {
66f8891e5eSChristoph Lameter 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
67f8891e5eSChristoph Lameter 	int i;
68f8891e5eSChristoph Lameter 
69f8891e5eSChristoph Lameter 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
70f8891e5eSChristoph Lameter 		count_vm_events(i, fold_state->event[i]);
71f8891e5eSChristoph Lameter 		fold_state->event[i] = 0;
72f8891e5eSChristoph Lameter 	}
73f8891e5eSChristoph Lameter }
74f8891e5eSChristoph Lameter 
75f8891e5eSChristoph Lameter #endif /* CONFIG_VM_EVENT_COUNTERS */
76f8891e5eSChristoph Lameter 
772244b95aSChristoph Lameter /*
782244b95aSChristoph Lameter  * Manage combined zone based / global counters
792244b95aSChristoph Lameter  *
802244b95aSChristoph Lameter  * vm_stat contains the global counters
812244b95aSChristoph Lameter  */
82a1cb2c60SDimitri Sivanich atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
832244b95aSChristoph Lameter EXPORT_SYMBOL(vm_stat);
842244b95aSChristoph Lameter 
852244b95aSChristoph Lameter #ifdef CONFIG_SMP
862244b95aSChristoph Lameter 
87b44129b3SMel Gorman int calculate_pressure_threshold(struct zone *zone)
8888f5acf8SMel Gorman {
8988f5acf8SMel Gorman 	int threshold;
9088f5acf8SMel Gorman 	int watermark_distance;
9188f5acf8SMel Gorman 
9288f5acf8SMel Gorman 	/*
9388f5acf8SMel Gorman 	 * As vmstats are not up to date, there is drift between the estimated
9488f5acf8SMel Gorman 	 * and real values. For high thresholds and a high number of CPUs, it
9588f5acf8SMel Gorman 	 * is possible for the min watermark to be breached while the estimated
9688f5acf8SMel Gorman 	 * value looks fine. The pressure threshold is a reduced value such
9788f5acf8SMel Gorman 	 * that even the maximum amount of drift will not accidentally breach
9888f5acf8SMel Gorman 	 * the min watermark
9988f5acf8SMel Gorman 	 */
10088f5acf8SMel Gorman 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
10188f5acf8SMel Gorman 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
10288f5acf8SMel Gorman 
10388f5acf8SMel Gorman 	/*
10488f5acf8SMel Gorman 	 * Maximum threshold is 125
10588f5acf8SMel Gorman 	 */
10688f5acf8SMel Gorman 	threshold = min(125, threshold);
10788f5acf8SMel Gorman 
10888f5acf8SMel Gorman 	return threshold;
10988f5acf8SMel Gorman }
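
/*
 * A worked example of the calculation above (values chosen purely for
 * illustration): with a low-to-min watermark gap of 1024 pages and
 * 8 online CPUs, the result is max(1, 1024 / 8) = 128, which the
 * 125 cap then reduces to 125.
 */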
11088f5acf8SMel Gorman 
111b44129b3SMel Gorman int calculate_normal_threshold(struct zone *zone)
112df9ecabaSChristoph Lameter {
113df9ecabaSChristoph Lameter 	int threshold;
114df9ecabaSChristoph Lameter 	int mem;	/* memory in 128 MB units */
1152244b95aSChristoph Lameter 
1162244b95aSChristoph Lameter 	/*
117df9ecabaSChristoph Lameter 	 * The threshold scales with the number of processors and the amount
118df9ecabaSChristoph Lameter 	 * of memory per zone. More memory means that we can defer updates for
119df9ecabaSChristoph Lameter 	 * longer; more processors could lead to more contention.
120df9ecabaSChristoph Lameter 	 * fls() is used to have a cheap way of logarithmic scaling.
1212244b95aSChristoph Lameter 	 *
122df9ecabaSChristoph Lameter 	 * Some sample thresholds:
123df9ecabaSChristoph Lameter 	 *
124df9ecabaSChristoph Lameter 	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
125df9ecabaSChristoph Lameter 	 * ------------------------------------------------------------------
126df9ecabaSChristoph Lameter 	 * 8		1		1	0.9-1 GB	4
127df9ecabaSChristoph Lameter 	 * 16		2		2	0.9-1 GB	4
128df9ecabaSChristoph Lameter 	 * 20 		2		2	1-2 GB		5
129df9ecabaSChristoph Lameter 	 * 24		2		2	2-4 GB		6
130df9ecabaSChristoph Lameter 	 * 28		2		2	4-8 GB		7
131df9ecabaSChristoph Lameter 	 * 32		2		2	8-16 GB		8
132df9ecabaSChristoph Lameter 	 * 4		2		2	<128M		1
133df9ecabaSChristoph Lameter 	 * 30		4		3	2-4 GB		5
134df9ecabaSChristoph Lameter 	 * 48		4		3	8-16 GB		8
135df9ecabaSChristoph Lameter 	 * 32		8		4	1-2 GB		4
136df9ecabaSChristoph Lameter 	 * 32		8		4	0.9-1GB		4
137df9ecabaSChristoph Lameter 	 * 10		16		5	<128M		1
138df9ecabaSChristoph Lameter 	 * 40		16		5	900M		4
139df9ecabaSChristoph Lameter 	 * 70		64		7	2-4 GB		5
140df9ecabaSChristoph Lameter 	 * 84		64		7	4-8 GB		6
141df9ecabaSChristoph Lameter 	 * 108		512		9	4-8 GB		6
142df9ecabaSChristoph Lameter 	 * 125		1024		10	8-16 GB		8
143df9ecabaSChristoph Lameter 	 * 125		1024		10	16-32 GB	9
1442244b95aSChristoph Lameter 	 */
145df9ecabaSChristoph Lameter 
146b40da049SJiang Liu 	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
147df9ecabaSChristoph Lameter 
148df9ecabaSChristoph Lameter 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
149df9ecabaSChristoph Lameter 
150df9ecabaSChristoph Lameter 	/*
151df9ecabaSChristoph Lameter 	 * Maximum threshold is 125
152df9ecabaSChristoph Lameter 	 */
153df9ecabaSChristoph Lameter 	threshold = min(125, threshold);
154df9ecabaSChristoph Lameter 
155df9ecabaSChristoph Lameter 	return threshold;
156df9ecabaSChristoph Lameter }
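
/*
 * A worked example of the formula above (values chosen purely for
 * illustration): a zone with roughly 1-2 GB of managed memory has mem in
 * the 8-15 range (128 MB units), so fls(mem) == 4; with 2 online CPUs,
 * fls(2) == 2, and the threshold is 2 * 2 * (1 + 4) = 20, matching the
 * sample table above.
 */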
157df9ecabaSChristoph Lameter 
158df9ecabaSChristoph Lameter /*
159df9ecabaSChristoph Lameter  * Refresh the thresholds for each zone.
160df9ecabaSChristoph Lameter  */
161a6cccdc3SKOSAKI Motohiro void refresh_zone_stat_thresholds(void)
1622244b95aSChristoph Lameter {
163df9ecabaSChristoph Lameter 	struct zone *zone;
164df9ecabaSChristoph Lameter 	int cpu;
165df9ecabaSChristoph Lameter 	int threshold;
166df9ecabaSChristoph Lameter 
167ee99c71cSKOSAKI Motohiro 	for_each_populated_zone(zone) {
168aa454840SChristoph Lameter 		unsigned long max_drift, tolerate_drift;
169aa454840SChristoph Lameter 
170b44129b3SMel Gorman 		threshold = calculate_normal_threshold(zone);
171df9ecabaSChristoph Lameter 
172df9ecabaSChristoph Lameter 		for_each_online_cpu(cpu)
17399dcc3e5SChristoph Lameter 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
17499dcc3e5SChristoph Lameter 							= threshold;
175aa454840SChristoph Lameter 
176aa454840SChristoph Lameter 		/*
177aa454840SChristoph Lameter 		 * Only set percpu_drift_mark if there is a danger that
178aa454840SChristoph Lameter 		 * NR_FREE_PAGES reports the low watermark is ok when in fact
179aa454840SChristoph Lameter 		 * the min watermark could be breached by an allocation
180aa454840SChristoph Lameter 		 */
181aa454840SChristoph Lameter 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
182aa454840SChristoph Lameter 		max_drift = num_online_cpus() * threshold;
183aa454840SChristoph Lameter 		if (max_drift > tolerate_drift)
184aa454840SChristoph Lameter 			zone->percpu_drift_mark = high_wmark_pages(zone) +
185aa454840SChristoph Lameter 					max_drift;
186df9ecabaSChristoph Lameter 	}
1872244b95aSChristoph Lameter }
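
/*
 * A worked example of the drift check above (values chosen purely for
 * illustration): with a threshold of 125 and 64 online CPUs the per-cpu
 * diffs can drift by up to 64 * 125 = 8000 pages. If the low-to-min
 * watermark gap is smaller than that, percpu_drift_mark is set to the
 * high watermark plus 8000 so callers can tell when the estimated
 * NR_FREE_PAGES value may no longer be trusted near the watermarks.
 */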
1882244b95aSChristoph Lameter 
189b44129b3SMel Gorman void set_pgdat_percpu_threshold(pg_data_t *pgdat,
190b44129b3SMel Gorman 				int (*calculate_pressure)(struct zone *))
19188f5acf8SMel Gorman {
19288f5acf8SMel Gorman 	struct zone *zone;
19388f5acf8SMel Gorman 	int cpu;
19488f5acf8SMel Gorman 	int threshold;
19588f5acf8SMel Gorman 	int i;
19688f5acf8SMel Gorman 
19788f5acf8SMel Gorman 	for (i = 0; i < pgdat->nr_zones; i++) {
19888f5acf8SMel Gorman 		zone = &pgdat->node_zones[i];
19988f5acf8SMel Gorman 		if (!zone->percpu_drift_mark)
20088f5acf8SMel Gorman 			continue;
20188f5acf8SMel Gorman 
202b44129b3SMel Gorman 		threshold = (*calculate_pressure)(zone);
203bb0b6dffSMel Gorman 		for_each_online_cpu(cpu)
20488f5acf8SMel Gorman 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
20588f5acf8SMel Gorman 							= threshold;
20688f5acf8SMel Gorman 	}
20788f5acf8SMel Gorman }
20888f5acf8SMel Gorman 
2092244b95aSChristoph Lameter /*
210bea04b07SJianyu Zhan  * For use when we know that interrupts are disabled,
211bea04b07SJianyu Zhan  * or when we know that preemption is disabled and that
212bea04b07SJianyu Zhan  * particular counter cannot be updated from interrupt context.
2132244b95aSChristoph Lameter  */
2142244b95aSChristoph Lameter void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
2152244b95aSChristoph Lameter 				int delta)
2162244b95aSChristoph Lameter {
21712938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
21812938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
2192244b95aSChristoph Lameter 	long x;
22012938a92SChristoph Lameter 	long t;
2212244b95aSChristoph Lameter 
22212938a92SChristoph Lameter 	x = delta + __this_cpu_read(*p);
2232244b95aSChristoph Lameter 
22412938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
22512938a92SChristoph Lameter 
22612938a92SChristoph Lameter 	if (unlikely(x > t || x < -t)) {
2272244b95aSChristoph Lameter 		zone_page_state_add(x, zone, item);
2282244b95aSChristoph Lameter 		x = 0;
2292244b95aSChristoph Lameter 	}
23012938a92SChristoph Lameter 	__this_cpu_write(*p, x);
2312244b95aSChristoph Lameter }
2322244b95aSChristoph Lameter EXPORT_SYMBOL(__mod_zone_page_state);
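
/*
 * Illustrative call pattern for __mod_zone_page_state() (not a quote of
 * any specific call site): with interrupts already disabled, a caller
 * adjusts a zone counter by an arbitrary delta, e.g.
 *
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr_pages);
 *
 * The delta is accumulated in the per-cpu vm_stat_diff and only folded
 * into the zone and global counters once stat_threshold is exceeded.
 */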
2332244b95aSChristoph Lameter 
2342244b95aSChristoph Lameter /*
2352244b95aSChristoph Lameter  * Optimized increment and decrement functions.
2362244b95aSChristoph Lameter  *
2372244b95aSChristoph Lameter  * These are only for a single page and therefore can take a struct page *
2382244b95aSChristoph Lameter  * argument instead of struct zone *. This allows the inclusion of the code
2392244b95aSChristoph Lameter  * generated for page_zone(page) into the optimized functions.
2402244b95aSChristoph Lameter  *
2412244b95aSChristoph Lameter  * No overflow check is necessary and therefore the differential can be
2422244b95aSChristoph Lameter  * incremented or decremented in place which may allow the compilers to
2432244b95aSChristoph Lameter  * generate better code.
2442244b95aSChristoph Lameter  * The increment or decrement is known and therefore one boundary check can
2452244b95aSChristoph Lameter  * be omitted.
2462244b95aSChristoph Lameter  *
247df9ecabaSChristoph Lameter  * NOTE: These functions are very performance sensitive. Change only
248df9ecabaSChristoph Lameter  * with care.
249df9ecabaSChristoph Lameter  *
2502244b95aSChristoph Lameter  * Some processors have inc/dec instructions that are atomic vs an interrupt.
2512244b95aSChristoph Lameter  * However, the code must first determine the differential location in a zone
2522244b95aSChristoph Lameter  * based on the processor number and then inc/dec the counter. There is no
2532244b95aSChristoph Lameter  * guarantee without disabling preemption that the processor will not change
2542244b95aSChristoph Lameter  * in between and therefore the atomicity vs. interrupt cannot be exploited
2552244b95aSChristoph Lameter  * in a useful way here.
2562244b95aSChristoph Lameter  */
257c8785385SChristoph Lameter void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
2582244b95aSChristoph Lameter {
25912938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
26012938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
26112938a92SChristoph Lameter 	s8 v, t;
2622244b95aSChristoph Lameter 
263908ee0f1SChristoph Lameter 	v = __this_cpu_inc_return(*p);
26412938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
26512938a92SChristoph Lameter 	if (unlikely(v > t)) {
26612938a92SChristoph Lameter 		s8 overstep = t >> 1;
2672244b95aSChristoph Lameter 
26812938a92SChristoph Lameter 		zone_page_state_add(v + overstep, zone, item);
26912938a92SChristoph Lameter 		__this_cpu_write(*p, -overstep);
2702244b95aSChristoph Lameter 	}
2712244b95aSChristoph Lameter }
272ca889e6cSChristoph Lameter 
273ca889e6cSChristoph Lameter void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
274ca889e6cSChristoph Lameter {
275ca889e6cSChristoph Lameter 	__inc_zone_state(page_zone(page), item);
276ca889e6cSChristoph Lameter }
2772244b95aSChristoph Lameter EXPORT_SYMBOL(__inc_zone_page_state);
2782244b95aSChristoph Lameter 
279c8785385SChristoph Lameter void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
2802244b95aSChristoph Lameter {
28112938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
28212938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
28312938a92SChristoph Lameter 	s8 v, t;
2842244b95aSChristoph Lameter 
285908ee0f1SChristoph Lameter 	v = __this_cpu_dec_return(*p);
28612938a92SChristoph Lameter 	if (unlikely(v < -t)) {
28712938a92SChristoph Lameter 	if (unlikely(v < - t)) {
28812938a92SChristoph Lameter 		s8 overstep = t >> 1;
2892244b95aSChristoph Lameter 
29012938a92SChristoph Lameter 		zone_page_state_add(v - overstep, zone, item);
29112938a92SChristoph Lameter 		__this_cpu_write(*p, overstep);
2922244b95aSChristoph Lameter 	}
2932244b95aSChristoph Lameter }
294c8785385SChristoph Lameter 
295c8785385SChristoph Lameter void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
296c8785385SChristoph Lameter {
297c8785385SChristoph Lameter 	__dec_zone_state(page_zone(page), item);
298c8785385SChristoph Lameter }
2992244b95aSChristoph Lameter EXPORT_SYMBOL(__dec_zone_page_state);
3002244b95aSChristoph Lameter 
3014156153cSHeiko Carstens #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
3027c839120SChristoph Lameter /*
3037c839120SChristoph Lameter  * If we have cmpxchg_local support then we do not need to incur the overhead
3047c839120SChristoph Lameter  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
3057c839120SChristoph Lameter  *
3067c839120SChristoph Lameter  * mod_state() modifies the zone counter state through atomic per cpu
3077c839120SChristoph Lameter  * operations.
3087c839120SChristoph Lameter  *
3097c839120SChristoph Lameter  * Overstep mode specifies how overstep should be handled:
3107c839120SChristoph Lameter  *     0       No overstepping
3117c839120SChristoph Lameter  *     1       Overstepping half of threshold
3127c839120SChristoph Lameter  *     -1      Overstepping minus half of threshold
3137c839120SChristoph Lameter  */
3147c839120SChristoph Lameter static inline void mod_state(struct zone *zone,
3157c839120SChristoph Lameter        enum zone_stat_item item, int delta, int overstep_mode)
3167c839120SChristoph Lameter {
3177c839120SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
3187c839120SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
3197c839120SChristoph Lameter 	long o, n, t, z;
3207c839120SChristoph Lameter 
3217c839120SChristoph Lameter 	do {
3227c839120SChristoph Lameter 		z = 0;  /* overflow to zone counters */
3237c839120SChristoph Lameter 
3247c839120SChristoph Lameter 		/*
3257c839120SChristoph Lameter 		 * The fetching of the stat_threshold is racy. We may apply
3267c839120SChristoph Lameter 		 * a counter threshold to the wrong cpu if we get
327d3bc2367SChristoph Lameter 		 * rescheduled while executing here. However, the next
328d3bc2367SChristoph Lameter 		 * counter update will apply the threshold again and
329d3bc2367SChristoph Lameter 		 * therefore bring the counter under the threshold again.
330d3bc2367SChristoph Lameter 		 *
331d3bc2367SChristoph Lameter 		 * Most of the time the thresholds are the same anyway
332d3bc2367SChristoph Lameter 		 * for all cpus in a zone.
3337c839120SChristoph Lameter 		 */
3347c839120SChristoph Lameter 		t = this_cpu_read(pcp->stat_threshold);
3357c839120SChristoph Lameter 
3367c839120SChristoph Lameter 		o = this_cpu_read(*p);
3377c839120SChristoph Lameter 		n = delta + o;
3387c839120SChristoph Lameter 
3397c839120SChristoph Lameter 		if (n > t || n < -t) {
3407c839120SChristoph Lameter 			int os = overstep_mode * (t >> 1);
3417c839120SChristoph Lameter 
3427c839120SChristoph Lameter 			/* Overflow must be added to zone counters */
3437c839120SChristoph Lameter 			z = n + os;
3447c839120SChristoph Lameter 			n = -os;
3457c839120SChristoph Lameter 		}
3467c839120SChristoph Lameter 	} while (this_cpu_cmpxchg(*p, o, n) != o);
3477c839120SChristoph Lameter 
3487c839120SChristoph Lameter 	if (z)
3497c839120SChristoph Lameter 		zone_page_state_add(z, zone, item);
3507c839120SChristoph Lameter }
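
/*
 * A worked example of the overstep handling above (values chosen purely
 * for illustration): with stat_threshold t = 32, overstep_mode 1 and a
 * per-cpu diff that reaches n = 33, os = 16, so z = 49 is added to the
 * zone counter and the per-cpu diff is reset to -16; the net effect
 * (49 - 16 = 33) matches the accumulated updates.
 */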
3517c839120SChristoph Lameter 
3527c839120SChristoph Lameter void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
3537c839120SChristoph Lameter 					int delta)
3547c839120SChristoph Lameter {
3557c839120SChristoph Lameter 	mod_state(zone, item, delta, 0);
3567c839120SChristoph Lameter }
3577c839120SChristoph Lameter EXPORT_SYMBOL(mod_zone_page_state);
3587c839120SChristoph Lameter 
3597c839120SChristoph Lameter void inc_zone_state(struct zone *zone, enum zone_stat_item item)
3607c839120SChristoph Lameter {
3617c839120SChristoph Lameter 	mod_state(zone, item, 1, 1);
3627c839120SChristoph Lameter }
3637c839120SChristoph Lameter 
3647c839120SChristoph Lameter void inc_zone_page_state(struct page *page, enum zone_stat_item item)
3657c839120SChristoph Lameter {
3667c839120SChristoph Lameter 	mod_state(page_zone(page), item, 1, 1);
3677c839120SChristoph Lameter }
3687c839120SChristoph Lameter EXPORT_SYMBOL(inc_zone_page_state);
3697c839120SChristoph Lameter 
3707c839120SChristoph Lameter void dec_zone_page_state(struct page *page, enum zone_stat_item item)
3717c839120SChristoph Lameter {
3727c839120SChristoph Lameter 	mod_state(page_zone(page), item, -1, -1);
3737c839120SChristoph Lameter }
3747c839120SChristoph Lameter EXPORT_SYMBOL(dec_zone_page_state);
3757c839120SChristoph Lameter #else
3767c839120SChristoph Lameter /*
3777c839120SChristoph Lameter  * Use interrupt disable to serialize counter updates
3787c839120SChristoph Lameter  */
3797c839120SChristoph Lameter void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
3807c839120SChristoph Lameter 					int delta)
3817c839120SChristoph Lameter {
3827c839120SChristoph Lameter 	unsigned long flags;
3837c839120SChristoph Lameter 
3847c839120SChristoph Lameter 	local_irq_save(flags);
3857c839120SChristoph Lameter 	__mod_zone_page_state(zone, item, delta);
3867c839120SChristoph Lameter 	local_irq_restore(flags);
3877c839120SChristoph Lameter }
3887c839120SChristoph Lameter EXPORT_SYMBOL(mod_zone_page_state);
3897c839120SChristoph Lameter 
390ca889e6cSChristoph Lameter void inc_zone_state(struct zone *zone, enum zone_stat_item item)
391ca889e6cSChristoph Lameter {
392ca889e6cSChristoph Lameter 	unsigned long flags;
393ca889e6cSChristoph Lameter 
394ca889e6cSChristoph Lameter 	local_irq_save(flags);
395ca889e6cSChristoph Lameter 	__inc_zone_state(zone, item);
396ca889e6cSChristoph Lameter 	local_irq_restore(flags);
397ca889e6cSChristoph Lameter }
398ca889e6cSChristoph Lameter 
3992244b95aSChristoph Lameter void inc_zone_page_state(struct page *page, enum zone_stat_item item)
4002244b95aSChristoph Lameter {
4012244b95aSChristoph Lameter 	unsigned long flags;
4022244b95aSChristoph Lameter 	struct zone *zone;
4032244b95aSChristoph Lameter 
4042244b95aSChristoph Lameter 	zone = page_zone(page);
4052244b95aSChristoph Lameter 	local_irq_save(flags);
406ca889e6cSChristoph Lameter 	__inc_zone_state(zone, item);
4072244b95aSChristoph Lameter 	local_irq_restore(flags);
4082244b95aSChristoph Lameter }
4092244b95aSChristoph Lameter EXPORT_SYMBOL(inc_zone_page_state);
4102244b95aSChristoph Lameter 
4112244b95aSChristoph Lameter void dec_zone_page_state(struct page *page, enum zone_stat_item item)
4122244b95aSChristoph Lameter {
4132244b95aSChristoph Lameter 	unsigned long flags;
4142244b95aSChristoph Lameter 
4152244b95aSChristoph Lameter 	local_irq_save(flags);
416a302eb4eSChristoph Lameter 	__dec_zone_page_state(page, item);
4172244b95aSChristoph Lameter 	local_irq_restore(flags);
4182244b95aSChristoph Lameter }
4192244b95aSChristoph Lameter EXPORT_SYMBOL(dec_zone_page_state);
4207c839120SChristoph Lameter #endif
4212244b95aSChristoph Lameter 
4224edb0748SChristoph Lameter static inline void fold_diff(int *diff)
4234edb0748SChristoph Lameter {
4244edb0748SChristoph Lameter 	int i;
4254edb0748SChristoph Lameter 
4264edb0748SChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
4274edb0748SChristoph Lameter 		if (diff[i])
4284edb0748SChristoph Lameter 			atomic_long_add(diff[i], &vm_stat[i]);
4294edb0748SChristoph Lameter }
4304edb0748SChristoph Lameter 
4312244b95aSChristoph Lameter /*
4322bb921e5SChristoph Lameter  * Update the zone counters for the current cpu.
433a7f75e25SChristoph Lameter  *
4344037d452SChristoph Lameter  * Note that refresh_cpu_vm_stats strives to only access
4354037d452SChristoph Lameter  * node local memory. The per cpu pagesets on remote zones are placed
4364037d452SChristoph Lameter  * in the memory local to the processor using that pageset. So the
4374037d452SChristoph Lameter  * loop over all zones will access a series of cachelines local to
4384037d452SChristoph Lameter  * the processor.
4394037d452SChristoph Lameter  *
4404037d452SChristoph Lameter  * The call to zone_page_state_add updates the cachelines with the
4414037d452SChristoph Lameter  * statistics in the remote zone struct as well as the global cachelines
4424037d452SChristoph Lameter  * with the global counters. These could cause remote node cache line
4434037d452SChristoph Lameter  * bouncing and so should only be done when necessary.
4442244b95aSChristoph Lameter  */
445fbc2edb0SChristoph Lameter static void refresh_cpu_vm_stats(void)
4462244b95aSChristoph Lameter {
4472244b95aSChristoph Lameter 	struct zone *zone;
4482244b95aSChristoph Lameter 	int i;
449a7f75e25SChristoph Lameter 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
4502244b95aSChristoph Lameter 
451ee99c71cSKOSAKI Motohiro 	for_each_populated_zone(zone) {
452fbc2edb0SChristoph Lameter 		struct per_cpu_pageset __percpu *p = zone->pageset;
4532244b95aSChristoph Lameter 
454fbc2edb0SChristoph Lameter 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
455a7f75e25SChristoph Lameter 			int v;
456a7f75e25SChristoph Lameter 
457fbc2edb0SChristoph Lameter 			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
458fbc2edb0SChristoph Lameter 			if (v) {
459fbc2edb0SChristoph Lameter 
460a7f75e25SChristoph Lameter 				atomic_long_add(v, &zone->vm_stat[i]);
461a7f75e25SChristoph Lameter 				global_diff[i] += v;
4624037d452SChristoph Lameter #ifdef CONFIG_NUMA
4634037d452SChristoph Lameter 				/* 3 seconds idle till flush */
464fbc2edb0SChristoph Lameter 				__this_cpu_write(p->expire, 3);
4654037d452SChristoph Lameter #endif
4662244b95aSChristoph Lameter 			}
467fbc2edb0SChristoph Lameter 		}
468468fd62eSDimitri Sivanich 		cond_resched();
4694037d452SChristoph Lameter #ifdef CONFIG_NUMA
4704037d452SChristoph Lameter 		/*
4714037d452SChristoph Lameter 		 * Deal with draining the remote pageset of this
4724037d452SChristoph Lameter 		 * processor
4734037d452SChristoph Lameter 		 *
4744037d452SChristoph Lameter 		 * Check if there are pages remaining in this pageset;
4754037d452SChristoph Lameter 		 * if not, there is nothing to expire.
4764037d452SChristoph Lameter 		 */
477fbc2edb0SChristoph Lameter 		if (!__this_cpu_read(p->expire) ||
478fbc2edb0SChristoph Lameter 			       !__this_cpu_read(p->pcp.count))
4794037d452SChristoph Lameter 			continue;
4804037d452SChristoph Lameter 
4814037d452SChristoph Lameter 		/*
4824037d452SChristoph Lameter 		 * We never drain zones local to this processor.
4834037d452SChristoph Lameter 		 */
4844037d452SChristoph Lameter 		if (zone_to_nid(zone) == numa_node_id()) {
485fbc2edb0SChristoph Lameter 			__this_cpu_write(p->expire, 0);
4864037d452SChristoph Lameter 			continue;
4874037d452SChristoph Lameter 		}
4884037d452SChristoph Lameter 
489fbc2edb0SChristoph Lameter 
490fbc2edb0SChristoph Lameter 		if (__this_cpu_dec_return(p->expire))
4914037d452SChristoph Lameter 			continue;
4924037d452SChristoph Lameter 
493fbc2edb0SChristoph Lameter 		if (__this_cpu_read(p->pcp.count))
4947c8e0181SChristoph Lameter 			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
4954037d452SChristoph Lameter #endif
4962244b95aSChristoph Lameter 	}
4974edb0748SChristoph Lameter 	fold_diff(global_diff);
4982244b95aSChristoph Lameter }
4992244b95aSChristoph Lameter 
50040f4b1eaSCody P Schafer /*
5012bb921e5SChristoph Lameter  * Fold the data for an offline cpu into the global array.
5022bb921e5SChristoph Lameter  * There cannot be any access by the offline cpu and therefore
5032bb921e5SChristoph Lameter  * synchronization is simplified.
5042bb921e5SChristoph Lameter  */
5052bb921e5SChristoph Lameter void cpu_vm_stats_fold(int cpu)
5062bb921e5SChristoph Lameter {
5072bb921e5SChristoph Lameter 	struct zone *zone;
5082bb921e5SChristoph Lameter 	int i;
5092bb921e5SChristoph Lameter 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
5102bb921e5SChristoph Lameter 
5112bb921e5SChristoph Lameter 	for_each_populated_zone(zone) {
5122bb921e5SChristoph Lameter 		struct per_cpu_pageset *p;
5132bb921e5SChristoph Lameter 
5142bb921e5SChristoph Lameter 		p = per_cpu_ptr(zone->pageset, cpu);
5152bb921e5SChristoph Lameter 
5162bb921e5SChristoph Lameter 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
5172bb921e5SChristoph Lameter 			if (p->vm_stat_diff[i]) {
5182bb921e5SChristoph Lameter 				int v;
5192bb921e5SChristoph Lameter 
5202bb921e5SChristoph Lameter 				v = p->vm_stat_diff[i];
5212bb921e5SChristoph Lameter 				p->vm_stat_diff[i] = 0;
5222bb921e5SChristoph Lameter 				atomic_long_add(v, &zone->vm_stat[i]);
5232bb921e5SChristoph Lameter 				global_diff[i] += v;
5242bb921e5SChristoph Lameter 			}
5252bb921e5SChristoph Lameter 	}
5262bb921e5SChristoph Lameter 
5274edb0748SChristoph Lameter 	fold_diff(global_diff);
5282bb921e5SChristoph Lameter }
5292bb921e5SChristoph Lameter 
5302bb921e5SChristoph Lameter /*
53140f4b1eaSCody P Schafer  * This is only called if !populated_zone(zone), which implies no other users of
53240f4b1eaSCody P Schafer  * pset->vm_stat_diff[] exist.
53340f4b1eaSCody P Schafer  */
5345a883813SMinchan Kim void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
5355a883813SMinchan Kim {
5365a883813SMinchan Kim 	int i;
5375a883813SMinchan Kim 
5385a883813SMinchan Kim 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
5395a883813SMinchan Kim 		if (pset->vm_stat_diff[i]) {
5405a883813SMinchan Kim 			int v = pset->vm_stat_diff[i];
5415a883813SMinchan Kim 			pset->vm_stat_diff[i] = 0;
5425a883813SMinchan Kim 			atomic_long_add(v, &zone->vm_stat[i]);
5435a883813SMinchan Kim 			atomic_long_add(v, &vm_stat[i]);
5445a883813SMinchan Kim 		}
5455a883813SMinchan Kim }
5462244b95aSChristoph Lameter #endif
5472244b95aSChristoph Lameter 
548ca889e6cSChristoph Lameter #ifdef CONFIG_NUMA
549ca889e6cSChristoph Lameter /*
550ca889e6cSChristoph Lameter  * zonelist = the list of zones passed to the allocator
551ca889e6cSChristoph Lameter  * z 	    = the zone from which the allocation occurred.
552ca889e6cSChristoph Lameter  *
553ca889e6cSChristoph Lameter  * Must be called with interrupts disabled.
55478afd561SAndi Kleen  *
55578afd561SAndi Kleen  * When __GFP_OTHER_NODE is set, assume the node of the preferred
55678afd561SAndi Kleen  * zone is the local node. This is useful for daemons that allocate
55778afd561SAndi Kleen  * memory on behalf of other processes.
558ca889e6cSChristoph Lameter  */
55978afd561SAndi Kleen void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
560ca889e6cSChristoph Lameter {
56118ea7e71SMel Gorman 	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
562ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_HIT);
563ca889e6cSChristoph Lameter 	} else {
564ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_MISS);
56518ea7e71SMel Gorman 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
566ca889e6cSChristoph Lameter 	}
56778afd561SAndi Kleen 	if (z->node == ((flags & __GFP_OTHER_NODE) ?
56878afd561SAndi Kleen 			preferred_zone->node : numa_node_id()))
569ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_LOCAL);
570ca889e6cSChristoph Lameter 	else
571ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_OTHER);
572ca889e6cSChristoph Lameter }
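
/*
 * Illustrative example of the accounting above: a task on node 0 whose
 * preferred zone is on node 0 but whose allocation is satisfied from a
 * zone on node 1 increments NUMA_MISS on the node 1 zone, NUMA_FOREIGN
 * on the preferred node 0 zone, and (without __GFP_OTHER_NODE)
 * NUMA_OTHER on the node 1 zone.
 */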
573ca889e6cSChristoph Lameter #endif
574ca889e6cSChristoph Lameter 
575d7a5752cSMel Gorman #ifdef CONFIG_COMPACTION
57636deb0beSNamhyung Kim 
577d7a5752cSMel Gorman struct contig_page_info {
578d7a5752cSMel Gorman 	unsigned long free_pages;
579d7a5752cSMel Gorman 	unsigned long free_blocks_total;
580d7a5752cSMel Gorman 	unsigned long free_blocks_suitable;
581d7a5752cSMel Gorman };
582d7a5752cSMel Gorman 
583d7a5752cSMel Gorman /*
584d7a5752cSMel Gorman  * Calculate the number of free pages in a zone, how many contiguous
585d7a5752cSMel Gorman  * pages are free and how many are large enough to satisfy an allocation of
586d7a5752cSMel Gorman  * the target size. Note that this function makes no attempt to estimate
587d7a5752cSMel Gorman  * how many suitable free blocks there *might* be if MOVABLE pages were
588d7a5752cSMel Gorman  * migrated. Calculating that is possible, but expensive and can be
589d7a5752cSMel Gorman  * figured out from userspace
590d7a5752cSMel Gorman  */
591d7a5752cSMel Gorman static void fill_contig_page_info(struct zone *zone,
592d7a5752cSMel Gorman 				unsigned int suitable_order,
593d7a5752cSMel Gorman 				struct contig_page_info *info)
594d7a5752cSMel Gorman {
595d7a5752cSMel Gorman 	unsigned int order;
596d7a5752cSMel Gorman 
597d7a5752cSMel Gorman 	info->free_pages = 0;
598d7a5752cSMel Gorman 	info->free_blocks_total = 0;
599d7a5752cSMel Gorman 	info->free_blocks_suitable = 0;
600d7a5752cSMel Gorman 
601d7a5752cSMel Gorman 	for (order = 0; order < MAX_ORDER; order++) {
602d7a5752cSMel Gorman 		unsigned long blocks;
603d7a5752cSMel Gorman 
604d7a5752cSMel Gorman 		/* Count number of free blocks */
605d7a5752cSMel Gorman 		blocks = zone->free_area[order].nr_free;
606d7a5752cSMel Gorman 		info->free_blocks_total += blocks;
607d7a5752cSMel Gorman 
608d7a5752cSMel Gorman 		/* Count free base pages */
609d7a5752cSMel Gorman 		info->free_pages += blocks << order;
610d7a5752cSMel Gorman 
611d7a5752cSMel Gorman 		/* Count the suitable free blocks */
612d7a5752cSMel Gorman 		if (order >= suitable_order)
613d7a5752cSMel Gorman 			info->free_blocks_suitable += blocks <<
614d7a5752cSMel Gorman 						(order - suitable_order);
615d7a5752cSMel Gorman 	}
616d7a5752cSMel Gorman }
617f1a5ab12SMel Gorman 
618f1a5ab12SMel Gorman /*
619f1a5ab12SMel Gorman  * A fragmentation index only makes sense if an allocation of a requested
620f1a5ab12SMel Gorman  * size would fail. If that is true, the fragmentation index indicates
621f1a5ab12SMel Gorman  * whether external fragmentation or a lack of memory was the problem.
622f1a5ab12SMel Gorman  * The value can be used to determine if page reclaim or compaction
623f1a5ab12SMel Gorman  * should be used
624f1a5ab12SMel Gorman  */
62556de7263SMel Gorman static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
626f1a5ab12SMel Gorman {
627f1a5ab12SMel Gorman 	unsigned long requested = 1UL << order;
628f1a5ab12SMel Gorman 
629f1a5ab12SMel Gorman 	if (!info->free_blocks_total)
630f1a5ab12SMel Gorman 		return 0;
631f1a5ab12SMel Gorman 
632f1a5ab12SMel Gorman 	/* Fragmentation index only makes sense when a request would fail */
633f1a5ab12SMel Gorman 	if (info->free_blocks_suitable)
634f1a5ab12SMel Gorman 		return -1000;
635f1a5ab12SMel Gorman 
636f1a5ab12SMel Gorman 	/*
637f1a5ab12SMel Gorman 	 * Index is between 0 and 1 so return within 3 decimal places
638f1a5ab12SMel Gorman 	 *
639f1a5ab12SMel Gorman 	 * 0 => allocation would fail due to lack of memory
640f1a5ab12SMel Gorman 	 * 1 => allocation would fail due to fragmentation
641f1a5ab12SMel Gorman 	 */
642f1a5ab12SMel Gorman 	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
643f1a5ab12SMel Gorman }
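
/*
 * A worked example of the index above (values chosen purely for
 * illustration): for an order-3 request (requested = 8) against 1000 free
 * pages spread over 500 free blocks with no block large enough, the index
 * is 1000 - (1000 + 1000 * 1000 / 8) / 500 = 1000 - 252 = 748, i.e. 0.748,
 * suggesting fragmentation rather than lack of memory is the problem.
 */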
64456de7263SMel Gorman 
64556de7263SMel Gorman /* Same as __fragmentation index but allocs contig_page_info on stack */
64656de7263SMel Gorman int fragmentation_index(struct zone *zone, unsigned int order)
64756de7263SMel Gorman {
64856de7263SMel Gorman 	struct contig_page_info info;
64956de7263SMel Gorman 
65056de7263SMel Gorman 	fill_contig_page_info(zone, order, &info);
65156de7263SMel Gorman 	return __fragmentation_index(order, &info);
65256de7263SMel Gorman }
653d7a5752cSMel Gorman #endif
654d7a5752cSMel Gorman 
655d7a5752cSMel Gorman #if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
6568f32f7e5SAlexey Dobriyan #include <linux/proc_fs.h>
657f6ac2354SChristoph Lameter #include <linux/seq_file.h>
658f6ac2354SChristoph Lameter 
659467c996cSMel Gorman static char * const migratetype_names[MIGRATE_TYPES] = {
660467c996cSMel Gorman 	"Unmovable",
661467c996cSMel Gorman 	"Reclaimable",
662467c996cSMel Gorman 	"Movable",
663467c996cSMel Gorman 	"Reserve",
66447118af0SMichal Nazarewicz #ifdef CONFIG_CMA
66547118af0SMichal Nazarewicz 	"CMA",
66647118af0SMichal Nazarewicz #endif
667194159fbSMinchan Kim #ifdef CONFIG_MEMORY_ISOLATION
66891446b06SKOSAKI Motohiro 	"Isolate",
669194159fbSMinchan Kim #endif
670467c996cSMel Gorman };
671467c996cSMel Gorman 
672f6ac2354SChristoph Lameter static void *frag_start(struct seq_file *m, loff_t *pos)
673f6ac2354SChristoph Lameter {
674f6ac2354SChristoph Lameter 	pg_data_t *pgdat;
675f6ac2354SChristoph Lameter 	loff_t node = *pos;
676f6ac2354SChristoph Lameter 	for (pgdat = first_online_pgdat();
677f6ac2354SChristoph Lameter 	     pgdat && node;
678f6ac2354SChristoph Lameter 	     pgdat = next_online_pgdat(pgdat))
679f6ac2354SChristoph Lameter 		--node;
680f6ac2354SChristoph Lameter 
681f6ac2354SChristoph Lameter 	return pgdat;
682f6ac2354SChristoph Lameter }
683f6ac2354SChristoph Lameter 
684f6ac2354SChristoph Lameter static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
685f6ac2354SChristoph Lameter {
686f6ac2354SChristoph Lameter 	pg_data_t *pgdat = (pg_data_t *)arg;
687f6ac2354SChristoph Lameter 
688f6ac2354SChristoph Lameter 	(*pos)++;
689f6ac2354SChristoph Lameter 	return next_online_pgdat(pgdat);
690f6ac2354SChristoph Lameter }
691f6ac2354SChristoph Lameter 
692f6ac2354SChristoph Lameter static void frag_stop(struct seq_file *m, void *arg)
693f6ac2354SChristoph Lameter {
694f6ac2354SChristoph Lameter }
695f6ac2354SChristoph Lameter 
696467c996cSMel Gorman /* Walk all the zones in a node and print using a callback */
697467c996cSMel Gorman static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
698467c996cSMel Gorman 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
699f6ac2354SChristoph Lameter {
700f6ac2354SChristoph Lameter 	struct zone *zone;
701f6ac2354SChristoph Lameter 	struct zone *node_zones = pgdat->node_zones;
702f6ac2354SChristoph Lameter 	unsigned long flags;
703f6ac2354SChristoph Lameter 
704f6ac2354SChristoph Lameter 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
705f6ac2354SChristoph Lameter 		if (!populated_zone(zone))
706f6ac2354SChristoph Lameter 			continue;
707f6ac2354SChristoph Lameter 
708f6ac2354SChristoph Lameter 		spin_lock_irqsave(&zone->lock, flags);
709467c996cSMel Gorman 		print(m, pgdat, zone);
710467c996cSMel Gorman 		spin_unlock_irqrestore(&zone->lock, flags);
711467c996cSMel Gorman 	}
712467c996cSMel Gorman }
713d7a5752cSMel Gorman #endif
714467c996cSMel Gorman 
7150d6617c7SDavid Rientjes #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
716fa25c503SKOSAKI Motohiro #ifdef CONFIG_ZONE_DMA
717fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA(xx) xx "_dma",
718fa25c503SKOSAKI Motohiro #else
719fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA(xx)
720fa25c503SKOSAKI Motohiro #endif
721fa25c503SKOSAKI Motohiro 
722fa25c503SKOSAKI Motohiro #ifdef CONFIG_ZONE_DMA32
723fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA32(xx) xx "_dma32",
724fa25c503SKOSAKI Motohiro #else
725fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA32(xx)
726fa25c503SKOSAKI Motohiro #endif
727fa25c503SKOSAKI Motohiro 
728fa25c503SKOSAKI Motohiro #ifdef CONFIG_HIGHMEM
729fa25c503SKOSAKI Motohiro #define TEXT_FOR_HIGHMEM(xx) xx "_high",
730fa25c503SKOSAKI Motohiro #else
731fa25c503SKOSAKI Motohiro #define TEXT_FOR_HIGHMEM(xx)
732fa25c503SKOSAKI Motohiro #endif
733fa25c503SKOSAKI Motohiro 
734fa25c503SKOSAKI Motohiro #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
735fa25c503SKOSAKI Motohiro 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
736fa25c503SKOSAKI Motohiro 
737fa25c503SKOSAKI Motohiro const char * const vmstat_text[] = {
738*09316c09SKonstantin Khlebnikov 	/* enum zone_stat_item counters */
739fa25c503SKOSAKI Motohiro 	"nr_free_pages",
74081c0a2bbSJohannes Weiner 	"nr_alloc_batch",
741fa25c503SKOSAKI Motohiro 	"nr_inactive_anon",
742fa25c503SKOSAKI Motohiro 	"nr_active_anon",
743fa25c503SKOSAKI Motohiro 	"nr_inactive_file",
744fa25c503SKOSAKI Motohiro 	"nr_active_file",
745fa25c503SKOSAKI Motohiro 	"nr_unevictable",
746fa25c503SKOSAKI Motohiro 	"nr_mlock",
747fa25c503SKOSAKI Motohiro 	"nr_anon_pages",
748fa25c503SKOSAKI Motohiro 	"nr_mapped",
749fa25c503SKOSAKI Motohiro 	"nr_file_pages",
750fa25c503SKOSAKI Motohiro 	"nr_dirty",
751fa25c503SKOSAKI Motohiro 	"nr_writeback",
752fa25c503SKOSAKI Motohiro 	"nr_slab_reclaimable",
753fa25c503SKOSAKI Motohiro 	"nr_slab_unreclaimable",
754fa25c503SKOSAKI Motohiro 	"nr_page_table_pages",
755fa25c503SKOSAKI Motohiro 	"nr_kernel_stack",
756fa25c503SKOSAKI Motohiro 	"nr_unstable",
757fa25c503SKOSAKI Motohiro 	"nr_bounce",
758fa25c503SKOSAKI Motohiro 	"nr_vmscan_write",
75949ea7eb6SMel Gorman 	"nr_vmscan_immediate_reclaim",
760fa25c503SKOSAKI Motohiro 	"nr_writeback_temp",
761fa25c503SKOSAKI Motohiro 	"nr_isolated_anon",
762fa25c503SKOSAKI Motohiro 	"nr_isolated_file",
763fa25c503SKOSAKI Motohiro 	"nr_shmem",
764fa25c503SKOSAKI Motohiro 	"nr_dirtied",
765fa25c503SKOSAKI Motohiro 	"nr_written",
7660d5d823aSMel Gorman 	"nr_pages_scanned",
767fa25c503SKOSAKI Motohiro 
768fa25c503SKOSAKI Motohiro #ifdef CONFIG_NUMA
769fa25c503SKOSAKI Motohiro 	"numa_hit",
770fa25c503SKOSAKI Motohiro 	"numa_miss",
771fa25c503SKOSAKI Motohiro 	"numa_foreign",
772fa25c503SKOSAKI Motohiro 	"numa_interleave",
773fa25c503SKOSAKI Motohiro 	"numa_local",
774fa25c503SKOSAKI Motohiro 	"numa_other",
775fa25c503SKOSAKI Motohiro #endif
776a528910eSJohannes Weiner 	"workingset_refault",
777a528910eSJohannes Weiner 	"workingset_activate",
778449dd698SJohannes Weiner 	"workingset_nodereclaim",
779fa25c503SKOSAKI Motohiro 	"nr_anon_transparent_hugepages",
780d1ce749aSBartlomiej Zolnierkiewicz 	"nr_free_cma",
781*09316c09SKonstantin Khlebnikov 
782*09316c09SKonstantin Khlebnikov 	/* enum writeback_stat_item counters */
783fa25c503SKOSAKI Motohiro 	"nr_dirty_threshold",
784fa25c503SKOSAKI Motohiro 	"nr_dirty_background_threshold",
785fa25c503SKOSAKI Motohiro 
786fa25c503SKOSAKI Motohiro #ifdef CONFIG_VM_EVENT_COUNTERS
787*09316c09SKonstantin Khlebnikov 	/* enum vm_event_item counters */
788fa25c503SKOSAKI Motohiro 	"pgpgin",
789fa25c503SKOSAKI Motohiro 	"pgpgout",
790fa25c503SKOSAKI Motohiro 	"pswpin",
791fa25c503SKOSAKI Motohiro 	"pswpout",
792fa25c503SKOSAKI Motohiro 
793fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgalloc")
794fa25c503SKOSAKI Motohiro 
795fa25c503SKOSAKI Motohiro 	"pgfree",
796fa25c503SKOSAKI Motohiro 	"pgactivate",
797fa25c503SKOSAKI Motohiro 	"pgdeactivate",
798fa25c503SKOSAKI Motohiro 
799fa25c503SKOSAKI Motohiro 	"pgfault",
800fa25c503SKOSAKI Motohiro 	"pgmajfault",
801fa25c503SKOSAKI Motohiro 
802fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgrefill")
803904249aaSYing Han 	TEXTS_FOR_ZONES("pgsteal_kswapd")
804904249aaSYing Han 	TEXTS_FOR_ZONES("pgsteal_direct")
805fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgscan_kswapd")
806fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgscan_direct")
80768243e76SMel Gorman 	"pgscan_direct_throttle",
808fa25c503SKOSAKI Motohiro 
809fa25c503SKOSAKI Motohiro #ifdef CONFIG_NUMA
810fa25c503SKOSAKI Motohiro 	"zone_reclaim_failed",
811fa25c503SKOSAKI Motohiro #endif
812fa25c503SKOSAKI Motohiro 	"pginodesteal",
813fa25c503SKOSAKI Motohiro 	"slabs_scanned",
814fa25c503SKOSAKI Motohiro 	"kswapd_inodesteal",
815fa25c503SKOSAKI Motohiro 	"kswapd_low_wmark_hit_quickly",
816fa25c503SKOSAKI Motohiro 	"kswapd_high_wmark_hit_quickly",
817fa25c503SKOSAKI Motohiro 	"pageoutrun",
818fa25c503SKOSAKI Motohiro 	"allocstall",
819fa25c503SKOSAKI Motohiro 
820fa25c503SKOSAKI Motohiro 	"pgrotated",
821fa25c503SKOSAKI Motohiro 
8225509a5d2SDave Hansen 	"drop_pagecache",
8235509a5d2SDave Hansen 	"drop_slab",
8245509a5d2SDave Hansen 
82503c5a6e1SMel Gorman #ifdef CONFIG_NUMA_BALANCING
82603c5a6e1SMel Gorman 	"numa_pte_updates",
82772403b4aSMel Gorman 	"numa_huge_pte_updates",
82803c5a6e1SMel Gorman 	"numa_hint_faults",
82903c5a6e1SMel Gorman 	"numa_hint_faults_local",
83003c5a6e1SMel Gorman 	"numa_pages_migrated",
83103c5a6e1SMel Gorman #endif
8325647bc29SMel Gorman #ifdef CONFIG_MIGRATION
8335647bc29SMel Gorman 	"pgmigrate_success",
8345647bc29SMel Gorman 	"pgmigrate_fail",
8355647bc29SMel Gorman #endif
836fa25c503SKOSAKI Motohiro #ifdef CONFIG_COMPACTION
837397487dbSMel Gorman 	"compact_migrate_scanned",
838397487dbSMel Gorman 	"compact_free_scanned",
839397487dbSMel Gorman 	"compact_isolated",
840fa25c503SKOSAKI Motohiro 	"compact_stall",
841fa25c503SKOSAKI Motohiro 	"compact_fail",
842fa25c503SKOSAKI Motohiro 	"compact_success",
843fa25c503SKOSAKI Motohiro #endif
844fa25c503SKOSAKI Motohiro 
845fa25c503SKOSAKI Motohiro #ifdef CONFIG_HUGETLB_PAGE
846fa25c503SKOSAKI Motohiro 	"htlb_buddy_alloc_success",
847fa25c503SKOSAKI Motohiro 	"htlb_buddy_alloc_fail",
848fa25c503SKOSAKI Motohiro #endif
849fa25c503SKOSAKI Motohiro 	"unevictable_pgs_culled",
850fa25c503SKOSAKI Motohiro 	"unevictable_pgs_scanned",
851fa25c503SKOSAKI Motohiro 	"unevictable_pgs_rescued",
852fa25c503SKOSAKI Motohiro 	"unevictable_pgs_mlocked",
853fa25c503SKOSAKI Motohiro 	"unevictable_pgs_munlocked",
854fa25c503SKOSAKI Motohiro 	"unevictable_pgs_cleared",
855fa25c503SKOSAKI Motohiro 	"unevictable_pgs_stranded",
856fa25c503SKOSAKI Motohiro 
857fa25c503SKOSAKI Motohiro #ifdef CONFIG_TRANSPARENT_HUGEPAGE
858fa25c503SKOSAKI Motohiro 	"thp_fault_alloc",
859fa25c503SKOSAKI Motohiro 	"thp_fault_fallback",
860fa25c503SKOSAKI Motohiro 	"thp_collapse_alloc",
861fa25c503SKOSAKI Motohiro 	"thp_collapse_alloc_failed",
862fa25c503SKOSAKI Motohiro 	"thp_split",
863d8a8e1f0SKirill A. Shutemov 	"thp_zero_page_alloc",
864d8a8e1f0SKirill A. Shutemov 	"thp_zero_page_alloc_failed",
865fa25c503SKOSAKI Motohiro #endif
866*09316c09SKonstantin Khlebnikov #ifdef CONFIG_MEMORY_BALLOON
867*09316c09SKonstantin Khlebnikov 	"balloon_inflate",
868*09316c09SKonstantin Khlebnikov 	"balloon_deflate",
869*09316c09SKonstantin Khlebnikov #ifdef CONFIG_BALLOON_COMPACTION
870*09316c09SKonstantin Khlebnikov 	"balloon_migrate",
871*09316c09SKonstantin Khlebnikov #endif
872*09316c09SKonstantin Khlebnikov #endif /* CONFIG_MEMORY_BALLOON */
873ec659934SMel Gorman #ifdef CONFIG_DEBUG_TLBFLUSH
8746df46865SDave Hansen #ifdef CONFIG_SMP
8759824cf97SDave Hansen 	"nr_tlb_remote_flush",
8769824cf97SDave Hansen 	"nr_tlb_remote_flush_received",
877ec659934SMel Gorman #endif /* CONFIG_SMP */
8789824cf97SDave Hansen 	"nr_tlb_local_flush_all",
8799824cf97SDave Hansen 	"nr_tlb_local_flush_one",
880ec659934SMel Gorman #endif /* CONFIG_DEBUG_TLBFLUSH */
881fa25c503SKOSAKI Motohiro 
8824f115147SDavidlohr Bueso #ifdef CONFIG_DEBUG_VM_VMACACHE
8834f115147SDavidlohr Bueso 	"vmacache_find_calls",
8844f115147SDavidlohr Bueso 	"vmacache_find_hits",
8854f115147SDavidlohr Bueso #endif
886fa25c503SKOSAKI Motohiro #endif /* CONFIG_VM_EVENTS_COUNTERS */
887fa25c503SKOSAKI Motohiro #endif /* CONFIG_VM_EVENT_COUNTERS */
8880d6617c7SDavid Rientjes #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
889fa25c503SKOSAKI Motohiro 
890fa25c503SKOSAKI Motohiro 
891d7a5752cSMel Gorman #ifdef CONFIG_PROC_FS
892467c996cSMel Gorman static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
893467c996cSMel Gorman 						struct zone *zone)
894467c996cSMel Gorman {
895467c996cSMel Gorman 	int order;
896467c996cSMel Gorman 
897f6ac2354SChristoph Lameter 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
898f6ac2354SChristoph Lameter 	for (order = 0; order < MAX_ORDER; ++order)
899f6ac2354SChristoph Lameter 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
900f6ac2354SChristoph Lameter 	seq_putc(m, '\n');
901f6ac2354SChristoph Lameter }
902467c996cSMel Gorman 
903467c996cSMel Gorman /*
904467c996cSMel Gorman  * This walks the free areas for each zone.
905467c996cSMel Gorman  */
906467c996cSMel Gorman static int frag_show(struct seq_file *m, void *arg)
907467c996cSMel Gorman {
908467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
909467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, frag_show_print);
910467c996cSMel Gorman 	return 0;
911467c996cSMel Gorman }
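
/*
 * Illustrative /proc/buddyinfo line produced by the code above (the counts
 * are made up): "Node 0, zone   Normal    145     62     30     12 ..."
 * with one column per order, 0 through MAX_ORDER - 1.
 */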
912467c996cSMel Gorman 
913467c996cSMel Gorman static void pagetypeinfo_showfree_print(struct seq_file *m,
914467c996cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
915467c996cSMel Gorman {
916467c996cSMel Gorman 	int order, mtype;
917467c996cSMel Gorman 
918467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
919467c996cSMel Gorman 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
920467c996cSMel Gorman 					pgdat->node_id,
921467c996cSMel Gorman 					zone->name,
922467c996cSMel Gorman 					migratetype_names[mtype]);
923467c996cSMel Gorman 		for (order = 0; order < MAX_ORDER; ++order) {
924467c996cSMel Gorman 			unsigned long freecount = 0;
925467c996cSMel Gorman 			struct free_area *area;
926467c996cSMel Gorman 			struct list_head *curr;
927467c996cSMel Gorman 
928467c996cSMel Gorman 			area = &(zone->free_area[order]);
929467c996cSMel Gorman 
930467c996cSMel Gorman 			list_for_each(curr, &area->free_list[mtype])
931467c996cSMel Gorman 				freecount++;
932467c996cSMel Gorman 			seq_printf(m, "%6lu ", freecount);
933467c996cSMel Gorman 		}
934467c996cSMel Gorman 		seq_putc(m, '\n');
935467c996cSMel Gorman 	}
936467c996cSMel Gorman }
937467c996cSMel Gorman 
938467c996cSMel Gorman /* Print out the free pages at each order for each migratetype */
939467c996cSMel Gorman static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
940467c996cSMel Gorman {
941467c996cSMel Gorman 	int order;
942467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
943467c996cSMel Gorman 
944467c996cSMel Gorman 	/* Print header */
945467c996cSMel Gorman 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
946467c996cSMel Gorman 	for (order = 0; order < MAX_ORDER; ++order)
947467c996cSMel Gorman 		seq_printf(m, "%6d ", order);
948467c996cSMel Gorman 	seq_putc(m, '\n');
949467c996cSMel Gorman 
950467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
951467c996cSMel Gorman 
952467c996cSMel Gorman 	return 0;
953467c996cSMel Gorman }
954467c996cSMel Gorman 
955467c996cSMel Gorman static void pagetypeinfo_showblockcount_print(struct seq_file *m,
956467c996cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
957467c996cSMel Gorman {
958467c996cSMel Gorman 	int mtype;
959467c996cSMel Gorman 	unsigned long pfn;
960467c996cSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
961108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
962467c996cSMel Gorman 	unsigned long count[MIGRATE_TYPES] = { 0, };
963467c996cSMel Gorman 
964467c996cSMel Gorman 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
965467c996cSMel Gorman 		struct page *page;
966467c996cSMel Gorman 
967467c996cSMel Gorman 		if (!pfn_valid(pfn))
968467c996cSMel Gorman 			continue;
969467c996cSMel Gorman 
970467c996cSMel Gorman 		page = pfn_to_page(pfn);
971eb33575cSMel Gorman 
972eb33575cSMel Gorman 		/* Watch for unexpected holes punched in the memmap */
973eb33575cSMel Gorman 		if (!memmap_valid_within(pfn, page, zone))
974e80d6a24SMel Gorman 			continue;
975eb33575cSMel Gorman 
976467c996cSMel Gorman 		mtype = get_pageblock_migratetype(page);
977467c996cSMel Gorman 
978e80d6a24SMel Gorman 		if (mtype < MIGRATE_TYPES)
979467c996cSMel Gorman 			count[mtype]++;
980467c996cSMel Gorman 	}
981467c996cSMel Gorman 
982467c996cSMel Gorman 	/* Print counts */
983467c996cSMel Gorman 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
984467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
985467c996cSMel Gorman 		seq_printf(m, "%12lu ", count[mtype]);
986467c996cSMel Gorman 	seq_putc(m, '\n');
987467c996cSMel Gorman }
988467c996cSMel Gorman 
989467c996cSMel Gorman /* Print out the number of pageblocks for each migratetype */
990467c996cSMel Gorman static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
991467c996cSMel Gorman {
992467c996cSMel Gorman 	int mtype;
993467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
994467c996cSMel Gorman 
995467c996cSMel Gorman 	seq_printf(m, "\n%-23s", "Number of blocks type ");
996467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
997467c996cSMel Gorman 		seq_printf(m, "%12s ", migratetype_names[mtype]);
998467c996cSMel Gorman 	seq_putc(m, '\n');
999467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1000467c996cSMel Gorman 
1001467c996cSMel Gorman 	return 0;
1002467c996cSMel Gorman }
1003467c996cSMel Gorman 
1004467c996cSMel Gorman /*
1005467c996cSMel Gorman  * This prints out statistics in relation to grouping pages by mobility.
1006467c996cSMel Gorman  * It is expensive to collect so do not constantly read the file.
1007467c996cSMel Gorman  */
1008467c996cSMel Gorman static int pagetypeinfo_show(struct seq_file *m, void *arg)
1009467c996cSMel Gorman {
1010467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1011467c996cSMel Gorman 
101241b25a37SKOSAKI Motohiro 	/* check memoryless node */
1013a47b53c5SLai Jiangshan 	if (!node_state(pgdat->node_id, N_MEMORY))
101441b25a37SKOSAKI Motohiro 		return 0;
101541b25a37SKOSAKI Motohiro 
1016467c996cSMel Gorman 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1017467c996cSMel Gorman 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1018467c996cSMel Gorman 	seq_putc(m, '\n');
1019467c996cSMel Gorman 	pagetypeinfo_showfree(m, pgdat);
1020467c996cSMel Gorman 	pagetypeinfo_showblockcount(m, pgdat);
1021467c996cSMel Gorman 
1022f6ac2354SChristoph Lameter 	return 0;
1023f6ac2354SChristoph Lameter }
1024f6ac2354SChristoph Lameter 
10258f32f7e5SAlexey Dobriyan static const struct seq_operations fragmentation_op = {
1026f6ac2354SChristoph Lameter 	.start	= frag_start,
1027f6ac2354SChristoph Lameter 	.next	= frag_next,
1028f6ac2354SChristoph Lameter 	.stop	= frag_stop,
1029f6ac2354SChristoph Lameter 	.show	= frag_show,
1030f6ac2354SChristoph Lameter };
1031f6ac2354SChristoph Lameter 
10328f32f7e5SAlexey Dobriyan static int fragmentation_open(struct inode *inode, struct file *file)
10338f32f7e5SAlexey Dobriyan {
10348f32f7e5SAlexey Dobriyan 	return seq_open(file, &fragmentation_op);
10358f32f7e5SAlexey Dobriyan }
10368f32f7e5SAlexey Dobriyan 
10378f32f7e5SAlexey Dobriyan static const struct file_operations fragmentation_file_operations = {
10388f32f7e5SAlexey Dobriyan 	.open		= fragmentation_open,
10398f32f7e5SAlexey Dobriyan 	.read		= seq_read,
10408f32f7e5SAlexey Dobriyan 	.llseek		= seq_lseek,
10418f32f7e5SAlexey Dobriyan 	.release	= seq_release,
10428f32f7e5SAlexey Dobriyan };
10438f32f7e5SAlexey Dobriyan 
104474e2e8e8SAlexey Dobriyan static const struct seq_operations pagetypeinfo_op = {
1045467c996cSMel Gorman 	.start	= frag_start,
1046467c996cSMel Gorman 	.next	= frag_next,
1047467c996cSMel Gorman 	.stop	= frag_stop,
1048467c996cSMel Gorman 	.show	= pagetypeinfo_show,
1049467c996cSMel Gorman };
1050467c996cSMel Gorman 
105174e2e8e8SAlexey Dobriyan static int pagetypeinfo_open(struct inode *inode, struct file *file)
105274e2e8e8SAlexey Dobriyan {
105374e2e8e8SAlexey Dobriyan 	return seq_open(file, &pagetypeinfo_op);
105474e2e8e8SAlexey Dobriyan }
105574e2e8e8SAlexey Dobriyan 
105674e2e8e8SAlexey Dobriyan static const struct file_operations pagetypeinfo_file_ops = {
105774e2e8e8SAlexey Dobriyan 	.open		= pagetypeinfo_open,
105874e2e8e8SAlexey Dobriyan 	.read		= seq_read,
105974e2e8e8SAlexey Dobriyan 	.llseek		= seq_lseek,
106074e2e8e8SAlexey Dobriyan 	.release	= seq_release,
106174e2e8e8SAlexey Dobriyan };
106274e2e8e8SAlexey Dobriyan 
1063467c996cSMel Gorman static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1064467c996cSMel Gorman 							struct zone *zone)
1065f6ac2354SChristoph Lameter {
1066f6ac2354SChristoph Lameter 	int i;
1067f6ac2354SChristoph Lameter 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1068f6ac2354SChristoph Lameter 	seq_printf(m,
1069f6ac2354SChristoph Lameter 		   "\n  pages free     %lu"
1070f6ac2354SChristoph Lameter 		   "\n        min      %lu"
1071f6ac2354SChristoph Lameter 		   "\n        low      %lu"
1072f6ac2354SChristoph Lameter 		   "\n        high     %lu"
107308d9ae7cSWu Fengguang 		   "\n        scanned  %lu"
1074f6ac2354SChristoph Lameter 		   "\n        spanned  %lu"
10759feedc9dSJiang Liu 		   "\n        present  %lu"
10769feedc9dSJiang Liu 		   "\n        managed  %lu",
107788f5acf8SMel Gorman 		   zone_page_state(zone, NR_FREE_PAGES),
107841858966SMel Gorman 		   min_wmark_pages(zone),
107941858966SMel Gorman 		   low_wmark_pages(zone),
108041858966SMel Gorman 		   high_wmark_pages(zone),
10810d5d823aSMel Gorman 		   zone_page_state(zone, NR_PAGES_SCANNED),
1082f6ac2354SChristoph Lameter 		   zone->spanned_pages,
10839feedc9dSJiang Liu 		   zone->present_pages,
10849feedc9dSJiang Liu 		   zone->managed_pages);
10852244b95aSChristoph Lameter 
10862244b95aSChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
10872244b95aSChristoph Lameter 		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
10882244b95aSChristoph Lameter 				zone_page_state(zone, i));
10892244b95aSChristoph Lameter 
1090f6ac2354SChristoph Lameter 	seq_printf(m,
10913484b2deSMel Gorman 		   "\n        protection: (%ld",
1092f6ac2354SChristoph Lameter 		   zone->lowmem_reserve[0]);
1093f6ac2354SChristoph Lameter 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
10943484b2deSMel Gorman 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1095f6ac2354SChristoph Lameter 	seq_printf(m,
1096f6ac2354SChristoph Lameter 		   ")"
1097f6ac2354SChristoph Lameter 		   "\n  pagesets");
1098f6ac2354SChristoph Lameter 	for_each_online_cpu(i) {
1099f6ac2354SChristoph Lameter 		struct per_cpu_pageset *pageset;
1100f6ac2354SChristoph Lameter 
110199dcc3e5SChristoph Lameter 		pageset = per_cpu_ptr(zone->pageset, i);
1102f6ac2354SChristoph Lameter 		seq_printf(m,
11033dfa5721SChristoph Lameter 			   "\n    cpu: %i"
1104f6ac2354SChristoph Lameter 			   "\n              count: %i"
1105f6ac2354SChristoph Lameter 			   "\n              high:  %i"
1106f6ac2354SChristoph Lameter 			   "\n              batch: %i",
11073dfa5721SChristoph Lameter 			   i,
11083dfa5721SChristoph Lameter 			   pageset->pcp.count,
11093dfa5721SChristoph Lameter 			   pageset->pcp.high,
11103dfa5721SChristoph Lameter 			   pageset->pcp.batch);
1111df9ecabaSChristoph Lameter #ifdef CONFIG_SMP
1112df9ecabaSChristoph Lameter 		seq_printf(m, "\n  vm stats threshold: %d",
1113df9ecabaSChristoph Lameter 				pageset->stat_threshold);
1114df9ecabaSChristoph Lameter #endif
1115f6ac2354SChristoph Lameter 	}
1116f6ac2354SChristoph Lameter 	seq_printf(m,
1117f6ac2354SChristoph Lameter 		   "\n  all_unreclaimable: %u"
1118556adecbSRik van Riel 		   "\n  start_pfn:         %lu"
1119556adecbSRik van Riel 		   "\n  inactive_ratio:    %u",
11206e543d57SLisa Du 		   !zone_reclaimable(zone),
1121556adecbSRik van Riel 		   zone->zone_start_pfn,
1122556adecbSRik van Riel 		   zone->inactive_ratio);
1123f6ac2354SChristoph Lameter 	seq_putc(m, '\n');
1124f6ac2354SChristoph Lameter }
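/*
 * A rough sketch of one /proc/zoneinfo entry as printed above (all numbers
 * hypothetical):
 *
 *	Node 0, zone   Normal
 *	  pages free     12345
 *	        min      1234
 *	        low      1542
 *	        high     1851
 *	        scanned  0
 *	        spanned  262144
 *	        present  262144
 *	        managed  259072
 *	    nr_free_pages 12345
 *	    (... one line per zone counter ...)
 *	        protection: (0, 0, 0, 0)
 *	  pagesets
 *	    cpu: 0
 *	              count: 56
 *	              high:  186
 *	              batch: 31
 *	  vm stats threshold: 24
 *	  all_unreclaimable: 0
 *	  start_pfn:         1048576
 *	  inactive_ratio:    3
 */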
1125467c996cSMel Gorman 
1126467c996cSMel Gorman /*
1127467c996cSMel Gorman  * Output information about zones in @pgdat.
1128467c996cSMel Gorman  */
1129467c996cSMel Gorman static int zoneinfo_show(struct seq_file *m, void *arg)
1130467c996cSMel Gorman {
1131467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1132467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1133f6ac2354SChristoph Lameter 	return 0;
1134f6ac2354SChristoph Lameter }
1135f6ac2354SChristoph Lameter 
11365c9fe628SAlexey Dobriyan static const struct seq_operations zoneinfo_op = {
1137f6ac2354SChristoph Lameter 	.start	= frag_start, /* iterate over all zones; the same
1138f6ac2354SChristoph Lameter 			       * iterator as the fragmentation files */
1139f6ac2354SChristoph Lameter 	.next	= frag_next,
1140f6ac2354SChristoph Lameter 	.stop	= frag_stop,
1141f6ac2354SChristoph Lameter 	.show	= zoneinfo_show,
1142f6ac2354SChristoph Lameter };
1143f6ac2354SChristoph Lameter 
11445c9fe628SAlexey Dobriyan static int zoneinfo_open(struct inode *inode, struct file *file)
11455c9fe628SAlexey Dobriyan {
11465c9fe628SAlexey Dobriyan 	return seq_open(file, &zoneinfo_op);
11475c9fe628SAlexey Dobriyan }
11485c9fe628SAlexey Dobriyan 
11495c9fe628SAlexey Dobriyan static const struct file_operations proc_zoneinfo_file_operations = {
11505c9fe628SAlexey Dobriyan 	.open		= zoneinfo_open,
11515c9fe628SAlexey Dobriyan 	.read		= seq_read,
11525c9fe628SAlexey Dobriyan 	.llseek		= seq_lseek,
11535c9fe628SAlexey Dobriyan 	.release	= seq_release,
11545c9fe628SAlexey Dobriyan };
11555c9fe628SAlexey Dobriyan 
115679da826aSMichael Rubin enum writeback_stat_item {
115779da826aSMichael Rubin 	NR_DIRTY_THRESHOLD,
115879da826aSMichael Rubin 	NR_DIRTY_BG_THRESHOLD,
115979da826aSMichael Rubin 	NR_VM_WRITEBACK_STAT_ITEMS,
116079da826aSMichael Rubin };
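/*
 * These two items are not zone counters: vmstat_start() below appends the
 * current dirty and background-dirty thresholds (from global_dirty_limits())
 * immediately after the zone counters, so /proc/vmstat reports them from the
 * same snapshot buffer.
 */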
116179da826aSMichael Rubin 
1162f6ac2354SChristoph Lameter static void *vmstat_start(struct seq_file *m, loff_t *pos)
1163f6ac2354SChristoph Lameter {
11642244b95aSChristoph Lameter 	unsigned long *v;
116579da826aSMichael Rubin 	int i, stat_items_size;
1166f6ac2354SChristoph Lameter 
1167f6ac2354SChristoph Lameter 	if (*pos >= ARRAY_SIZE(vmstat_text))
1168f6ac2354SChristoph Lameter 		return NULL;
116979da826aSMichael Rubin 	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
117079da826aSMichael Rubin 			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1171f6ac2354SChristoph Lameter 
1172f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
117379da826aSMichael Rubin 	stat_items_size += sizeof(struct vm_event_state);
1174f8891e5eSChristoph Lameter #endif
117579da826aSMichael Rubin 
117679da826aSMichael Rubin 	v = kmalloc(stat_items_size, GFP_KERNEL);
11772244b95aSChristoph Lameter 	m->private = v;
11782244b95aSChristoph Lameter 	if (!v)
1179f6ac2354SChristoph Lameter 		return ERR_PTR(-ENOMEM);
11802244b95aSChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
11812244b95aSChristoph Lameter 		v[i] = global_page_state(i);
118279da826aSMichael Rubin 	v += NR_VM_ZONE_STAT_ITEMS;
118379da826aSMichael Rubin 
118479da826aSMichael Rubin 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
118579da826aSMichael Rubin 			    v + NR_DIRTY_THRESHOLD);
118679da826aSMichael Rubin 	v += NR_VM_WRITEBACK_STAT_ITEMS;
118779da826aSMichael Rubin 
1188f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
118979da826aSMichael Rubin 	all_vm_events(v);
119079da826aSMichael Rubin 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
119179da826aSMichael Rubin 	v[PGPGOUT] /= 2;
1192f8891e5eSChristoph Lameter #endif
1193ff8b16d7SWu Fengguang 	return (unsigned long *)m->private + *pos;
1194f6ac2354SChristoph Lameter }
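/*
 * In effect the private buffer built above is a single array of unsigned
 * longs matching vmstat_text entry for entry: the NR_VM_ZONE_STAT_ITEMS zone
 * counters, then the NR_VM_WRITEBACK_STAT_ITEMS dirty thresholds, then (when
 * CONFIG_VM_EVENT_COUNTERS is enabled) the vm event counters. vmstat_show()
 * below simply indexes this array with the seq_file position.
 */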
1195f6ac2354SChristoph Lameter 
1196f6ac2354SChristoph Lameter static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1197f6ac2354SChristoph Lameter {
1198f6ac2354SChristoph Lameter 	(*pos)++;
1199f6ac2354SChristoph Lameter 	if (*pos >= ARRAY_SIZE(vmstat_text))
1200f6ac2354SChristoph Lameter 		return NULL;
1201f6ac2354SChristoph Lameter 	return (unsigned long *)m->private + *pos;
1202f6ac2354SChristoph Lameter }
1203f6ac2354SChristoph Lameter 
1204f6ac2354SChristoph Lameter static int vmstat_show(struct seq_file *m, void *arg)
1205f6ac2354SChristoph Lameter {
1206f6ac2354SChristoph Lameter 	unsigned long *l = arg;
1207f6ac2354SChristoph Lameter 	unsigned long off = l - (unsigned long *)m->private;
1208f6ac2354SChristoph Lameter 
1209f6ac2354SChristoph Lameter 	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1210f6ac2354SChristoph Lameter 	return 0;
1211f6ac2354SChristoph Lameter }
1212f6ac2354SChristoph Lameter 
1213f6ac2354SChristoph Lameter static void vmstat_stop(struct seq_file *m, void *arg)
1214f6ac2354SChristoph Lameter {
1215f6ac2354SChristoph Lameter 	kfree(m->private);
1216f6ac2354SChristoph Lameter 	m->private = NULL;
1217f6ac2354SChristoph Lameter }
1218f6ac2354SChristoph Lameter 
1219b6aa44abSAlexey Dobriyan static const struct seq_operations vmstat_op = {
1220f6ac2354SChristoph Lameter 	.start	= vmstat_start,
1221f6ac2354SChristoph Lameter 	.next	= vmstat_next,
1222f6ac2354SChristoph Lameter 	.stop	= vmstat_stop,
1223f6ac2354SChristoph Lameter 	.show	= vmstat_show,
1224f6ac2354SChristoph Lameter };
1225f6ac2354SChristoph Lameter 
1226b6aa44abSAlexey Dobriyan static int vmstat_open(struct inode *inode, struct file *file)
1227b6aa44abSAlexey Dobriyan {
1228b6aa44abSAlexey Dobriyan 	return seq_open(file, &vmstat_op);
1229b6aa44abSAlexey Dobriyan }
1230b6aa44abSAlexey Dobriyan 
1231b6aa44abSAlexey Dobriyan static const struct file_operations proc_vmstat_file_operations = {
1232b6aa44abSAlexey Dobriyan 	.open		= vmstat_open,
1233b6aa44abSAlexey Dobriyan 	.read		= seq_read,
1234b6aa44abSAlexey Dobriyan 	.llseek		= seq_lseek,
1235b6aa44abSAlexey Dobriyan 	.release	= seq_release,
1236b6aa44abSAlexey Dobriyan };
1237f6ac2354SChristoph Lameter #endif /* CONFIG_PROC_FS */
1238f6ac2354SChristoph Lameter 
1239df9ecabaSChristoph Lameter #ifdef CONFIG_SMP
1240d1187ed2SChristoph Lameter static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
124177461ab3SChristoph Lameter int sysctl_stat_interval __read_mostly = HZ;
1242d1187ed2SChristoph Lameter 
1243d1187ed2SChristoph Lameter static void vmstat_update(struct work_struct *w)
1244d1187ed2SChristoph Lameter {
1245fbc2edb0SChristoph Lameter 	refresh_cpu_vm_stats();
12467c8e0181SChristoph Lameter 	schedule_delayed_work(this_cpu_ptr(&vmstat_work),
124798f4ebb2SAnton Blanchard 		round_jiffies_relative(sysctl_stat_interval));
1248d1187ed2SChristoph Lameter }
1249d1187ed2SChristoph Lameter 
12500db0628dSPaul Gortmaker static void start_cpu_timer(int cpu)
1251d1187ed2SChristoph Lameter {
12521871e52cSTejun Heo 	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1253d1187ed2SChristoph Lameter 
1254203b42f7STejun Heo 	INIT_DEFERRABLE_WORK(work, vmstat_update);
12551871e52cSTejun Heo 	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1256d1187ed2SChristoph Lameter }
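/*
 * Note on the initial delay: __round_jiffies_relative(HZ, cpu) rounds the
 * delay up to roughly a whole second and applies a small per-cpu skew, so
 * the deferrable vmstat timers tend to fire on second boundaries without
 * every CPU waking at exactly the same jiffy.
 */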
1257d1187ed2SChristoph Lameter 
1258807a1bd2SToshi Kani static void vmstat_cpu_dead(int node)
1259807a1bd2SToshi Kani {
1260807a1bd2SToshi Kani 	int cpu;
1261807a1bd2SToshi Kani 
1262807a1bd2SToshi Kani 	get_online_cpus();
1263807a1bd2SToshi Kani 	for_each_online_cpu(cpu)
1264807a1bd2SToshi Kani 		if (cpu_to_node(cpu) == node)
1265807a1bd2SToshi Kani 			goto end;
1266807a1bd2SToshi Kani 
1267807a1bd2SToshi Kani 	node_clear_state(node, N_CPU);
1268807a1bd2SToshi Kani end:
1269807a1bd2SToshi Kani 	put_online_cpus();
1270807a1bd2SToshi Kani }
1271807a1bd2SToshi Kani 
1272df9ecabaSChristoph Lameter /*
1273df9ecabaSChristoph Lameter  * Use the cpu notifier to ensure that the thresholds are recalculated
1274df9ecabaSChristoph Lameter  * when necessary.
1275df9ecabaSChristoph Lameter  */
12760db0628dSPaul Gortmaker static int vmstat_cpuup_callback(struct notifier_block *nfb,
1277df9ecabaSChristoph Lameter 		unsigned long action,
1278df9ecabaSChristoph Lameter 		void *hcpu)
1279df9ecabaSChristoph Lameter {
1280d1187ed2SChristoph Lameter 	long cpu = (long)hcpu;
1281d1187ed2SChristoph Lameter 
1282df9ecabaSChristoph Lameter 	switch (action) {
1283d1187ed2SChristoph Lameter 	case CPU_ONLINE:
1284d1187ed2SChristoph Lameter 	case CPU_ONLINE_FROZEN:
12855ee28a44SKAMEZAWA Hiroyuki 		refresh_zone_stat_thresholds();
1286d1187ed2SChristoph Lameter 		start_cpu_timer(cpu);
1287ad596925SChristoph Lameter 		node_set_state(cpu_to_node(cpu), N_CPU);
1288d1187ed2SChristoph Lameter 		break;
1289d1187ed2SChristoph Lameter 	case CPU_DOWN_PREPARE:
1290d1187ed2SChristoph Lameter 	case CPU_DOWN_PREPARE_FROZEN:
1291afe2c511STejun Heo 		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1292d1187ed2SChristoph Lameter 		per_cpu(vmstat_work, cpu).work.func = NULL;
1293d1187ed2SChristoph Lameter 		break;
1294d1187ed2SChristoph Lameter 	case CPU_DOWN_FAILED:
1295d1187ed2SChristoph Lameter 	case CPU_DOWN_FAILED_FROZEN:
1296d1187ed2SChristoph Lameter 		start_cpu_timer(cpu);
1297d1187ed2SChristoph Lameter 		break;
1298df9ecabaSChristoph Lameter 	case CPU_DEAD:
12998bb78442SRafael J. Wysocki 	case CPU_DEAD_FROZEN:
1300df9ecabaSChristoph Lameter 		refresh_zone_stat_thresholds();
1301807a1bd2SToshi Kani 		vmstat_cpu_dead(cpu_to_node(cpu));
1302df9ecabaSChristoph Lameter 		break;
1303df9ecabaSChristoph Lameter 	default:
1304df9ecabaSChristoph Lameter 		break;
1305df9ecabaSChristoph Lameter 	}
1306df9ecabaSChristoph Lameter 	return NOTIFY_OK;
1307df9ecabaSChristoph Lameter }
1308df9ecabaSChristoph Lameter 
13090db0628dSPaul Gortmaker static struct notifier_block vmstat_notifier =
1310df9ecabaSChristoph Lameter 	{ &vmstat_cpuup_callback, NULL, 0 };
13118f32f7e5SAlexey Dobriyan #endif
1312df9ecabaSChristoph Lameter 
1313e2fc88d0SAdrian Bunk static int __init setup_vmstat(void)
1314df9ecabaSChristoph Lameter {
13158f32f7e5SAlexey Dobriyan #ifdef CONFIG_SMP
1316d1187ed2SChristoph Lameter 	int cpu;
1317d1187ed2SChristoph Lameter 
13180be94badSSrivatsa S. Bhat 	cpu_notifier_register_begin();
13190be94badSSrivatsa S. Bhat 	__register_cpu_notifier(&vmstat_notifier);
1320d1187ed2SChristoph Lameter 
1321d7e0b37aSToshi Kani 	for_each_online_cpu(cpu) {
1322d1187ed2SChristoph Lameter 		start_cpu_timer(cpu);
1323d7e0b37aSToshi Kani 		node_set_state(cpu_to_node(cpu), N_CPU);
1324d7e0b37aSToshi Kani 	}
13250be94badSSrivatsa S. Bhat 	cpu_notifier_register_done();
13268f32f7e5SAlexey Dobriyan #endif
13278f32f7e5SAlexey Dobriyan #ifdef CONFIG_PROC_FS
13288f32f7e5SAlexey Dobriyan 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
132974e2e8e8SAlexey Dobriyan 	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1330b6aa44abSAlexey Dobriyan 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
13315c9fe628SAlexey Dobriyan 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
13328f32f7e5SAlexey Dobriyan #endif
1333df9ecabaSChristoph Lameter 	return 0;
1334df9ecabaSChristoph Lameter }
1335df9ecabaSChristoph Lameter module_init(setup_vmstat)
1336d7a5752cSMel Gorman 
1337d7a5752cSMel Gorman #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1338d7a5752cSMel Gorman #include <linux/debugfs.h>
1339d7a5752cSMel Gorman 
1341d7a5752cSMel Gorman /*
1342d7a5752cSMel Gorman  * Return an index indicating how much of the available free memory is
1343d7a5752cSMel Gorman  * unusable for an allocation of the requested size.
1344d7a5752cSMel Gorman  */
1345d7a5752cSMel Gorman static int unusable_free_index(unsigned int order,
1346d7a5752cSMel Gorman 				struct contig_page_info *info)
1347d7a5752cSMel Gorman {
1348d7a5752cSMel Gorman 	/* No free memory is treated as all free memory being unusable */
1349d7a5752cSMel Gorman 	if (info->free_pages == 0)
1350d7a5752cSMel Gorman 		return 1000;
1351d7a5752cSMel Gorman 
1352d7a5752cSMel Gorman 	/*
1353d7a5752cSMel Gorman 	 * The index is conceptually a value between 0 and 1; it is returned
1354d7a5752cSMel Gorman 	 * here as an integer scaled by 1000, i.e. to 3 decimal places.
1355d7a5752cSMel Gorman 	 *
1356d7a5752cSMel Gorman 	 * 0 => no fragmentation
1357d7a5752cSMel Gorman 	 * 1 => high fragmentation
1358d7a5752cSMel Gorman 	 */
1359d7a5752cSMel Gorman 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1360d7a5752cSMel Gorman 
1361d7a5752cSMel Gorman }
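/*
 * Worked example (hypothetical numbers): for an order-3 request with
 * info->free_pages == 1000 and info->free_blocks_suitable == 25, the
 * suitable blocks cover 25 << 3 == 200 pages, so the result is
 * (1000 - 200) * 1000 / 1000 == 800, i.e. an index of 0.800: roughly 80%
 * of the free memory is unusable for allocations of that size.
 */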
1362d7a5752cSMel Gorman 
1363d7a5752cSMel Gorman static void unusable_show_print(struct seq_file *m,
1364d7a5752cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
1365d7a5752cSMel Gorman {
1366d7a5752cSMel Gorman 	unsigned int order;
1367d7a5752cSMel Gorman 	int index;
1368d7a5752cSMel Gorman 	struct contig_page_info info;
1369d7a5752cSMel Gorman 
1370d7a5752cSMel Gorman 	seq_printf(m, "Node %d, zone %8s ",
1371d7a5752cSMel Gorman 				pgdat->node_id,
1372d7a5752cSMel Gorman 				zone->name);
1373d7a5752cSMel Gorman 	for (order = 0; order < MAX_ORDER; ++order) {
1374d7a5752cSMel Gorman 		fill_contig_page_info(zone, order, &info);
1375d7a5752cSMel Gorman 		index = unusable_free_index(order, &info);
1376d7a5752cSMel Gorman 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1377d7a5752cSMel Gorman 	}
1378d7a5752cSMel Gorman 
1379d7a5752cSMel Gorman 	seq_putc(m, '\n');
1380d7a5752cSMel Gorman }
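/*
 * Each zone therefore contributes one line to the debugfs file, e.g.
 * (hypothetical values, one index per order from 0 to MAX_ORDER - 1):
 *
 *	Node 0, zone   Normal 0.000 0.001 0.005 0.013 0.080 0.250 ...
 */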
1381d7a5752cSMel Gorman 
1382d7a5752cSMel Gorman /*
1383d7a5752cSMel Gorman  * Display unusable free space index
1384d7a5752cSMel Gorman  *
1385d7a5752cSMel Gorman  * The unusable free space index measures how much of the available free
1386d7a5752cSMel Gorman  * memory cannot be used to satisfy an allocation of a given size and is a
1387d7a5752cSMel Gorman  * value between 0 and 1. The higher the value, the more of the free memory
1388d7a5752cSMel Gorman  * is unusable and, by implication, the worse the external fragmentation. This
1389d7a5752cSMel Gorman  * can be expressed as a percentage by multiplying by 100.
1390d7a5752cSMel Gorman  */
1391d7a5752cSMel Gorman static int unusable_show(struct seq_file *m, void *arg)
1392d7a5752cSMel Gorman {
1393d7a5752cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1394d7a5752cSMel Gorman 
1395d7a5752cSMel Gorman 	/* check memoryless node */
1396a47b53c5SLai Jiangshan 	if (!node_state(pgdat->node_id, N_MEMORY))
1397d7a5752cSMel Gorman 		return 0;
1398d7a5752cSMel Gorman 
1399d7a5752cSMel Gorman 	walk_zones_in_node(m, pgdat, unusable_show_print);
1400d7a5752cSMel Gorman 
1401d7a5752cSMel Gorman 	return 0;
1402d7a5752cSMel Gorman }
1403d7a5752cSMel Gorman 
1404d7a5752cSMel Gorman static const struct seq_operations unusable_op = {
1405d7a5752cSMel Gorman 	.start	= frag_start,
1406d7a5752cSMel Gorman 	.next	= frag_next,
1407d7a5752cSMel Gorman 	.stop	= frag_stop,
1408d7a5752cSMel Gorman 	.show	= unusable_show,
1409d7a5752cSMel Gorman };
1410d7a5752cSMel Gorman 
1411d7a5752cSMel Gorman static int unusable_open(struct inode *inode, struct file *file)
1412d7a5752cSMel Gorman {
1413d7a5752cSMel Gorman 	return seq_open(file, &unusable_op);
1414d7a5752cSMel Gorman }
1415d7a5752cSMel Gorman 
1416d7a5752cSMel Gorman static const struct file_operations unusable_file_ops = {
1417d7a5752cSMel Gorman 	.open		= unusable_open,
1418d7a5752cSMel Gorman 	.read		= seq_read,
1419d7a5752cSMel Gorman 	.llseek		= seq_lseek,
1420d7a5752cSMel Gorman 	.release	= seq_release,
1421d7a5752cSMel Gorman };
1422d7a5752cSMel Gorman 
1423f1a5ab12SMel Gorman static void extfrag_show_print(struct seq_file *m,
1424f1a5ab12SMel Gorman 					pg_data_t *pgdat, struct zone *zone)
1425f1a5ab12SMel Gorman {
1426f1a5ab12SMel Gorman 	unsigned int order;
1427f1a5ab12SMel Gorman 	int index;
1428f1a5ab12SMel Gorman 
1429f1a5ab12SMel Gorman 	/* Alloc on stack as interrupts are disabled for zone walk */
1430f1a5ab12SMel Gorman 	struct contig_page_info info;
1431f1a5ab12SMel Gorman 
1432f1a5ab12SMel Gorman 	seq_printf(m, "Node %d, zone %8s ",
1433f1a5ab12SMel Gorman 				pgdat->node_id,
1434f1a5ab12SMel Gorman 				zone->name);
1435f1a5ab12SMel Gorman 	for (order = 0; order < MAX_ORDER; ++order) {
1436f1a5ab12SMel Gorman 		fill_contig_page_info(zone, order, &info);
143756de7263SMel Gorman 		index = __fragmentation_index(order, &info);
1438f1a5ab12SMel Gorman 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1439f1a5ab12SMel Gorman 	}
1440f1a5ab12SMel Gorman 
1441f1a5ab12SMel Gorman 	seq_putc(m, '\n');
1442f1a5ab12SMel Gorman }
1443f1a5ab12SMel Gorman 
1444f1a5ab12SMel Gorman /*
1445f1a5ab12SMel Gorman  * Display the fragmentation index for each order at which allocations would fail
1446f1a5ab12SMel Gorman  */
1447f1a5ab12SMel Gorman static int extfrag_show(struct seq_file *m, void *arg)
1448f1a5ab12SMel Gorman {
1449f1a5ab12SMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1450f1a5ab12SMel Gorman 
1451f1a5ab12SMel Gorman 	walk_zones_in_node(m, pgdat, extfrag_show_print);
1452f1a5ab12SMel Gorman 
1453f1a5ab12SMel Gorman 	return 0;
1454f1a5ab12SMel Gorman }
1455f1a5ab12SMel Gorman 
1456f1a5ab12SMel Gorman static const struct seq_operations extfrag_op = {
1457f1a5ab12SMel Gorman 	.start	= frag_start,
1458f1a5ab12SMel Gorman 	.next	= frag_next,
1459f1a5ab12SMel Gorman 	.stop	= frag_stop,
1460f1a5ab12SMel Gorman 	.show	= extfrag_show,
1461f1a5ab12SMel Gorman };
1462f1a5ab12SMel Gorman 
1463f1a5ab12SMel Gorman static int extfrag_open(struct inode *inode, struct file *file)
1464f1a5ab12SMel Gorman {
1465f1a5ab12SMel Gorman 	return seq_open(file, &extfrag_op);
1466f1a5ab12SMel Gorman }
1467f1a5ab12SMel Gorman 
1468f1a5ab12SMel Gorman static const struct file_operations extfrag_file_ops = {
1469f1a5ab12SMel Gorman 	.open		= extfrag_open,
1470f1a5ab12SMel Gorman 	.read		= seq_read,
1471f1a5ab12SMel Gorman 	.llseek		= seq_lseek,
1472f1a5ab12SMel Gorman 	.release	= seq_release,
1473f1a5ab12SMel Gorman };
1474f1a5ab12SMel Gorman 
1475d7a5752cSMel Gorman static int __init extfrag_debug_init(void)
1476d7a5752cSMel Gorman {
1477bde8bd8aSSasikantha babu 	struct dentry *extfrag_debug_root;
1478bde8bd8aSSasikantha babu 
1479d7a5752cSMel Gorman 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1480d7a5752cSMel Gorman 	if (!extfrag_debug_root)
1481d7a5752cSMel Gorman 		return -ENOMEM;
1482d7a5752cSMel Gorman 
1483d7a5752cSMel Gorman 	if (!debugfs_create_file("unusable_index", 0444,
1484d7a5752cSMel Gorman 			extfrag_debug_root, NULL, &unusable_file_ops))
1485bde8bd8aSSasikantha babu 		goto fail;
1486d7a5752cSMel Gorman 
1487f1a5ab12SMel Gorman 	if (!debugfs_create_file("extfrag_index", 0444,
1488f1a5ab12SMel Gorman 			extfrag_debug_root, NULL, &extfrag_file_ops))
1489bde8bd8aSSasikantha babu 		goto fail;
1490f1a5ab12SMel Gorman 
1491d7a5752cSMel Gorman 	return 0;
1492bde8bd8aSSasikantha babu fail:
1493bde8bd8aSSasikantha babu 	debugfs_remove_recursive(extfrag_debug_root);
1494bde8bd8aSSasikantha babu 	return -ENOMEM;
1495d7a5752cSMel Gorman }
1496d7a5752cSMel Gorman 
1497d7a5752cSMel Gorman module_init(extfrag_debug_init);
1498d7a5752cSMel Gorman #endif
1499