xref: /linux/mm/vmstat.c (revision 3c4868710951dd7a6b991d71ca5f46737c4acf28)
1f6ac2354SChristoph Lameter /*
2f6ac2354SChristoph Lameter  *  linux/mm/vmstat.c
3f6ac2354SChristoph Lameter  *
4f6ac2354SChristoph Lameter  *  Manages VM statistics
5f6ac2354SChristoph Lameter  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
62244b95aSChristoph Lameter  *
72244b95aSChristoph Lameter  *  zoned VM statistics
82244b95aSChristoph Lameter  *  Copyright (C) 2006 Silicon Graphics, Inc.,
92244b95aSChristoph Lameter  *		Christoph Lameter <christoph@lameter.com>
107cc36bbdSChristoph Lameter  *  Copyright (C) 2008-2014 Christoph Lameter
11f6ac2354SChristoph Lameter  */
128f32f7e5SAlexey Dobriyan #include <linux/fs.h>
13f6ac2354SChristoph Lameter #include <linux/mm.h>
144e950f6fSAlexey Dobriyan #include <linux/err.h>
152244b95aSChristoph Lameter #include <linux/module.h>
165a0e3ad6STejun Heo #include <linux/slab.h>
17df9ecabaSChristoph Lameter #include <linux/cpu.h>
187cc36bbdSChristoph Lameter #include <linux/cpumask.h>
19c748e134SAdrian Bunk #include <linux/vmstat.h>
20*3c486871SAndrew Morton #include <linux/proc_fs.h>
21*3c486871SAndrew Morton #include <linux/seq_file.h>
22*3c486871SAndrew Morton #include <linux/debugfs.h>
23e8edc6e0SAlexey Dobriyan #include <linux/sched.h>
24f1a5ab12SMel Gorman #include <linux/math64.h>
2579da826aSMichael Rubin #include <linux/writeback.h>
2636deb0beSNamhyung Kim #include <linux/compaction.h>
276e543d57SLisa Du #include <linux/mm_inline.h>
2848c96a36SJoonsoo Kim #include <linux/page_ext.h>
2948c96a36SJoonsoo Kim #include <linux/page_owner.h>
306e543d57SLisa Du 
316e543d57SLisa Du #include "internal.h"
32f6ac2354SChristoph Lameter 
33f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
34f8891e5eSChristoph Lameter DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
35f8891e5eSChristoph Lameter EXPORT_PER_CPU_SYMBOL(vm_event_states);
36f8891e5eSChristoph Lameter 
3731f961a8SMinchan Kim static void sum_vm_events(unsigned long *ret)
38f8891e5eSChristoph Lameter {
399eccf2a8SChristoph Lameter 	int cpu;
40f8891e5eSChristoph Lameter 	int i;
41f8891e5eSChristoph Lameter 
42f8891e5eSChristoph Lameter 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
43f8891e5eSChristoph Lameter 
4431f961a8SMinchan Kim 	for_each_online_cpu(cpu) {
45f8891e5eSChristoph Lameter 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
46f8891e5eSChristoph Lameter 
47f8891e5eSChristoph Lameter 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
48f8891e5eSChristoph Lameter 			ret[i] += this->event[i];
49f8891e5eSChristoph Lameter 	}
50f8891e5eSChristoph Lameter }
51f8891e5eSChristoph Lameter 
52f8891e5eSChristoph Lameter /*
53f8891e5eSChristoph Lameter  * Accumulate the vm event counters across all CPUs.
54f8891e5eSChristoph Lameter  * The result is unavoidably approximate - it can change
55f8891e5eSChristoph Lameter  * during and after execution of this function.
56f8891e5eSChristoph Lameter  */
57f8891e5eSChristoph Lameter void all_vm_events(unsigned long *ret)
58f8891e5eSChristoph Lameter {
59b5be1132SKOSAKI Motohiro 	get_online_cpus();
6031f961a8SMinchan Kim 	sum_vm_events(ret);
61b5be1132SKOSAKI Motohiro 	put_online_cpus();
62f8891e5eSChristoph Lameter }
6332dd66fcSHeiko Carstens EXPORT_SYMBOL_GPL(all_vm_events);
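
/*
 * Illustrative sketch (not part of the original file): a consumer
 * snapshots the event counters into a local array and indexes it with
 * enum vm_event_item, e.g.:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("pgfault=%lu pgmajfault=%lu\n",
 *		events[PGFAULT], events[PGMAJFAULT]);
 *
 * As the comment above notes, the snapshot is only approximate because
 * other CPUs keep counting while the sum is taken.
 */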
64f8891e5eSChristoph Lameter 
65f8891e5eSChristoph Lameter /*
66f8891e5eSChristoph Lameter  * Fold the foreign cpu events into our own.
67f8891e5eSChristoph Lameter  *
68f8891e5eSChristoph Lameter  * This is adding to the events on one processor
69f8891e5eSChristoph Lameter  * but keeps the global counts constant.
70f8891e5eSChristoph Lameter  */
71f8891e5eSChristoph Lameter void vm_events_fold_cpu(int cpu)
72f8891e5eSChristoph Lameter {
73f8891e5eSChristoph Lameter 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
74f8891e5eSChristoph Lameter 	int i;
75f8891e5eSChristoph Lameter 
76f8891e5eSChristoph Lameter 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
77f8891e5eSChristoph Lameter 		count_vm_events(i, fold_state->event[i]);
78f8891e5eSChristoph Lameter 		fold_state->event[i] = 0;
79f8891e5eSChristoph Lameter 	}
80f8891e5eSChristoph Lameter }
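
/*
 * For example (illustrative): if the CPU going offline still holds 10
 * buffered PGFAULT events, count_vm_events(PGFAULT, 10) credits them to
 * the CPU running this function, so the total reported by
 * all_vm_events() is unchanged.
 */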
81f8891e5eSChristoph Lameter 
82f8891e5eSChristoph Lameter #endif /* CONFIG_VM_EVENT_COUNTERS */
83f8891e5eSChristoph Lameter 
842244b95aSChristoph Lameter /*
852244b95aSChristoph Lameter  * Manage combined zone-based / global counters
862244b95aSChristoph Lameter  *
872244b95aSChristoph Lameter  * vm_stat contains the global counters
882244b95aSChristoph Lameter  */
89a1cb2c60SDimitri Sivanich atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
902244b95aSChristoph Lameter EXPORT_SYMBOL(vm_stat);
912244b95aSChristoph Lameter 
922244b95aSChristoph Lameter #ifdef CONFIG_SMP
932244b95aSChristoph Lameter 
94b44129b3SMel Gorman int calculate_pressure_threshold(struct zone *zone)
9588f5acf8SMel Gorman {
9688f5acf8SMel Gorman 	int threshold;
9788f5acf8SMel Gorman 	int watermark_distance;
9888f5acf8SMel Gorman 
9988f5acf8SMel Gorman 	/*
10088f5acf8SMel Gorman 	 * As vmstats are not up to date, there is drift between the estimated
10188f5acf8SMel Gorman 	 * and real values. For high thresholds and a high number of CPUs, it
10288f5acf8SMel Gorman 	 * is possible for the min watermark to be breached while the estimated
10388f5acf8SMel Gorman 	 * value looks fine. The pressure threshold is a reduced value such
10488f5acf8SMel Gorman 	 * that even the maximum amount of drift will not accidentally breach
10588f5acf8SMel Gorman 	 * the min watermark
10588f5acf8SMel Gorman 	 * the min watermark.
10788f5acf8SMel Gorman 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
10888f5acf8SMel Gorman 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
10988f5acf8SMel Gorman 
11088f5acf8SMel Gorman 	/*
11188f5acf8SMel Gorman 	 * Maximum threshold is 125
11288f5acf8SMel Gorman 	 */
11388f5acf8SMel Gorman 	threshold = min(125, threshold);
11488f5acf8SMel Gorman 
11588f5acf8SMel Gorman 	return threshold;
11688f5acf8SMel Gorman }
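
/*
 * Worked example (illustrative, numbers assumed): with a low-to-min
 * watermark distance of 256 pages and 8 online CPUs, the pressure
 * threshold is max(1, 256 / 8) = 32, well under the 125 cap. The
 * worst-case aggregate drift is then 8 * 32 = 256 pages, which by
 * construction cannot silently breach the min watermark.
 */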
11788f5acf8SMel Gorman 
118b44129b3SMel Gorman int calculate_normal_threshold(struct zone *zone)
119df9ecabaSChristoph Lameter {
120df9ecabaSChristoph Lameter 	int threshold;
121df9ecabaSChristoph Lameter 	int mem;	/* memory in 128 MB units */
1222244b95aSChristoph Lameter 
1232244b95aSChristoph Lameter 	/*
124df9ecabaSChristoph Lameter 	 * The threshold scales with the number of processors and the amount
125df9ecabaSChristoph Lameter 	 * of memory per zone. More memory means that we can defer updates for
126df9ecabaSChristoph Lameter 	 * longer, while more processors could lead to more contention.
127df9ecabaSChristoph Lameter 	 * fls() is used as a cheap way of logarithmic scaling.
1282244b95aSChristoph Lameter 	 *
129df9ecabaSChristoph Lameter 	 * Some sample thresholds:
130df9ecabaSChristoph Lameter 	 *
131df9ecabaSChristoph Lameter 	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
132df9ecabaSChristoph Lameter 	 * ------------------------------------------------------------------
133df9ecabaSChristoph Lameter 	 * 8		1		1	0.9-1 GB	4
134df9ecabaSChristoph Lameter 	 * 16		2		2	0.9-1 GB	4
135df9ecabaSChristoph Lameter 	 * 20 		2		2	1-2 GB		5
136df9ecabaSChristoph Lameter 	 * 24		2		2	2-4 GB		6
137df9ecabaSChristoph Lameter 	 * 28		2		2	4-8 GB		7
138df9ecabaSChristoph Lameter 	 * 32		2		2	8-16 GB		8
139df9ecabaSChristoph Lameter 	 * 4		2		2	<128M		1
140df9ecabaSChristoph Lameter 	 * 30		4		3	2-4 GB		5
141df9ecabaSChristoph Lameter 	 * 48		4		3	8-16 GB		8
142df9ecabaSChristoph Lameter 	 * 32		8		4	1-2 GB		4
143df9ecabaSChristoph Lameter 	 * 32		8		4	0.9-1GB		4
144df9ecabaSChristoph Lameter 	 * 10		16		5	<128M		1
145df9ecabaSChristoph Lameter 	 * 40		16		5	900M		4
146df9ecabaSChristoph Lameter 	 * 70		64		7	2-4 GB		5
147df9ecabaSChristoph Lameter 	 * 84		64		7	4-8 GB		6
148df9ecabaSChristoph Lameter 	 * 108		512		9	4-8 GB		6
149df9ecabaSChristoph Lameter 	 * 125		1024		10	8-16 GB		8
150df9ecabaSChristoph Lameter 	 * 125		1024		10	16-32 GB	9
1512244b95aSChristoph Lameter 	 */
152df9ecabaSChristoph Lameter 
153b40da049SJiang Liu 	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
154df9ecabaSChristoph Lameter 
155df9ecabaSChristoph Lameter 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
156df9ecabaSChristoph Lameter 
157df9ecabaSChristoph Lameter 	/*
158df9ecabaSChristoph Lameter 	 * Maximum threshold is 125
159df9ecabaSChristoph Lameter 	 */
160df9ecabaSChristoph Lameter 	threshold = min(125, threshold);
161df9ecabaSChristoph Lameter 
162df9ecabaSChristoph Lameter 	return threshold;
163df9ecabaSChristoph Lameter }
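
/*
 * Worked example (illustrative, numbers assumed): a zone managing 4GB
 * gives mem = 4096MB / 128MB = 32, so fls(mem) = 6. With 8 online CPUs,
 * fls(8) = 4, and the threshold is 2 * 4 * (1 + 6) = 56, below the
 * 125 cap.
 */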
164df9ecabaSChristoph Lameter 
165df9ecabaSChristoph Lameter /*
166df9ecabaSChristoph Lameter  * Refresh the thresholds for each zone.
167df9ecabaSChristoph Lameter  */
168a6cccdc3SKOSAKI Motohiro void refresh_zone_stat_thresholds(void)
1692244b95aSChristoph Lameter {
170df9ecabaSChristoph Lameter 	struct zone *zone;
171df9ecabaSChristoph Lameter 	int cpu;
172df9ecabaSChristoph Lameter 	int threshold;
173df9ecabaSChristoph Lameter 
174ee99c71cSKOSAKI Motohiro 	for_each_populated_zone(zone) {
175aa454840SChristoph Lameter 		unsigned long max_drift, tolerate_drift;
176aa454840SChristoph Lameter 
177b44129b3SMel Gorman 		threshold = calculate_normal_threshold(zone);
178df9ecabaSChristoph Lameter 
179df9ecabaSChristoph Lameter 		for_each_online_cpu(cpu)
18099dcc3e5SChristoph Lameter 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
18199dcc3e5SChristoph Lameter 							= threshold;
182aa454840SChristoph Lameter 
183aa454840SChristoph Lameter 		/*
184aa454840SChristoph Lameter 		 * Only set percpu_drift_mark if there is a danger that
185aa454840SChristoph Lameter 		 * NR_FREE_PAGES reports that the low watermark is OK when in fact
186aa454840SChristoph Lameter 		 * the min watermark could be breached by an allocation.
187aa454840SChristoph Lameter 		 */
188aa454840SChristoph Lameter 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
189aa454840SChristoph Lameter 		max_drift = num_online_cpus() * threshold;
190aa454840SChristoph Lameter 		if (max_drift > tolerate_drift)
191aa454840SChristoph Lameter 			zone->percpu_drift_mark = high_wmark_pages(zone) +
192aa454840SChristoph Lameter 					max_drift;
193df9ecabaSChristoph Lameter 	}
1942244b95aSChristoph Lameter }
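
/*
 * Continuing the example above (illustrative): with 8 CPUs and a
 * threshold of 56, max_drift is 8 * 56 = 448 pages. If that exceeds the
 * low-to-min watermark distance, percpu_drift_mark is set, and readers
 * such as zone_watermark_ok_safe() fall back to a precise snapshot of
 * the per-cpu diffs once NR_FREE_PAGES drops below the mark.
 */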
1952244b95aSChristoph Lameter 
196b44129b3SMel Gorman void set_pgdat_percpu_threshold(pg_data_t *pgdat,
197b44129b3SMel Gorman 				int (*calculate_pressure)(struct zone *))
19888f5acf8SMel Gorman {
19988f5acf8SMel Gorman 	struct zone *zone;
20088f5acf8SMel Gorman 	int cpu;
20188f5acf8SMel Gorman 	int threshold;
20288f5acf8SMel Gorman 	int i;
20388f5acf8SMel Gorman 
20488f5acf8SMel Gorman 	for (i = 0; i < pgdat->nr_zones; i++) {
20588f5acf8SMel Gorman 		zone = &pgdat->node_zones[i];
20688f5acf8SMel Gorman 		if (!zone->percpu_drift_mark)
20788f5acf8SMel Gorman 			continue;
20888f5acf8SMel Gorman 
209b44129b3SMel Gorman 		threshold = (*calculate_pressure)(zone);
210bb0b6dffSMel Gorman 		for_each_online_cpu(cpu)
21188f5acf8SMel Gorman 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
21288f5acf8SMel Gorman 							= threshold;
21388f5acf8SMel Gorman 	}
21488f5acf8SMel Gorman }
21588f5acf8SMel Gorman 
2162244b95aSChristoph Lameter /*
217bea04b07SJianyu Zhan  * For use when we know that interrupts are disabled,
218bea04b07SJianyu Zhan  * or when we know that preemption is disabled and that
219bea04b07SJianyu Zhan  * particular counter cannot be updated from interrupt context.
2202244b95aSChristoph Lameter  */
2212244b95aSChristoph Lameter void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
2222244b95aSChristoph Lameter 				int delta)
2232244b95aSChristoph Lameter {
22412938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
22512938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
2262244b95aSChristoph Lameter 	long x;
22712938a92SChristoph Lameter 	long t;
2282244b95aSChristoph Lameter 
22912938a92SChristoph Lameter 	x = delta + __this_cpu_read(*p);
2302244b95aSChristoph Lameter 
23112938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
23212938a92SChristoph Lameter 
23312938a92SChristoph Lameter 	if (unlikely(x > t || x < -t)) {
2342244b95aSChristoph Lameter 		zone_page_state_add(x, zone, item);
2352244b95aSChristoph Lameter 		x = 0;
2362244b95aSChristoph Lameter 	}
23712938a92SChristoph Lameter 	__this_cpu_write(*p, x);
2382244b95aSChristoph Lameter }
2392244b95aSChristoph Lameter EXPORT_SYMBOL(__mod_zone_page_state);
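
/*
 * Example usage (an illustrative sketch, not a call site in this file):
 * a caller that already runs with interrupts disabled, such as page
 * cache insertion under the mapping's tree_lock, can use the cheap
 * __ variant directly:
 *
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, 1);
 *
 * The delta accumulates in the per-cpu vm_stat_diff and is only folded
 * into the zone and global counters once it crosses stat_threshold.
 */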
2402244b95aSChristoph Lameter 
2412244b95aSChristoph Lameter /*
2422244b95aSChristoph Lameter  * Optimized increment and decrement functions.
2432244b95aSChristoph Lameter  *
2442244b95aSChristoph Lameter  * These are only for a single page and therefore can take a struct page *
2452244b95aSChristoph Lameter  * argument instead of struct zone *. This allows the inclusion of the code
2462244b95aSChristoph Lameter  * generated for page_zone(page) into the optimized functions.
2472244b95aSChristoph Lameter  *
2482244b95aSChristoph Lameter  * No overflow check is necessary and therefore the differential can be
2492244b95aSChristoph Lameter  * incremented or decremented in place which may allow the compilers to
2502244b95aSChristoph Lameter  * incremented or decremented in place, which may allow the compiler to
2512244b95aSChristoph Lameter  * The increment or decrement is known and therefore one boundary check can
2522244b95aSChristoph Lameter  * be omitted.
2532244b95aSChristoph Lameter  *
254df9ecabaSChristoph Lameter  * NOTE: These functions are very performance sensitive. Change only
255df9ecabaSChristoph Lameter  * with care.
256df9ecabaSChristoph Lameter  *
2572244b95aSChristoph Lameter  * Some processors have inc/dec instructions that are atomic vs an interrupt.
2582244b95aSChristoph Lameter  * However, the code must first determine the differential location in a zone
2592244b95aSChristoph Lameter  * based on the processor number and then inc/dec the counter. There is no
2602244b95aSChristoph Lameter  * guarantee without disabling preemption that the processor will not change
2612244b95aSChristoph Lameter  * in between and therefore the atomicity vs. interrupt cannot be exploited
2622244b95aSChristoph Lameter  * in a useful way here.
2632244b95aSChristoph Lameter  */
264c8785385SChristoph Lameter void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
2652244b95aSChristoph Lameter {
26612938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
26712938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
26812938a92SChristoph Lameter 	s8 v, t;
2692244b95aSChristoph Lameter 
270908ee0f1SChristoph Lameter 	v = __this_cpu_inc_return(*p);
27112938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
27212938a92SChristoph Lameter 	if (unlikely(v > t)) {
27312938a92SChristoph Lameter 		s8 overstep = t >> 1;
2742244b95aSChristoph Lameter 
27512938a92SChristoph Lameter 		zone_page_state_add(v + overstep, zone, item);
27612938a92SChristoph Lameter 		__this_cpu_write(*p, -overstep);
2772244b95aSChristoph Lameter 	}
2782244b95aSChristoph Lameter }
279ca889e6cSChristoph Lameter 
280ca889e6cSChristoph Lameter void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
281ca889e6cSChristoph Lameter {
282ca889e6cSChristoph Lameter 	__inc_zone_state(page_zone(page), item);
283ca889e6cSChristoph Lameter }
2842244b95aSChristoph Lameter EXPORT_SYMBOL(__inc_zone_page_state);
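
/*
 * Worked example of the overstep logic (illustrative): with a
 * stat_threshold of 32, the increment that takes the per-cpu diff to 33
 * folds 33 + 16 = 49 into the zone counter and leaves the diff at -16.
 * The next 48 increments then stay entirely per-cpu, so a hot counter
 * touches the shared cachelines roughly half as often.
 */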
2852244b95aSChristoph Lameter 
286c8785385SChristoph Lameter void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
2872244b95aSChristoph Lameter {
28812938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
28912938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
29012938a92SChristoph Lameter 	s8 v, t;
2912244b95aSChristoph Lameter 
292908ee0f1SChristoph Lameter 	v = __this_cpu_dec_return(*p);
29312938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
29412938a92SChristoph Lameter 	if (unlikely(v < -t)) {
29512938a92SChristoph Lameter 		s8 overstep = t >> 1;
2962244b95aSChristoph Lameter 
29712938a92SChristoph Lameter 		zone_page_state_add(v - overstep, zone, item);
29812938a92SChristoph Lameter 		__this_cpu_write(*p, overstep);
2992244b95aSChristoph Lameter 	}
3002244b95aSChristoph Lameter }
301c8785385SChristoph Lameter 
302c8785385SChristoph Lameter void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
303c8785385SChristoph Lameter {
304c8785385SChristoph Lameter 	__dec_zone_state(page_zone(page), item);
305c8785385SChristoph Lameter }
3062244b95aSChristoph Lameter EXPORT_SYMBOL(__dec_zone_page_state);
3072244b95aSChristoph Lameter 
3084156153cSHeiko Carstens #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
3097c839120SChristoph Lameter /*
3107c839120SChristoph Lameter  * If we have cmpxchg_local support then we do not need to incur the overhead
3117c839120SChristoph Lameter  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
3127c839120SChristoph Lameter  *
3137c839120SChristoph Lameter  * mod_state() modifies the zone counter state through atomic per cpu
3147c839120SChristoph Lameter  * operations.
3157c839120SChristoph Lameter  *
3167c839120SChristoph Lameter  * Overstep mode specifies how overstepping should be handled:
3177c839120SChristoph Lameter  *     0       No overstepping
3187c839120SChristoph Lameter  *     1       Overstepping half of threshold
3197c839120SChristoph Lameter  *     -1      Overstepping minus half of threshold
3207c839120SChristoph Lameter  */
3217c839120SChristoph Lameter static inline void mod_state(struct zone *zone,
3227c839120SChristoph Lameter        enum zone_stat_item item, int delta, int overstep_mode)
3237c839120SChristoph Lameter {
3247c839120SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
3257c839120SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
3267c839120SChristoph Lameter 	long o, n, t, z;
3277c839120SChristoph Lameter 
3287c839120SChristoph Lameter 	do {
3297c839120SChristoph Lameter 		z = 0;  /* overflow to zone counters */
3307c839120SChristoph Lameter 
3317c839120SChristoph Lameter 		/*
3327c839120SChristoph Lameter 		 * The fetching of the stat_threshold is racy. We may apply
3337c839120SChristoph Lameter 		 * a counter threshold to the wrong cpu if we get
334d3bc2367SChristoph Lameter 		 * rescheduled while executing here. However, the next
335d3bc2367SChristoph Lameter 		 * counter update will apply the threshold again and
336d3bc2367SChristoph Lameter 		 * therefore bring the counter under the threshold again.
337d3bc2367SChristoph Lameter 		 *
338d3bc2367SChristoph Lameter 		 * Most of the time the thresholds are the same anyway
339d3bc2367SChristoph Lameter 		 * for all cpus in a zone.
3407c839120SChristoph Lameter 		 */
3417c839120SChristoph Lameter 		t = this_cpu_read(pcp->stat_threshold);
3427c839120SChristoph Lameter 
3437c839120SChristoph Lameter 		o = this_cpu_read(*p);
3447c839120SChristoph Lameter 		n = delta + o;
3457c839120SChristoph Lameter 
3467c839120SChristoph Lameter 		if (n > t || n < -t) {
3477c839120SChristoph Lameter 			int os = overstep_mode * (t >> 1);
3487c839120SChristoph Lameter 
3497c839120SChristoph Lameter 			/* Overflow must be added to zone counters */
3507c839120SChristoph Lameter 			z = n + os;
3517c839120SChristoph Lameter 			n = -os;
3527c839120SChristoph Lameter 		}
3537c839120SChristoph Lameter 	} while (this_cpu_cmpxchg(*p, o, n) != o);
3547c839120SChristoph Lameter 
3557c839120SChristoph Lameter 	if (z)
3567c839120SChristoph Lameter 		zone_page_state_add(z, zone, item);
3577c839120SChristoph Lameter }
3587c839120SChristoph Lameter 
3597c839120SChristoph Lameter void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
3607c839120SChristoph Lameter 					int delta)
3617c839120SChristoph Lameter {
3627c839120SChristoph Lameter 	mod_state(zone, item, delta, 0);
3637c839120SChristoph Lameter }
3647c839120SChristoph Lameter EXPORT_SYMBOL(mod_zone_page_state);
3657c839120SChristoph Lameter 
3667c839120SChristoph Lameter void inc_zone_state(struct zone *zone, enum zone_stat_item item)
3677c839120SChristoph Lameter {
3687c839120SChristoph Lameter 	mod_state(zone, item, 1, 1);
3697c839120SChristoph Lameter }
3707c839120SChristoph Lameter 
3717c839120SChristoph Lameter void inc_zone_page_state(struct page *page, enum zone_stat_item item)
3727c839120SChristoph Lameter {
3737c839120SChristoph Lameter 	mod_state(page_zone(page), item, 1, 1);
3747c839120SChristoph Lameter }
3757c839120SChristoph Lameter EXPORT_SYMBOL(inc_zone_page_state);
3767c839120SChristoph Lameter 
3777c839120SChristoph Lameter void dec_zone_page_state(struct page *page, enum zone_stat_item item)
3787c839120SChristoph Lameter {
3797c839120SChristoph Lameter 	mod_state(page_zone(page), item, -1, -1);
3807c839120SChristoph Lameter }
3817c839120SChristoph Lameter EXPORT_SYMBOL(dec_zone_page_state);
3827c839120SChristoph Lameter #else
3837c839120SChristoph Lameter /*
3847c839120SChristoph Lameter  * Use interrupt disable to serialize counter updates
3857c839120SChristoph Lameter  */
3867c839120SChristoph Lameter void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
3877c839120SChristoph Lameter 					int delta)
3887c839120SChristoph Lameter {
3897c839120SChristoph Lameter 	unsigned long flags;
3907c839120SChristoph Lameter 
3917c839120SChristoph Lameter 	local_irq_save(flags);
3927c839120SChristoph Lameter 	__mod_zone_page_state(zone, item, delta);
3937c839120SChristoph Lameter 	local_irq_restore(flags);
3947c839120SChristoph Lameter }
3957c839120SChristoph Lameter EXPORT_SYMBOL(mod_zone_page_state);
3967c839120SChristoph Lameter 
397ca889e6cSChristoph Lameter void inc_zone_state(struct zone *zone, enum zone_stat_item item)
398ca889e6cSChristoph Lameter {
399ca889e6cSChristoph Lameter 	unsigned long flags;
400ca889e6cSChristoph Lameter 
401ca889e6cSChristoph Lameter 	local_irq_save(flags);
402ca889e6cSChristoph Lameter 	__inc_zone_state(zone, item);
403ca889e6cSChristoph Lameter 	local_irq_restore(flags);
404ca889e6cSChristoph Lameter }
405ca889e6cSChristoph Lameter 
4062244b95aSChristoph Lameter void inc_zone_page_state(struct page *page, enum zone_stat_item item)
4072244b95aSChristoph Lameter {
4082244b95aSChristoph Lameter 	unsigned long flags;
4092244b95aSChristoph Lameter 	struct zone *zone;
4102244b95aSChristoph Lameter 
4112244b95aSChristoph Lameter 	zone = page_zone(page);
4122244b95aSChristoph Lameter 	local_irq_save(flags);
413ca889e6cSChristoph Lameter 	__inc_zone_state(zone, item);
4142244b95aSChristoph Lameter 	local_irq_restore(flags);
4152244b95aSChristoph Lameter }
4162244b95aSChristoph Lameter EXPORT_SYMBOL(inc_zone_page_state);
4172244b95aSChristoph Lameter 
4182244b95aSChristoph Lameter void dec_zone_page_state(struct page *page, enum zone_stat_item item)
4192244b95aSChristoph Lameter {
4202244b95aSChristoph Lameter 	unsigned long flags;
4212244b95aSChristoph Lameter 
4222244b95aSChristoph Lameter 	local_irq_save(flags);
423a302eb4eSChristoph Lameter 	__dec_zone_page_state(page, item);
4242244b95aSChristoph Lameter 	local_irq_restore(flags);
4252244b95aSChristoph Lameter }
4262244b95aSChristoph Lameter EXPORT_SYMBOL(dec_zone_page_state);
4277c839120SChristoph Lameter #endif
4282244b95aSChristoph Lameter 
4297cc36bbdSChristoph Lameter 
4307cc36bbdSChristoph Lameter /*
4317cc36bbdSChristoph Lameter  * Fold a differential into the global counters.
4327cc36bbdSChristoph Lameter  * Returns the number of counters updated.
4337cc36bbdSChristoph Lameter  */
4347cc36bbdSChristoph Lameter static int fold_diff(int *diff)
4354edb0748SChristoph Lameter {
4364edb0748SChristoph Lameter 	int i;
4377cc36bbdSChristoph Lameter 	int changes = 0;
4384edb0748SChristoph Lameter 
4394edb0748SChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
4407cc36bbdSChristoph Lameter 		if (diff[i]) {
4414edb0748SChristoph Lameter 			atomic_long_add(diff[i], &vm_stat[i]);
4427cc36bbdSChristoph Lameter 			changes++;
4437cc36bbdSChristoph Lameter 	}
4447cc36bbdSChristoph Lameter 	return changes;
4454edb0748SChristoph Lameter }
4464edb0748SChristoph Lameter 
4472244b95aSChristoph Lameter /*
4482bb921e5SChristoph Lameter  * Update the zone counters for the current cpu.
449a7f75e25SChristoph Lameter  *
4504037d452SChristoph Lameter  * Note that refresh_cpu_vm_stats strives to only access
4514037d452SChristoph Lameter  * node-local memory. The per-cpu pagesets on remote zones are placed
4524037d452SChristoph Lameter  * in the memory local to the processor using that pageset. So the
4534037d452SChristoph Lameter  * loop over all zones will access a series of cachelines local to
4544037d452SChristoph Lameter  * the processor.
4554037d452SChristoph Lameter  *
4564037d452SChristoph Lameter  * The call to zone_page_state_add updates the cachelines with the
4574037d452SChristoph Lameter  * statistics in the remote zone struct as well as the global cachelines
4584037d452SChristoph Lameter  * with the global counters. These could cause remote node cache line
4594037d452SChristoph Lameter  * bouncing and should therefore only be done when necessary.
4607cc36bbdSChristoph Lameter  *
4617cc36bbdSChristoph Lameter  * The function returns the number of global counters updated.
4622244b95aSChristoph Lameter  */
4637cc36bbdSChristoph Lameter static int refresh_cpu_vm_stats(void)
4642244b95aSChristoph Lameter {
4652244b95aSChristoph Lameter 	struct zone *zone;
4662244b95aSChristoph Lameter 	int i;
467a7f75e25SChristoph Lameter 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
4687cc36bbdSChristoph Lameter 	int changes = 0;
4692244b95aSChristoph Lameter 
470ee99c71cSKOSAKI Motohiro 	for_each_populated_zone(zone) {
471fbc2edb0SChristoph Lameter 		struct per_cpu_pageset __percpu *p = zone->pageset;
4722244b95aSChristoph Lameter 
473fbc2edb0SChristoph Lameter 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
474a7f75e25SChristoph Lameter 			int v;
475a7f75e25SChristoph Lameter 
476fbc2edb0SChristoph Lameter 			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
477fbc2edb0SChristoph Lameter 			if (v) {
478fbc2edb0SChristoph Lameter 
479a7f75e25SChristoph Lameter 				atomic_long_add(v, &zone->vm_stat[i]);
480a7f75e25SChristoph Lameter 				global_diff[i] += v;
4814037d452SChristoph Lameter #ifdef CONFIG_NUMA
4824037d452SChristoph Lameter 				/* 3 seconds idle till flush */
483fbc2edb0SChristoph Lameter 				__this_cpu_write(p->expire, 3);
4844037d452SChristoph Lameter #endif
4852244b95aSChristoph Lameter 			}
486fbc2edb0SChristoph Lameter 		}
487468fd62eSDimitri Sivanich 		cond_resched();
4884037d452SChristoph Lameter #ifdef CONFIG_NUMA
4894037d452SChristoph Lameter 		/*
4904037d452SChristoph Lameter 		 * Deal with draining the remote pageset of this
4914037d452SChristoph Lameter 		 * processor
4924037d452SChristoph Lameter 		 *
4934037d452SChristoph Lameter 		 * Check if there are pages remaining in this pageset;
4944037d452SChristoph Lameter 		 * if not, there is nothing to expire.
4954037d452SChristoph Lameter 		 */
496fbc2edb0SChristoph Lameter 		if (!__this_cpu_read(p->expire) ||
497fbc2edb0SChristoph Lameter 			       !__this_cpu_read(p->pcp.count))
4984037d452SChristoph Lameter 			continue;
4994037d452SChristoph Lameter 
5004037d452SChristoph Lameter 		/*
5014037d452SChristoph Lameter 		 * We never drain zones local to this processor.
5024037d452SChristoph Lameter 		 */
5034037d452SChristoph Lameter 		if (zone_to_nid(zone) == numa_node_id()) {
504fbc2edb0SChristoph Lameter 			__this_cpu_write(p->expire, 0);
5054037d452SChristoph Lameter 			continue;
5064037d452SChristoph Lameter 		}
5074037d452SChristoph Lameter 
508fbc2edb0SChristoph Lameter 		if (__this_cpu_dec_return(p->expire))
5094037d452SChristoph Lameter 			continue;
5104037d452SChristoph Lameter 
5117cc36bbdSChristoph Lameter 		if (__this_cpu_read(p->pcp.count)) {
5127c8e0181SChristoph Lameter 			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
5137cc36bbdSChristoph Lameter 			changes++;
5147cc36bbdSChristoph Lameter 		}
5154037d452SChristoph Lameter #endif
5162244b95aSChristoph Lameter 	}
5177cc36bbdSChristoph Lameter 	changes += fold_diff(global_diff);
5187cc36bbdSChristoph Lameter 	return changes;
5192244b95aSChristoph Lameter }
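
/*
 * Sketch of the expire countdown above (illustrative): the periodic
 * vmstat updater typically runs about once per second, so writing 3 to
 * p->expire whenever this CPU dirties a remote zone's counters means
 * the remote pcp list is only drained after roughly three quiet
 * seconds, matching the "3 seconds idle till flush" note.
 */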
5202244b95aSChristoph Lameter 
52140f4b1eaSCody P Schafer /*
5222bb921e5SChristoph Lameter  * Fold the data for an offline cpu into the global array.
5232bb921e5SChristoph Lameter  * There cannot be any access by the offline cpu and therefore
5242bb921e5SChristoph Lameter  * synchronization is simplified.
5252bb921e5SChristoph Lameter  */
5262bb921e5SChristoph Lameter void cpu_vm_stats_fold(int cpu)
5272bb921e5SChristoph Lameter {
5282bb921e5SChristoph Lameter 	struct zone *zone;
5292bb921e5SChristoph Lameter 	int i;
5302bb921e5SChristoph Lameter 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
5312bb921e5SChristoph Lameter 
5322bb921e5SChristoph Lameter 	for_each_populated_zone(zone) {
5332bb921e5SChristoph Lameter 		struct per_cpu_pageset *p;
5342bb921e5SChristoph Lameter 
5352bb921e5SChristoph Lameter 		p = per_cpu_ptr(zone->pageset, cpu);
5362bb921e5SChristoph Lameter 
5372bb921e5SChristoph Lameter 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
5382bb921e5SChristoph Lameter 			if (p->vm_stat_diff[i]) {
5392bb921e5SChristoph Lameter 				int v;
5402bb921e5SChristoph Lameter 
5412bb921e5SChristoph Lameter 				v = p->vm_stat_diff[i];
5422bb921e5SChristoph Lameter 				p->vm_stat_diff[i] = 0;
5432bb921e5SChristoph Lameter 				atomic_long_add(v, &zone->vm_stat[i]);
5442bb921e5SChristoph Lameter 				global_diff[i] += v;
5452bb921e5SChristoph Lameter 			}
5462bb921e5SChristoph Lameter 	}
5472bb921e5SChristoph Lameter 
5484edb0748SChristoph Lameter 	fold_diff(global_diff);
5492bb921e5SChristoph Lameter }
5502bb921e5SChristoph Lameter 
5512bb921e5SChristoph Lameter /*
55240f4b1eaSCody P Schafer  * This is only called if !populated_zone(zone), which implies no other users of
55340f4b1eaSCody P Schafer  * pset->vm_stat_diff[] exist.
55440f4b1eaSCody P Schafer  */
5555a883813SMinchan Kim void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
5565a883813SMinchan Kim {
5575a883813SMinchan Kim 	int i;
5585a883813SMinchan Kim 
5595a883813SMinchan Kim 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
5605a883813SMinchan Kim 		if (pset->vm_stat_diff[i]) {
5615a883813SMinchan Kim 			int v = pset->vm_stat_diff[i];
5625a883813SMinchan Kim 			pset->vm_stat_diff[i] = 0;
5635a883813SMinchan Kim 			atomic_long_add(v, &zone->vm_stat[i]);
5645a883813SMinchan Kim 			atomic_long_add(v, &vm_stat[i]);
5655a883813SMinchan Kim 		}
5665a883813SMinchan Kim }
5672244b95aSChristoph Lameter #endif
5682244b95aSChristoph Lameter 
569ca889e6cSChristoph Lameter #ifdef CONFIG_NUMA
570ca889e6cSChristoph Lameter /*
571ca889e6cSChristoph Lameter  * zonelist = the list of zones passed to the allocator
572ca889e6cSChristoph Lameter  * z 	    = the zone from which the allocation occurred.
573ca889e6cSChristoph Lameter  *
574ca889e6cSChristoph Lameter  * Must be called with interrupts disabled.
57578afd561SAndi Kleen  *
57778afd561SAndi Kleen  * When __GFP_OTHER_NODE is set, assume the node of the preferred
57878afd561SAndi Kleen  * zone is the local node. This is useful for daemons that allocate
57878afd561SAndi Kleen  * memory on behalf of other processes.
579ca889e6cSChristoph Lameter  */
58078afd561SAndi Kleen void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
581ca889e6cSChristoph Lameter {
58218ea7e71SMel Gorman 	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
583ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_HIT);
584ca889e6cSChristoph Lameter 	} else {
585ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_MISS);
58618ea7e71SMel Gorman 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
587ca889e6cSChristoph Lameter 	}
58878afd561SAndi Kleen 	if (z->node == ((flags & __GFP_OTHER_NODE) ?
58978afd561SAndi Kleen 			preferred_zone->node : numa_node_id()))
590ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_LOCAL);
591ca889e6cSChristoph Lameter 	else
592ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_OTHER);
593ca889e6cSChristoph Lameter }
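
/*
 * Illustrative call site (a sketch, not part of this file): the page
 * allocator invokes this with interrupts disabled right after a
 * successful allocation, roughly:
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *	zone_statistics(preferred_zone, zone, gfp_flags);
 *
 * so NUMA_HIT/NUMA_MISS reflect where pages actually came from relative
 * to the preferred zone.
 */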
594ca889e6cSChristoph Lameter #endif
595ca889e6cSChristoph Lameter 
596d7a5752cSMel Gorman #ifdef CONFIG_COMPACTION
59736deb0beSNamhyung Kim 
598d7a5752cSMel Gorman struct contig_page_info {
599d7a5752cSMel Gorman 	unsigned long free_pages;
600d7a5752cSMel Gorman 	unsigned long free_blocks_total;
601d7a5752cSMel Gorman 	unsigned long free_blocks_suitable;
602d7a5752cSMel Gorman };
603d7a5752cSMel Gorman 
604d7a5752cSMel Gorman /*
605d7a5752cSMel Gorman  * Calculate the number of free pages in a zone, how many contiguous
606d7a5752cSMel Gorman  * pages are free and how many are large enough to satisfy an allocation of
607d7a5752cSMel Gorman  * the target size. Note that this function makes no attempt to estimate
608d7a5752cSMel Gorman  * how many suitable free blocks there *might* be if MOVABLE pages were
609d7a5752cSMel Gorman  * migrated. Calculating that is possible, but expensive and can be
610d7a5752cSMel Gorman  * figured out from userspace.
611d7a5752cSMel Gorman  */
612d7a5752cSMel Gorman static void fill_contig_page_info(struct zone *zone,
613d7a5752cSMel Gorman 				unsigned int suitable_order,
614d7a5752cSMel Gorman 				struct contig_page_info *info)
615d7a5752cSMel Gorman {
616d7a5752cSMel Gorman 	unsigned int order;
617d7a5752cSMel Gorman 
618d7a5752cSMel Gorman 	info->free_pages = 0;
619d7a5752cSMel Gorman 	info->free_blocks_total = 0;
620d7a5752cSMel Gorman 	info->free_blocks_suitable = 0;
621d7a5752cSMel Gorman 
622d7a5752cSMel Gorman 	for (order = 0; order < MAX_ORDER; order++) {
623d7a5752cSMel Gorman 		unsigned long blocks;
624d7a5752cSMel Gorman 
625d7a5752cSMel Gorman 		/* Count number of free blocks */
626d7a5752cSMel Gorman 		blocks = zone->free_area[order].nr_free;
627d7a5752cSMel Gorman 		info->free_blocks_total += blocks;
628d7a5752cSMel Gorman 
629d7a5752cSMel Gorman 		/* Count free base pages */
630d7a5752cSMel Gorman 		info->free_pages += blocks << order;
631d7a5752cSMel Gorman 
632d7a5752cSMel Gorman 		/* Count the suitable free blocks */
633d7a5752cSMel Gorman 		if (order >= suitable_order)
634d7a5752cSMel Gorman 			info->free_blocks_suitable += blocks <<
635d7a5752cSMel Gorman 						(order - suitable_order);
636d7a5752cSMel Gorman 	}
637d7a5752cSMel Gorman }
638f1a5ab12SMel Gorman 
639f1a5ab12SMel Gorman /*
640f1a5ab12SMel Gorman  * A fragmentation index only makes sense if an allocation of a requested
641f1a5ab12SMel Gorman  * size would fail. If that is true, the fragmentation index indicates
642f1a5ab12SMel Gorman  * whether external fragmentation or a lack of memory was the problem.
643f1a5ab12SMel Gorman  * The value can be used to determine if page reclaim or compaction
644f1a5ab12SMel Gorman  * should be used.
645f1a5ab12SMel Gorman  */
64656de7263SMel Gorman static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
647f1a5ab12SMel Gorman {
648f1a5ab12SMel Gorman 	unsigned long requested = 1UL << order;
649f1a5ab12SMel Gorman 
650f1a5ab12SMel Gorman 	if (!info->free_blocks_total)
651f1a5ab12SMel Gorman 		return 0;
652f1a5ab12SMel Gorman 
653f1a5ab12SMel Gorman 	/* Fragmentation index only makes sense when a request would fail */
654f1a5ab12SMel Gorman 	if (info->free_blocks_suitable)
655f1a5ab12SMel Gorman 		return -1000;
656f1a5ab12SMel Gorman 
657f1a5ab12SMel Gorman 	/*
658f1a5ab12SMel Gorman 	 * Index is between 0 and 1 so return within 3 decimal places
659f1a5ab12SMel Gorman 	 *
660f1a5ab12SMel Gorman 	 * 0 => allocation would fail due to lack of memory
661f1a5ab12SMel Gorman 	 * 1 => allocation would fail due to fragmentation
662f1a5ab12SMel Gorman 	 */
663f1a5ab12SMel Gorman 	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
664f1a5ab12SMel Gorman }
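
/*
 * Worked examples (illustrative, numbers assumed): for an order-3
 * request (requested = 8) against 1000 free pages scattered as 1000
 * order-0 blocks, the index is
 * 1000 - (1000 + 1000 * 1000 / 8) / 1000 = 874, i.e. 0.874, pointing
 * at external fragmentation. Against 8 free pages in two order-2
 * blocks it is 1000 - (1000 + 8 * 1000 / 8) / 2 = 0, pointing at a
 * plain lack of memory.
 */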
66556de7263SMel Gorman 
66656de7263SMel Gorman /* Same as __fragmentation_index() but allocates contig_page_info on the stack */
66756de7263SMel Gorman int fragmentation_index(struct zone *zone, unsigned int order)
66856de7263SMel Gorman {
66956de7263SMel Gorman 	struct contig_page_info info;
67056de7263SMel Gorman 
67156de7263SMel Gorman 	fill_contig_page_info(zone, order, &info);
67256de7263SMel Gorman 	return __fragmentation_index(order, &info);
67356de7263SMel Gorman }
674d7a5752cSMel Gorman #endif
675d7a5752cSMel Gorman 
6760d6617c7SDavid Rientjes #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
677fa25c503SKOSAKI Motohiro #ifdef CONFIG_ZONE_DMA
678fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA(xx) xx "_dma",
679fa25c503SKOSAKI Motohiro #else
680fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA(xx)
681fa25c503SKOSAKI Motohiro #endif
682fa25c503SKOSAKI Motohiro 
683fa25c503SKOSAKI Motohiro #ifdef CONFIG_ZONE_DMA32
684fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA32(xx) xx "_dma32",
685fa25c503SKOSAKI Motohiro #else
686fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA32(xx)
687fa25c503SKOSAKI Motohiro #endif
688fa25c503SKOSAKI Motohiro 
689fa25c503SKOSAKI Motohiro #ifdef CONFIG_HIGHMEM
690fa25c503SKOSAKI Motohiro #define TEXT_FOR_HIGHMEM(xx) xx "_high",
691fa25c503SKOSAKI Motohiro #else
692fa25c503SKOSAKI Motohiro #define TEXT_FOR_HIGHMEM(xx)
693fa25c503SKOSAKI Motohiro #endif
694fa25c503SKOSAKI Motohiro 
695fa25c503SKOSAKI Motohiro #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
696fa25c503SKOSAKI Motohiro 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
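
/*
 * For example, on a hypothetical config with every optional zone
 * enabled, TEXTS_FOR_ZONES("pgalloc") expands to:
 *
 *	"pgalloc_dma", "pgalloc_dma32", "pgalloc_normal",
 *	"pgalloc_high", "pgalloc_movable",
 */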
697fa25c503SKOSAKI Motohiro 
698fa25c503SKOSAKI Motohiro const char * const vmstat_text[] = {
69909316c09SKonstantin Khlebnikov 	/* enum zone_stat_item counters */
700fa25c503SKOSAKI Motohiro 	"nr_free_pages",
70181c0a2bbSJohannes Weiner 	"nr_alloc_batch",
702fa25c503SKOSAKI Motohiro 	"nr_inactive_anon",
703fa25c503SKOSAKI Motohiro 	"nr_active_anon",
704fa25c503SKOSAKI Motohiro 	"nr_inactive_file",
705fa25c503SKOSAKI Motohiro 	"nr_active_file",
706fa25c503SKOSAKI Motohiro 	"nr_unevictable",
707fa25c503SKOSAKI Motohiro 	"nr_mlock",
708fa25c503SKOSAKI Motohiro 	"nr_anon_pages",
709fa25c503SKOSAKI Motohiro 	"nr_mapped",
710fa25c503SKOSAKI Motohiro 	"nr_file_pages",
711fa25c503SKOSAKI Motohiro 	"nr_dirty",
712fa25c503SKOSAKI Motohiro 	"nr_writeback",
713fa25c503SKOSAKI Motohiro 	"nr_slab_reclaimable",
714fa25c503SKOSAKI Motohiro 	"nr_slab_unreclaimable",
715fa25c503SKOSAKI Motohiro 	"nr_page_table_pages",
716fa25c503SKOSAKI Motohiro 	"nr_kernel_stack",
717fa25c503SKOSAKI Motohiro 	"nr_unstable",
718fa25c503SKOSAKI Motohiro 	"nr_bounce",
719fa25c503SKOSAKI Motohiro 	"nr_vmscan_write",
72049ea7eb6SMel Gorman 	"nr_vmscan_immediate_reclaim",
721fa25c503SKOSAKI Motohiro 	"nr_writeback_temp",
722fa25c503SKOSAKI Motohiro 	"nr_isolated_anon",
723fa25c503SKOSAKI Motohiro 	"nr_isolated_file",
724fa25c503SKOSAKI Motohiro 	"nr_shmem",
725fa25c503SKOSAKI Motohiro 	"nr_dirtied",
726fa25c503SKOSAKI Motohiro 	"nr_written",
7270d5d823aSMel Gorman 	"nr_pages_scanned",
728fa25c503SKOSAKI Motohiro 
729fa25c503SKOSAKI Motohiro #ifdef CONFIG_NUMA
730fa25c503SKOSAKI Motohiro 	"numa_hit",
731fa25c503SKOSAKI Motohiro 	"numa_miss",
732fa25c503SKOSAKI Motohiro 	"numa_foreign",
733fa25c503SKOSAKI Motohiro 	"numa_interleave",
734fa25c503SKOSAKI Motohiro 	"numa_local",
735fa25c503SKOSAKI Motohiro 	"numa_other",
736fa25c503SKOSAKI Motohiro #endif
737a528910eSJohannes Weiner 	"workingset_refault",
738a528910eSJohannes Weiner 	"workingset_activate",
739449dd698SJohannes Weiner 	"workingset_nodereclaim",
740fa25c503SKOSAKI Motohiro 	"nr_anon_transparent_hugepages",
741d1ce749aSBartlomiej Zolnierkiewicz 	"nr_free_cma",
74209316c09SKonstantin Khlebnikov 
74309316c09SKonstantin Khlebnikov 	/* enum writeback_stat_item counters */
744fa25c503SKOSAKI Motohiro 	"nr_dirty_threshold",
745fa25c503SKOSAKI Motohiro 	"nr_dirty_background_threshold",
746fa25c503SKOSAKI Motohiro 
747fa25c503SKOSAKI Motohiro #ifdef CONFIG_VM_EVENT_COUNTERS
74809316c09SKonstantin Khlebnikov 	/* enum vm_event_item counters */
749fa25c503SKOSAKI Motohiro 	"pgpgin",
750fa25c503SKOSAKI Motohiro 	"pgpgout",
751fa25c503SKOSAKI Motohiro 	"pswpin",
752fa25c503SKOSAKI Motohiro 	"pswpout",
753fa25c503SKOSAKI Motohiro 
754fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgalloc")
755fa25c503SKOSAKI Motohiro 
756fa25c503SKOSAKI Motohiro 	"pgfree",
757fa25c503SKOSAKI Motohiro 	"pgactivate",
758fa25c503SKOSAKI Motohiro 	"pgdeactivate",
759fa25c503SKOSAKI Motohiro 
760fa25c503SKOSAKI Motohiro 	"pgfault",
761fa25c503SKOSAKI Motohiro 	"pgmajfault",
762fa25c503SKOSAKI Motohiro 
763fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgrefill")
764904249aaSYing Han 	TEXTS_FOR_ZONES("pgsteal_kswapd")
765904249aaSYing Han 	TEXTS_FOR_ZONES("pgsteal_direct")
766fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgscan_kswapd")
767fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgscan_direct")
76868243e76SMel Gorman 	"pgscan_direct_throttle",
769fa25c503SKOSAKI Motohiro 
770fa25c503SKOSAKI Motohiro #ifdef CONFIG_NUMA
771fa25c503SKOSAKI Motohiro 	"zone_reclaim_failed",
772fa25c503SKOSAKI Motohiro #endif
773fa25c503SKOSAKI Motohiro 	"pginodesteal",
774fa25c503SKOSAKI Motohiro 	"slabs_scanned",
775fa25c503SKOSAKI Motohiro 	"kswapd_inodesteal",
776fa25c503SKOSAKI Motohiro 	"kswapd_low_wmark_hit_quickly",
777fa25c503SKOSAKI Motohiro 	"kswapd_high_wmark_hit_quickly",
778fa25c503SKOSAKI Motohiro 	"pageoutrun",
779fa25c503SKOSAKI Motohiro 	"allocstall",
780fa25c503SKOSAKI Motohiro 
781fa25c503SKOSAKI Motohiro 	"pgrotated",
782fa25c503SKOSAKI Motohiro 
7835509a5d2SDave Hansen 	"drop_pagecache",
7845509a5d2SDave Hansen 	"drop_slab",
7855509a5d2SDave Hansen 
78603c5a6e1SMel Gorman #ifdef CONFIG_NUMA_BALANCING
78703c5a6e1SMel Gorman 	"numa_pte_updates",
78872403b4aSMel Gorman 	"numa_huge_pte_updates",
78903c5a6e1SMel Gorman 	"numa_hint_faults",
79003c5a6e1SMel Gorman 	"numa_hint_faults_local",
79103c5a6e1SMel Gorman 	"numa_pages_migrated",
79203c5a6e1SMel Gorman #endif
7935647bc29SMel Gorman #ifdef CONFIG_MIGRATION
7945647bc29SMel Gorman 	"pgmigrate_success",
7955647bc29SMel Gorman 	"pgmigrate_fail",
7965647bc29SMel Gorman #endif
797fa25c503SKOSAKI Motohiro #ifdef CONFIG_COMPACTION
798397487dbSMel Gorman 	"compact_migrate_scanned",
799397487dbSMel Gorman 	"compact_free_scanned",
800397487dbSMel Gorman 	"compact_isolated",
801fa25c503SKOSAKI Motohiro 	"compact_stall",
802fa25c503SKOSAKI Motohiro 	"compact_fail",
803fa25c503SKOSAKI Motohiro 	"compact_success",
804fa25c503SKOSAKI Motohiro #endif
805fa25c503SKOSAKI Motohiro 
806fa25c503SKOSAKI Motohiro #ifdef CONFIG_HUGETLB_PAGE
807fa25c503SKOSAKI Motohiro 	"htlb_buddy_alloc_success",
808fa25c503SKOSAKI Motohiro 	"htlb_buddy_alloc_fail",
809fa25c503SKOSAKI Motohiro #endif
810fa25c503SKOSAKI Motohiro 	"unevictable_pgs_culled",
811fa25c503SKOSAKI Motohiro 	"unevictable_pgs_scanned",
812fa25c503SKOSAKI Motohiro 	"unevictable_pgs_rescued",
813fa25c503SKOSAKI Motohiro 	"unevictable_pgs_mlocked",
814fa25c503SKOSAKI Motohiro 	"unevictable_pgs_munlocked",
815fa25c503SKOSAKI Motohiro 	"unevictable_pgs_cleared",
816fa25c503SKOSAKI Motohiro 	"unevictable_pgs_stranded",
817fa25c503SKOSAKI Motohiro 
818fa25c503SKOSAKI Motohiro #ifdef CONFIG_TRANSPARENT_HUGEPAGE
819fa25c503SKOSAKI Motohiro 	"thp_fault_alloc",
820fa25c503SKOSAKI Motohiro 	"thp_fault_fallback",
821fa25c503SKOSAKI Motohiro 	"thp_collapse_alloc",
822fa25c503SKOSAKI Motohiro 	"thp_collapse_alloc_failed",
823fa25c503SKOSAKI Motohiro 	"thp_split",
824d8a8e1f0SKirill A. Shutemov 	"thp_zero_page_alloc",
825d8a8e1f0SKirill A. Shutemov 	"thp_zero_page_alloc_failed",
826fa25c503SKOSAKI Motohiro #endif
82709316c09SKonstantin Khlebnikov #ifdef CONFIG_MEMORY_BALLOON
82809316c09SKonstantin Khlebnikov 	"balloon_inflate",
82909316c09SKonstantin Khlebnikov 	"balloon_deflate",
83009316c09SKonstantin Khlebnikov #ifdef CONFIG_BALLOON_COMPACTION
83109316c09SKonstantin Khlebnikov 	"balloon_migrate",
83209316c09SKonstantin Khlebnikov #endif
83309316c09SKonstantin Khlebnikov #endif /* CONFIG_MEMORY_BALLOON */
834ec659934SMel Gorman #ifdef CONFIG_DEBUG_TLBFLUSH
8356df46865SDave Hansen #ifdef CONFIG_SMP
8369824cf97SDave Hansen 	"nr_tlb_remote_flush",
8379824cf97SDave Hansen 	"nr_tlb_remote_flush_received",
838ec659934SMel Gorman #endif /* CONFIG_SMP */
8399824cf97SDave Hansen 	"nr_tlb_local_flush_all",
8409824cf97SDave Hansen 	"nr_tlb_local_flush_one",
841ec659934SMel Gorman #endif /* CONFIG_DEBUG_TLBFLUSH */
842fa25c503SKOSAKI Motohiro 
8434f115147SDavidlohr Bueso #ifdef CONFIG_DEBUG_VM_VMACACHE
8444f115147SDavidlohr Bueso 	"vmacache_find_calls",
8454f115147SDavidlohr Bueso 	"vmacache_find_hits",
846f5f302e2SDavidlohr Bueso 	"vmacache_full_flushes",
8474f115147SDavidlohr Bueso #endif
848fa25c503SKOSAKI Motohiro #endif /* CONFIG_VM_EVENT_COUNTERS */
849fa25c503SKOSAKI Motohiro };
8500d6617c7SDavid Rientjes #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
851fa25c503SKOSAKI Motohiro 
852fa25c503SKOSAKI Motohiro 
853*3c486871SAndrew Morton #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
854*3c486871SAndrew Morton      defined(CONFIG_PROC_FS)
855*3c486871SAndrew Morton static void *frag_start(struct seq_file *m, loff_t *pos)
856*3c486871SAndrew Morton {
857*3c486871SAndrew Morton 	pg_data_t *pgdat;
858*3c486871SAndrew Morton 	loff_t node = *pos;
859*3c486871SAndrew Morton 
860*3c486871SAndrew Morton 	for (pgdat = first_online_pgdat();
861*3c486871SAndrew Morton 	     pgdat && node;
862*3c486871SAndrew Morton 	     pgdat = next_online_pgdat(pgdat))
863*3c486871SAndrew Morton 		--node;
864*3c486871SAndrew Morton 
865*3c486871SAndrew Morton 	return pgdat;
866*3c486871SAndrew Morton }
867*3c486871SAndrew Morton 
868*3c486871SAndrew Morton static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
869*3c486871SAndrew Morton {
870*3c486871SAndrew Morton 	pg_data_t *pgdat = (pg_data_t *)arg;
871*3c486871SAndrew Morton 
872*3c486871SAndrew Morton 	(*pos)++;
873*3c486871SAndrew Morton 	return next_online_pgdat(pgdat);
874*3c486871SAndrew Morton }
875*3c486871SAndrew Morton 
876*3c486871SAndrew Morton static void frag_stop(struct seq_file *m, void *arg)
877*3c486871SAndrew Morton {
878*3c486871SAndrew Morton }
879*3c486871SAndrew Morton 
880*3c486871SAndrew Morton /* Walk all the zones in a node and print using a callback */
881*3c486871SAndrew Morton static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
882*3c486871SAndrew Morton 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
883*3c486871SAndrew Morton {
884*3c486871SAndrew Morton 	struct zone *zone;
885*3c486871SAndrew Morton 	struct zone *node_zones = pgdat->node_zones;
886*3c486871SAndrew Morton 	unsigned long flags;
887*3c486871SAndrew Morton 
888*3c486871SAndrew Morton 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
889*3c486871SAndrew Morton 		if (!populated_zone(zone))
890*3c486871SAndrew Morton 			continue;
891*3c486871SAndrew Morton 
892*3c486871SAndrew Morton 		spin_lock_irqsave(&zone->lock, flags);
893*3c486871SAndrew Morton 		print(m, pgdat, zone);
894*3c486871SAndrew Morton 		spin_unlock_irqrestore(&zone->lock, flags);
895*3c486871SAndrew Morton 	}
896*3c486871SAndrew Morton }
897*3c486871SAndrew Morton #endif
898*3c486871SAndrew Morton 
899d7a5752cSMel Gorman #ifdef CONFIG_PROC_FS
900*3c486871SAndrew Morton static char * const migratetype_names[MIGRATE_TYPES] = {
901*3c486871SAndrew Morton 	"Unmovable",
902*3c486871SAndrew Morton 	"Reclaimable",
903*3c486871SAndrew Morton 	"Movable",
904*3c486871SAndrew Morton 	"Reserve",
905*3c486871SAndrew Morton #ifdef CONFIG_CMA
906*3c486871SAndrew Morton 	"CMA",
907*3c486871SAndrew Morton #endif
908*3c486871SAndrew Morton #ifdef CONFIG_MEMORY_ISOLATION
909*3c486871SAndrew Morton 	"Isolate",
910*3c486871SAndrew Morton #endif
911*3c486871SAndrew Morton };
912*3c486871SAndrew Morton 
913467c996cSMel Gorman static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
914467c996cSMel Gorman 						struct zone *zone)
915467c996cSMel Gorman {
916467c996cSMel Gorman 	int order;
917467c996cSMel Gorman 
918f6ac2354SChristoph Lameter 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
919f6ac2354SChristoph Lameter 	for (order = 0; order < MAX_ORDER; ++order)
920f6ac2354SChristoph Lameter 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
921f6ac2354SChristoph Lameter 	seq_putc(m, '\n');
922f6ac2354SChristoph Lameter }
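
/*
 * A line of the resulting /proc/buddyinfo output might look like
 * (illustrative values):
 *
 *	Node 0, zone   Normal   1204    543    221     95     11      3 ...
 *
 * i.e. the number of free blocks at each order from 0 to MAX_ORDER-1.
 */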
923467c996cSMel Gorman 
924467c996cSMel Gorman /*
925467c996cSMel Gorman  * This walks the free areas for each zone.
926467c996cSMel Gorman  */
927467c996cSMel Gorman static int frag_show(struct seq_file *m, void *arg)
928467c996cSMel Gorman {
929467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
930467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, frag_show_print);
931467c996cSMel Gorman 	return 0;
932467c996cSMel Gorman }
933467c996cSMel Gorman 
934467c996cSMel Gorman static void pagetypeinfo_showfree_print(struct seq_file *m,
935467c996cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
936467c996cSMel Gorman {
937467c996cSMel Gorman 	int order, mtype;
938467c996cSMel Gorman 
939467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
940467c996cSMel Gorman 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
941467c996cSMel Gorman 					pgdat->node_id,
942467c996cSMel Gorman 					zone->name,
943467c996cSMel Gorman 					migratetype_names[mtype]);
944467c996cSMel Gorman 		for (order = 0; order < MAX_ORDER; ++order) {
945467c996cSMel Gorman 			unsigned long freecount = 0;
946467c996cSMel Gorman 			struct free_area *area;
947467c996cSMel Gorman 			struct list_head *curr;
948467c996cSMel Gorman 
949467c996cSMel Gorman 			area = &(zone->free_area[order]);
950467c996cSMel Gorman 
951467c996cSMel Gorman 			list_for_each(curr, &area->free_list[mtype])
952467c996cSMel Gorman 				freecount++;
953467c996cSMel Gorman 			seq_printf(m, "%6lu ", freecount);
954467c996cSMel Gorman 		}
955467c996cSMel Gorman 		seq_putc(m, '\n');
956467c996cSMel Gorman 	}
957467c996cSMel Gorman }
958467c996cSMel Gorman 
959467c996cSMel Gorman /* Print out the free pages at each order for each migratetype */
960467c996cSMel Gorman static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
961467c996cSMel Gorman {
962467c996cSMel Gorman 	int order;
963467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
964467c996cSMel Gorman 
965467c996cSMel Gorman 	/* Print header */
966467c996cSMel Gorman 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
967467c996cSMel Gorman 	for (order = 0; order < MAX_ORDER; ++order)
968467c996cSMel Gorman 		seq_printf(m, "%6d ", order);
969467c996cSMel Gorman 	seq_putc(m, '\n');
970467c996cSMel Gorman 
971467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
972467c996cSMel Gorman 
973467c996cSMel Gorman 	return 0;
974467c996cSMel Gorman }
975467c996cSMel Gorman 
976467c996cSMel Gorman static void pagetypeinfo_showblockcount_print(struct seq_file *m,
977467c996cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
978467c996cSMel Gorman {
979467c996cSMel Gorman 	int mtype;
980467c996cSMel Gorman 	unsigned long pfn;
981467c996cSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
982108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
983467c996cSMel Gorman 	unsigned long count[MIGRATE_TYPES] = { 0, };
984467c996cSMel Gorman 
985467c996cSMel Gorman 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
986467c996cSMel Gorman 		struct page *page;
987467c996cSMel Gorman 
988467c996cSMel Gorman 		if (!pfn_valid(pfn))
989467c996cSMel Gorman 			continue;
990467c996cSMel Gorman 
991467c996cSMel Gorman 		page = pfn_to_page(pfn);
992eb33575cSMel Gorman 
993eb33575cSMel Gorman 		/* Watch for unexpected holes punched in the memmap */
994eb33575cSMel Gorman 		if (!memmap_valid_within(pfn, page, zone))
995e80d6a24SMel Gorman 			continue;
996eb33575cSMel Gorman 
997467c996cSMel Gorman 		mtype = get_pageblock_migratetype(page);
998467c996cSMel Gorman 
999e80d6a24SMel Gorman 		if (mtype < MIGRATE_TYPES)
1000467c996cSMel Gorman 			count[mtype]++;
1001467c996cSMel Gorman 	}
1002467c996cSMel Gorman 
1003467c996cSMel Gorman 	/* Print counts */
1004467c996cSMel Gorman 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1005467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1006467c996cSMel Gorman 		seq_printf(m, "%12lu ", count[mtype]);
1007467c996cSMel Gorman 	seq_putc(m, '\n');
1008467c996cSMel Gorman }
1009467c996cSMel Gorman 
1010467c996cSMel Gorman /* Print out the number of pageblocks for each migratetype */
1011467c996cSMel Gorman static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1012467c996cSMel Gorman {
1013467c996cSMel Gorman 	int mtype;
1014467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1015467c996cSMel Gorman 
1016467c996cSMel Gorman 	seq_printf(m, "\n%-23s", "Number of blocks type ");
1017467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1018467c996cSMel Gorman 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1019467c996cSMel Gorman 	seq_putc(m, '\n');
1020467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1021467c996cSMel Gorman 
1022467c996cSMel Gorman 	return 0;
1023467c996cSMel Gorman }
1024467c996cSMel Gorman 
102548c96a36SJoonsoo Kim #ifdef CONFIG_PAGE_OWNER
102648c96a36SJoonsoo Kim static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
102748c96a36SJoonsoo Kim 							pg_data_t *pgdat,
102848c96a36SJoonsoo Kim 							struct zone *zone)
102948c96a36SJoonsoo Kim {
103048c96a36SJoonsoo Kim 	struct page *page;
103148c96a36SJoonsoo Kim 	struct page_ext *page_ext;
103248c96a36SJoonsoo Kim 	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
103348c96a36SJoonsoo Kim 	unsigned long end_pfn = pfn + zone->spanned_pages;
103448c96a36SJoonsoo Kim 	unsigned long count[MIGRATE_TYPES] = { 0, };
103548c96a36SJoonsoo Kim 	int pageblock_mt, page_mt;
103648c96a36SJoonsoo Kim 	int i;
103748c96a36SJoonsoo Kim 
103848c96a36SJoonsoo Kim 	/* Scan block by block. First and last block may be incomplete */
103948c96a36SJoonsoo Kim 	pfn = zone->zone_start_pfn;
104048c96a36SJoonsoo Kim 
104148c96a36SJoonsoo Kim 	/*
104248c96a36SJoonsoo Kim 	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
104348c96a36SJoonsoo Kim 	 * a zone boundary, it will be double counted between zones. This does
104448c96a36SJoonsoo Kim 	 * not matter as the mixed block count will still be correct.
104548c96a36SJoonsoo Kim 	 */
104648c96a36SJoonsoo Kim 	for (; pfn < end_pfn; ) {
104748c96a36SJoonsoo Kim 		if (!pfn_valid(pfn)) {
104848c96a36SJoonsoo Kim 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
104948c96a36SJoonsoo Kim 			continue;
105048c96a36SJoonsoo Kim 		}
105148c96a36SJoonsoo Kim 
105248c96a36SJoonsoo Kim 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
105348c96a36SJoonsoo Kim 		block_end_pfn = min(block_end_pfn, end_pfn);
105448c96a36SJoonsoo Kim 
105548c96a36SJoonsoo Kim 		page = pfn_to_page(pfn);
105648c96a36SJoonsoo Kim 		pageblock_mt = get_pfnblock_migratetype(page, pfn);
105748c96a36SJoonsoo Kim 
105848c96a36SJoonsoo Kim 		for (; pfn < block_end_pfn; pfn++) {
105948c96a36SJoonsoo Kim 			if (!pfn_valid_within(pfn))
106048c96a36SJoonsoo Kim 				continue;
106148c96a36SJoonsoo Kim 
106248c96a36SJoonsoo Kim 			page = pfn_to_page(pfn);
106348c96a36SJoonsoo Kim 			if (PageBuddy(page)) {
106448c96a36SJoonsoo Kim 				pfn += (1UL << page_order(page)) - 1;
106548c96a36SJoonsoo Kim 				continue;
106648c96a36SJoonsoo Kim 			}
106748c96a36SJoonsoo Kim 
106848c96a36SJoonsoo Kim 			if (PageReserved(page))
106948c96a36SJoonsoo Kim 				continue;
107048c96a36SJoonsoo Kim 
107148c96a36SJoonsoo Kim 			page_ext = lookup_page_ext(page);
107248c96a36SJoonsoo Kim 
107348c96a36SJoonsoo Kim 			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
107448c96a36SJoonsoo Kim 				continue;
107548c96a36SJoonsoo Kim 
107648c96a36SJoonsoo Kim 			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
107748c96a36SJoonsoo Kim 			if (pageblock_mt != page_mt) {
107848c96a36SJoonsoo Kim 				if (is_migrate_cma(pageblock_mt))
107948c96a36SJoonsoo Kim 					count[MIGRATE_MOVABLE]++;
108048c96a36SJoonsoo Kim 				else
108148c96a36SJoonsoo Kim 					count[pageblock_mt]++;
108248c96a36SJoonsoo Kim 
108348c96a36SJoonsoo Kim 				pfn = block_end_pfn;
108448c96a36SJoonsoo Kim 				break;
108548c96a36SJoonsoo Kim 			}
108648c96a36SJoonsoo Kim 			pfn += (1UL << page_ext->order) - 1;
108748c96a36SJoonsoo Kim 		}
108848c96a36SJoonsoo Kim 	}
108948c96a36SJoonsoo Kim 
109048c96a36SJoonsoo Kim 	/* Print counts */
109148c96a36SJoonsoo Kim 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
109248c96a36SJoonsoo Kim 	for (i = 0; i < MIGRATE_TYPES; i++)
109348c96a36SJoonsoo Kim 		seq_printf(m, "%12lu ", count[i]);
109448c96a36SJoonsoo Kim 	seq_putc(m, '\n');
109548c96a36SJoonsoo Kim }
109648c96a36SJoonsoo Kim #endif /* CONFIG_PAGE_OWNER */
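/*
 * Illustrative sketch (not part of vmstat.c): the mixed-block counting
 * idea above, reduced to plain arrays in user-space C. The migratetype
 * values, block sizes and sample data are assumed for illustration.
 */
#if 0
#include <stdio.h>

#define NR_TYPES	3	/* assumed: unmovable, reclaimable, movable */
#define BLOCKS		4
#define PAGES_PER_BLOCK	8

int main(void)
{
	/* declared migratetype of each pageblock (assumed sample data) */
	int block_mt[BLOCKS] = { 0, 1, 2, 2 };
	/* migratetype each page was actually allocated with */
	int page_mt[BLOCKS][PAGES_PER_BLOCK];
	unsigned long count[NR_TYPES] = { 0 };
	int b, p;

	for (b = 0; b < BLOCKS; b++)
		for (p = 0; p < PAGES_PER_BLOCK; p++)
			page_mt[b][p] = block_mt[b];
	page_mt[1][3] = 2;	/* a movable page inside a reclaimable block */

	/*
	 * As in pagetypeinfo_showmixedcount_print(): a pageblock counts
	 * as mixed as soon as one page of a foreign type is found in it.
	 */
	for (b = 0; b < BLOCKS; b++)
		for (p = 0; p < PAGES_PER_BLOCK; p++)
			if (page_mt[b][p] != block_mt[b]) {
				count[block_mt[b]]++;
				break;
			}

	for (p = 0; p < NR_TYPES; p++)
		printf("%lu ", count[p]);
	printf("\n");	/* prints "0 1 0" for the sample data */
	return 0;
}
#endif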
109748c96a36SJoonsoo Kim 
109848c96a36SJoonsoo Kim /*
109948c96a36SJoonsoo Kim  * Print out the number of pageblocks for each migratetype that contain pages
110048c96a36SJoonsoo Kim  * of other types. This gives an indication of how well fallbacks are being
110148c96a36SJoonsoo Kim  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
110248c96a36SJoonsoo Kim  * to determine what is going on.
110348c96a36SJoonsoo Kim  */
110448c96a36SJoonsoo Kim static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
110548c96a36SJoonsoo Kim {
110648c96a36SJoonsoo Kim #ifdef CONFIG_PAGE_OWNER
110748c96a36SJoonsoo Kim 	int mtype;
110848c96a36SJoonsoo Kim 
110948c96a36SJoonsoo Kim 	if (!page_owner_inited)
111048c96a36SJoonsoo Kim 		return;
111148c96a36SJoonsoo Kim 
111248c96a36SJoonsoo Kim 	drain_all_pages(NULL);
111348c96a36SJoonsoo Kim 
111448c96a36SJoonsoo Kim 	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
111548c96a36SJoonsoo Kim 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
111648c96a36SJoonsoo Kim 		seq_printf(m, "%12s ", migratetype_names[mtype]);
111748c96a36SJoonsoo Kim 	seq_putc(m, '\n');
111848c96a36SJoonsoo Kim 
111948c96a36SJoonsoo Kim 	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
112048c96a36SJoonsoo Kim #endif /* CONFIG_PAGE_OWNER */
112148c96a36SJoonsoo Kim }
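/*
 * Illustrative output (values assumed): with CONFIG_PAGE_OWNER enabled,
 * the header row above plus the per-zone rows emitted by
 * pagetypeinfo_showmixedcount_print() might look like:
 *
 *	Number of mixed blocks    Unmovable  Reclaimable      Movable ...
 *	Node 0, zone   Normal             9            4            0 ...
 *
 * The column set follows migratetype_names[]; the counts depend entirely
 * on the workload.
 */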
112248c96a36SJoonsoo Kim 
1123467c996cSMel Gorman /*
1124467c996cSMel Gorman  * This prints out statistics in relation to grouping pages by mobility.
1125467c996cSMel Gorman  * It is expensive to collect so do not constantly read the file.
1126467c996cSMel Gorman  */
1127467c996cSMel Gorman static int pagetypeinfo_show(struct seq_file *m, void *arg)
1128467c996cSMel Gorman {
1129467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1130467c996cSMel Gorman 
113141b25a37SKOSAKI Motohiro 	/* check memoryless node */
1132a47b53c5SLai Jiangshan 	if (!node_state(pgdat->node_id, N_MEMORY))
113341b25a37SKOSAKI Motohiro 		return 0;
113441b25a37SKOSAKI Motohiro 
1135467c996cSMel Gorman 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1136467c996cSMel Gorman 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1137467c996cSMel Gorman 	seq_putc(m, '\n');
1138467c996cSMel Gorman 	pagetypeinfo_showfree(m, pgdat);
1139467c996cSMel Gorman 	pagetypeinfo_showblockcount(m, pgdat);
114048c96a36SJoonsoo Kim 	pagetypeinfo_showmixedcount(m, pgdat);
1141467c996cSMel Gorman 
1142f6ac2354SChristoph Lameter 	return 0;
1143f6ac2354SChristoph Lameter }
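/*
 * Usage sketch (not part of vmstat.c): pagetypeinfo_show() backs
 * /proc/pagetypeinfo, so the statistics can be read from user space.
 * The file name is real; the sample value in the comment is assumed.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/pagetypeinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Page block order: 9" */
	fclose(f);
	return 0;
}
#endif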
1144f6ac2354SChristoph Lameter 
11458f32f7e5SAlexey Dobriyan static const struct seq_operations fragmentation_op = {
1146f6ac2354SChristoph Lameter 	.start	= frag_start,
1147f6ac2354SChristoph Lameter 	.next	= frag_next,
1148f6ac2354SChristoph Lameter 	.stop	= frag_stop,
1149f6ac2354SChristoph Lameter 	.show	= frag_show,
1150f6ac2354SChristoph Lameter };
1151f6ac2354SChristoph Lameter 
11528f32f7e5SAlexey Dobriyan static int fragmentation_open(struct inode *inode, struct file *file)
11538f32f7e5SAlexey Dobriyan {
11548f32f7e5SAlexey Dobriyan 	return seq_open(file, &fragmentation_op);
11558f32f7e5SAlexey Dobriyan }
11568f32f7e5SAlexey Dobriyan 
11578f32f7e5SAlexey Dobriyan static const struct file_operations fragmentation_file_operations = {
11588f32f7e5SAlexey Dobriyan 	.open		= fragmentation_open,
11598f32f7e5SAlexey Dobriyan 	.read		= seq_read,
11608f32f7e5SAlexey Dobriyan 	.llseek		= seq_lseek,
11618f32f7e5SAlexey Dobriyan 	.release	= seq_release,
11628f32f7e5SAlexey Dobriyan };
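/*
 * The open/read/llseek/release quartet above is the standard glue for
 * exposing a seq_operations iterator through procfs; the same pattern
 * repeats below for pagetypeinfo, zoneinfo and vmstat. A minimal
 * template (the foo_* names are placeholders, not kernel symbols):
 *
 *	static int foo_open(struct inode *inode, struct file *file)
 *	{
 *		return seq_open(file, &foo_op);
 *	}
 *
 *	static const struct file_operations foo_fops = {
 *		.open		= foo_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= seq_release,
 *	};
 */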
11638f32f7e5SAlexey Dobriyan 
116474e2e8e8SAlexey Dobriyan static const struct seq_operations pagetypeinfo_op = {
1165467c996cSMel Gorman 	.start	= frag_start,
1166467c996cSMel Gorman 	.next	= frag_next,
1167467c996cSMel Gorman 	.stop	= frag_stop,
1168467c996cSMel Gorman 	.show	= pagetypeinfo_show,
1169467c996cSMel Gorman };
1170467c996cSMel Gorman 
117174e2e8e8SAlexey Dobriyan static int pagetypeinfo_open(struct inode *inode, struct file *file)
117274e2e8e8SAlexey Dobriyan {
117374e2e8e8SAlexey Dobriyan 	return seq_open(file, &pagetypeinfo_op);
117474e2e8e8SAlexey Dobriyan }
117574e2e8e8SAlexey Dobriyan 
117674e2e8e8SAlexey Dobriyan static const struct file_operations pagetypeinfo_file_ops = {
117774e2e8e8SAlexey Dobriyan 	.open		= pagetypeinfo_open,
117874e2e8e8SAlexey Dobriyan 	.read		= seq_read,
117974e2e8e8SAlexey Dobriyan 	.llseek		= seq_lseek,
118074e2e8e8SAlexey Dobriyan 	.release	= seq_release,
118174e2e8e8SAlexey Dobriyan };
118274e2e8e8SAlexey Dobriyan 
1183467c996cSMel Gorman static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1184467c996cSMel Gorman 							struct zone *zone)
1185f6ac2354SChristoph Lameter {
1186f6ac2354SChristoph Lameter 	int i;
1187f6ac2354SChristoph Lameter 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1188f6ac2354SChristoph Lameter 	seq_printf(m,
1189f6ac2354SChristoph Lameter 		   "\n  pages free     %lu"
1190f6ac2354SChristoph Lameter 		   "\n        min      %lu"
1191f6ac2354SChristoph Lameter 		   "\n        low      %lu"
1192f6ac2354SChristoph Lameter 		   "\n        high     %lu"
119308d9ae7cSWu Fengguang 		   "\n        scanned  %lu"
1194f6ac2354SChristoph Lameter 		   "\n        spanned  %lu"
11959feedc9dSJiang Liu 		   "\n        present  %lu"
11969feedc9dSJiang Liu 		   "\n        managed  %lu",
119788f5acf8SMel Gorman 		   zone_page_state(zone, NR_FREE_PAGES),
119841858966SMel Gorman 		   min_wmark_pages(zone),
119941858966SMel Gorman 		   low_wmark_pages(zone),
120041858966SMel Gorman 		   high_wmark_pages(zone),
12010d5d823aSMel Gorman 		   zone_page_state(zone, NR_PAGES_SCANNED),
1202f6ac2354SChristoph Lameter 		   zone->spanned_pages,
12039feedc9dSJiang Liu 		   zone->present_pages,
12049feedc9dSJiang Liu 		   zone->managed_pages);
12052244b95aSChristoph Lameter 
12062244b95aSChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
12072244b95aSChristoph Lameter 		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
12082244b95aSChristoph Lameter 				zone_page_state(zone, i));
12092244b95aSChristoph Lameter 
1210f6ac2354SChristoph Lameter 	seq_printf(m,
12113484b2deSMel Gorman 		   "\n        protection: (%ld",
1212f6ac2354SChristoph Lameter 		   zone->lowmem_reserve[0]);
1213f6ac2354SChristoph Lameter 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
12143484b2deSMel Gorman 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1215f6ac2354SChristoph Lameter 	seq_printf(m,
1216f6ac2354SChristoph Lameter 		   ")"
1217f6ac2354SChristoph Lameter 		   "\n  pagesets");
1218f6ac2354SChristoph Lameter 	for_each_online_cpu(i) {
1219f6ac2354SChristoph Lameter 		struct per_cpu_pageset *pageset;
1220f6ac2354SChristoph Lameter 
122199dcc3e5SChristoph Lameter 		pageset = per_cpu_ptr(zone->pageset, i);
1222f6ac2354SChristoph Lameter 		seq_printf(m,
12233dfa5721SChristoph Lameter 			   "\n    cpu: %i"
1224f6ac2354SChristoph Lameter 			   "\n              count: %i"
1225f6ac2354SChristoph Lameter 			   "\n              high:  %i"
1226f6ac2354SChristoph Lameter 			   "\n              batch: %i",
12273dfa5721SChristoph Lameter 			   i,
12283dfa5721SChristoph Lameter 			   pageset->pcp.count,
12293dfa5721SChristoph Lameter 			   pageset->pcp.high,
12303dfa5721SChristoph Lameter 			   pageset->pcp.batch);
1231df9ecabaSChristoph Lameter #ifdef CONFIG_SMP
1232df9ecabaSChristoph Lameter 		seq_printf(m, "\n  vm stats threshold: %d",
1233df9ecabaSChristoph Lameter 				pageset->stat_threshold);
1234df9ecabaSChristoph Lameter #endif
1235f6ac2354SChristoph Lameter 	}
1236f6ac2354SChristoph Lameter 	seq_printf(m,
1237f6ac2354SChristoph Lameter 		   "\n  all_unreclaimable: %u"
1238556adecbSRik van Riel 		   "\n  start_pfn:         %lu"
1239556adecbSRik van Riel 		   "\n  inactive_ratio:    %u",
12406e543d57SLisa Du 		   !zone_reclaimable(zone),
1241556adecbSRik van Riel 		   zone->zone_start_pfn,
1242556adecbSRik van Riel 		   zone->inactive_ratio);
1243f6ac2354SChristoph Lameter 	seq_putc(m, '\n');
1244f6ac2354SChristoph Lameter }
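/*
 * Illustrative output (values assumed): the function above produces the
 * per-zone stanza of /proc/zoneinfo, e.g.:
 *
 *	Node 0, zone   Normal
 *	  pages free     81462
 *	        min      11293
 *	        low      14116
 *	        high     16939
 *	        ...
 *	  pagesets
 *	    cpu: 0
 *	              count: 153
 *	...
 */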
1245467c996cSMel Gorman 
1246467c996cSMel Gorman /*
1247467c996cSMel Gorman  * Output information about zones in @pgdat.
1248467c996cSMel Gorman  */
1249467c996cSMel Gorman static int zoneinfo_show(struct seq_file *m, void *arg)
1250467c996cSMel Gorman {
1251467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1252467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1253f6ac2354SChristoph Lameter 	return 0;
1254f6ac2354SChristoph Lameter }
1255f6ac2354SChristoph Lameter 
12565c9fe628SAlexey Dobriyan static const struct seq_operations zoneinfo_op = {
1257f6ac2354SChristoph Lameter 	.start	= frag_start, /* iterate over all zones. The same as in
1258f6ac2354SChristoph Lameter 			       * fragmentation. */
1259f6ac2354SChristoph Lameter 	.next	= frag_next,
1260f6ac2354SChristoph Lameter 	.stop	= frag_stop,
1261f6ac2354SChristoph Lameter 	.show	= zoneinfo_show,
1262f6ac2354SChristoph Lameter };
1263f6ac2354SChristoph Lameter 
12645c9fe628SAlexey Dobriyan static int zoneinfo_open(struct inode *inode, struct file *file)
12655c9fe628SAlexey Dobriyan {
12665c9fe628SAlexey Dobriyan 	return seq_open(file, &zoneinfo_op);
12675c9fe628SAlexey Dobriyan }
12685c9fe628SAlexey Dobriyan 
12695c9fe628SAlexey Dobriyan static const struct file_operations proc_zoneinfo_file_operations = {
12705c9fe628SAlexey Dobriyan 	.open		= zoneinfo_open,
12715c9fe628SAlexey Dobriyan 	.read		= seq_read,
12725c9fe628SAlexey Dobriyan 	.llseek		= seq_lseek,
12735c9fe628SAlexey Dobriyan 	.release	= seq_release,
12745c9fe628SAlexey Dobriyan };
12755c9fe628SAlexey Dobriyan 
127679da826aSMichael Rubin enum writeback_stat_item {
127779da826aSMichael Rubin 	NR_DIRTY_THRESHOLD,
127879da826aSMichael Rubin 	NR_DIRTY_BG_THRESHOLD,
127979da826aSMichael Rubin 	NR_VM_WRITEBACK_STAT_ITEMS,
128079da826aSMichael Rubin };
128179da826aSMichael Rubin 
1282f6ac2354SChristoph Lameter static void *vmstat_start(struct seq_file *m, loff_t *pos)
1283f6ac2354SChristoph Lameter {
12842244b95aSChristoph Lameter 	unsigned long *v;
128579da826aSMichael Rubin 	int i, stat_items_size;
1286f6ac2354SChristoph Lameter 
1287f6ac2354SChristoph Lameter 	if (*pos >= ARRAY_SIZE(vmstat_text))
1288f6ac2354SChristoph Lameter 		return NULL;
128979da826aSMichael Rubin 	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
129079da826aSMichael Rubin 			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1291f6ac2354SChristoph Lameter 
1292f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
129379da826aSMichael Rubin 	stat_items_size += sizeof(struct vm_event_state);
1294f8891e5eSChristoph Lameter #endif
129579da826aSMichael Rubin 
129679da826aSMichael Rubin 	v = kmalloc(stat_items_size, GFP_KERNEL);
12972244b95aSChristoph Lameter 	m->private = v;
12982244b95aSChristoph Lameter 	if (!v)
1299f6ac2354SChristoph Lameter 		return ERR_PTR(-ENOMEM);
13002244b95aSChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
13012244b95aSChristoph Lameter 		v[i] = global_page_state(i);
130279da826aSMichael Rubin 	v += NR_VM_ZONE_STAT_ITEMS;
130379da826aSMichael Rubin 
130479da826aSMichael Rubin 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
130579da826aSMichael Rubin 			    v + NR_DIRTY_THRESHOLD);
130679da826aSMichael Rubin 	v += NR_VM_WRITEBACK_STAT_ITEMS;
130779da826aSMichael Rubin 
1308f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
130979da826aSMichael Rubin 	all_vm_events(v);
131079da826aSMichael Rubin 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
131179da826aSMichael Rubin 	v[PGPGOUT] /= 2;
1312f8891e5eSChristoph Lameter #endif
1313ff8b16d7SWu Fengguang 	return (unsigned long *)m->private + *pos;
1314f6ac2354SChristoph Lameter }
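/*
 * The snapshot buffer built above is laid out to match the ordering of
 * vmstat_text[]:
 *
 *	v[0 .. NR_VM_ZONE_STAT_ITEMS-1]          zone counters
 *	v[.. + NR_VM_WRITEBACK_STAT_ITEMS-1]     dirty thresholds
 *	v[.. + NR_VM_EVENT_ITEMS-1]              event counters (if enabled)
 *
 * PGPGIN/PGPGOUT are halved because the block layer accounts them in
 * 512-byte sectors while /proc/vmstat reports kilobytes: 2 sectors = 1 KB.
 */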
1315f6ac2354SChristoph Lameter 
1316f6ac2354SChristoph Lameter static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1317f6ac2354SChristoph Lameter {
1318f6ac2354SChristoph Lameter 	(*pos)++;
1319f6ac2354SChristoph Lameter 	if (*pos >= ARRAY_SIZE(vmstat_text))
1320f6ac2354SChristoph Lameter 		return NULL;
1321f6ac2354SChristoph Lameter 	return (unsigned long *)m->private + *pos;
1322f6ac2354SChristoph Lameter }
1323f6ac2354SChristoph Lameter 
1324f6ac2354SChristoph Lameter static int vmstat_show(struct seq_file *m, void *arg)
1325f6ac2354SChristoph Lameter {
1326f6ac2354SChristoph Lameter 	unsigned long *l = arg;
1327f6ac2354SChristoph Lameter 	unsigned long off = l - (unsigned long *)m->private;
1328f6ac2354SChristoph Lameter 
1329f6ac2354SChristoph Lameter 	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1330f6ac2354SChristoph Lameter 	return 0;
1331f6ac2354SChristoph Lameter }
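/*
 * Each "%s %lu" pair above becomes one line of /proc/vmstat; the names
 * come from vmstat_text[] and the values from the snapshot built in
 * vmstat_start(). Illustrative excerpt (values assumed):
 *
 *	nr_free_pages 81462
 *	pgpgin 2718281
 */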
1332f6ac2354SChristoph Lameter 
1333f6ac2354SChristoph Lameter static void vmstat_stop(struct seq_file *m, void *arg)
1334f6ac2354SChristoph Lameter {
1335f6ac2354SChristoph Lameter 	kfree(m->private);
1336f6ac2354SChristoph Lameter 	m->private = NULL;
1337f6ac2354SChristoph Lameter }
1338f6ac2354SChristoph Lameter 
1339b6aa44abSAlexey Dobriyan static const struct seq_operations vmstat_op = {
1340f6ac2354SChristoph Lameter 	.start	= vmstat_start,
1341f6ac2354SChristoph Lameter 	.next	= vmstat_next,
1342f6ac2354SChristoph Lameter 	.stop	= vmstat_stop,
1343f6ac2354SChristoph Lameter 	.show	= vmstat_show,
1344f6ac2354SChristoph Lameter };
1345f6ac2354SChristoph Lameter 
1346b6aa44abSAlexey Dobriyan static int vmstat_open(struct inode *inode, struct file *file)
1347b6aa44abSAlexey Dobriyan {
1348b6aa44abSAlexey Dobriyan 	return seq_open(file, &vmstat_op);
1349b6aa44abSAlexey Dobriyan }
1350b6aa44abSAlexey Dobriyan 
1351b6aa44abSAlexey Dobriyan static const struct file_operations proc_vmstat_file_operations = {
1352b6aa44abSAlexey Dobriyan 	.open		= vmstat_open,
1353b6aa44abSAlexey Dobriyan 	.read		= seq_read,
1354b6aa44abSAlexey Dobriyan 	.llseek		= seq_lseek,
1355b6aa44abSAlexey Dobriyan 	.release	= seq_release,
1356b6aa44abSAlexey Dobriyan };
1357f6ac2354SChristoph Lameter #endif /* CONFIG_PROC_FS */
1358f6ac2354SChristoph Lameter 
1359df9ecabaSChristoph Lameter #ifdef CONFIG_SMP
1360d1187ed2SChristoph Lameter static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
136177461ab3SChristoph Lameter int sysctl_stat_interval __read_mostly = HZ;
13627cc36bbdSChristoph Lameter static cpumask_var_t cpu_stat_off;
1363d1187ed2SChristoph Lameter 
1364d1187ed2SChristoph Lameter static void vmstat_update(struct work_struct *w)
1365d1187ed2SChristoph Lameter {
13667cc36bbdSChristoph Lameter 	if (refresh_cpu_vm_stats())
13677cc36bbdSChristoph Lameter 		/*
13687cc36bbdSChristoph Lameter 		 * Counters were updated so we expect more updates
13697cc36bbdSChristoph Lameter 		 * to occur in the future. Keep on running the
13707cc36bbdSChristoph Lameter 		 * update worker thread.
13717cc36bbdSChristoph Lameter 		 */
13727c8e0181SChristoph Lameter 		schedule_delayed_work(this_cpu_ptr(&vmstat_work),
137398f4ebb2SAnton Blanchard 			round_jiffies_relative(sysctl_stat_interval));
13747cc36bbdSChristoph Lameter 	else {
13757cc36bbdSChristoph Lameter 		/*
13767cc36bbdSChristoph Lameter 		 * We did not update any counters, so the workload on
13777cc36bbdSChristoph Lameter 		 * this cpu may currently not be generating counter
13787cc36bbdSChristoph Lameter 		 * updates and vmstat_update may be running uselessly.
13797cc36bbdSChristoph Lameter 		 * Defer the checking for differentials to the
13807cc36bbdSChristoph Lameter 		 * shepherd thread on a different processor.
13817cc36bbdSChristoph Lameter 		 */
13827cc36bbdSChristoph Lameter 		int r;
13837cc36bbdSChristoph Lameter 		/*
13847cc36bbdSChristoph Lameter 		 * The shepherd work thread does not race with us since
13857cc36bbdSChristoph Lameter 		 * it never changes the bit if it is zero, but the cpu
13867cc36bbdSChristoph Lameter 		 * online/offline code may race if
13877cc36bbdSChristoph Lameter 		 * worker threads are still allowed during
13887cc36bbdSChristoph Lameter 		 * shutdown/startup.
13897cc36bbdSChristoph Lameter 		 */
13907cc36bbdSChristoph Lameter 		r = cpumask_test_and_set_cpu(smp_processor_id(),
13917cc36bbdSChristoph Lameter 			cpu_stat_off);
13927cc36bbdSChristoph Lameter 		VM_BUG_ON(r);
13937cc36bbdSChristoph Lameter 	}
1394d1187ed2SChristoph Lameter }
1395d1187ed2SChristoph Lameter 
13967cc36bbdSChristoph Lameter /*
13977cc36bbdSChristoph Lameter  * Check if the diffs for a certain cpu indicate that
13987cc36bbdSChristoph Lameter  * an update is needed.
13997cc36bbdSChristoph Lameter  */
14007cc36bbdSChristoph Lameter static bool need_update(int cpu)
1401d1187ed2SChristoph Lameter {
14027cc36bbdSChristoph Lameter 	struct zone *zone;
1403d1187ed2SChristoph Lameter 
14047cc36bbdSChristoph Lameter 	for_each_populated_zone(zone) {
14057cc36bbdSChristoph Lameter 		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
14067cc36bbdSChristoph Lameter 
14077cc36bbdSChristoph Lameter 		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
14087cc36bbdSChristoph Lameter 		/*
14097cc36bbdSChristoph Lameter 		 * A fast way of checking if there are any vmstat diffs:
14107cc36bbdSChristoph Lameter 		 * this works because the diffs are byte-sized items.
14117cc36bbdSChristoph Lameter 		 */
14127cc36bbdSChristoph Lameter 		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
14137cc36bbdSChristoph Lameter 			return true;
14157cc36bbdSChristoph Lameter 	}
14167cc36bbdSChristoph Lameter 	return false;
14177cc36bbdSChristoph Lameter }
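/*
 * memchr_inv(buf, c, len) returns the address of the first byte in buf
 * that differs from c, or NULL when every byte equals c, so the call
 * above is a one-shot "is the whole diff array zero?" test. A user-space
 * analog for experimentation (memchr_inv itself is a kernel helper, so
 * a stand-in loop is used):
 */
#if 0
#include <stddef.h>
#include <stdio.h>

static const void *memchr_inv_analog(const void *s, int c, size_t n)
{
	const unsigned char *p = s;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;
}

int main(void)
{
	char diff[8] = { 0 };

	diff[5] = 1;	/* one pending per-cpu differential */
	printf("update needed: %d\n", memchr_inv_analog(diff, 0, 8) != NULL);
	return 0;
}
#endif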
14187cc36bbdSChristoph Lameter 
14197cc36bbdSChristoph Lameter 
14207cc36bbdSChristoph Lameter /*
14217cc36bbdSChristoph Lameter  * Shepherd worker thread that checks the
14227cc36bbdSChristoph Lameter  * differentials of processors whose vmstat update
14237cc36bbdSChristoph Lameter  * worker threads have been disabled because of
14247cc36bbdSChristoph Lameter  * inactivity.
14257cc36bbdSChristoph Lameter  */
14267cc36bbdSChristoph Lameter static void vmstat_shepherd(struct work_struct *w);
14277cc36bbdSChristoph Lameter 
14287cc36bbdSChristoph Lameter static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
14297cc36bbdSChristoph Lameter 
14307cc36bbdSChristoph Lameter static void vmstat_shepherd(struct work_struct *w)
14317cc36bbdSChristoph Lameter {
14327cc36bbdSChristoph Lameter 	int cpu;
14337cc36bbdSChristoph Lameter 
14347cc36bbdSChristoph Lameter 	get_online_cpus();
14357cc36bbdSChristoph Lameter 	/* Check processors whose vmstat worker threads have been disabled */
14367cc36bbdSChristoph Lameter 	for_each_cpu(cpu, cpu_stat_off)
14377cc36bbdSChristoph Lameter 		if (need_update(cpu) &&
14387cc36bbdSChristoph Lameter 			cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
14397cc36bbdSChristoph Lameter 
14407cc36bbdSChristoph Lameter 			schedule_delayed_work_on(cpu, &per_cpu(vmstat_work, cpu),
14417cc36bbdSChristoph Lameter 				__round_jiffies_relative(sysctl_stat_interval, cpu));
14427cc36bbdSChristoph Lameter 
14437cc36bbdSChristoph Lameter 	put_online_cpus();
14447cc36bbdSChristoph Lameter 
14457cc36bbdSChristoph Lameter 	schedule_delayed_work(&shepherd,
14467cc36bbdSChristoph Lameter 		round_jiffies_relative(sysctl_stat_interval));
14477cc36bbdSChristoph Lameter 
14487cc36bbdSChristoph Lameter }
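/*
 * Division of labour between the two workers, roughly:
 *
 *	vmstat_update (per cpu)   folds diffs into the global counters and
 *	                          parks itself in cpu_stat_off when a pass
 *	                          finds nothing to fold
 *	vmstat_shepherd (global)  every sysctl_stat_interval, restarts the
 *	                          per-cpu worker on any parked cpu whose
 *	                          diffs have become non-zero again
 */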
14497cc36bbdSChristoph Lameter 
14507cc36bbdSChristoph Lameter static void __init start_shepherd_timer(void)
14517cc36bbdSChristoph Lameter {
14527cc36bbdSChristoph Lameter 	int cpu;
14537cc36bbdSChristoph Lameter 
14547cc36bbdSChristoph Lameter 	for_each_possible_cpu(cpu)
14557cc36bbdSChristoph Lameter 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
14567cc36bbdSChristoph Lameter 			vmstat_update);
14577cc36bbdSChristoph Lameter 
14587cc36bbdSChristoph Lameter 	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
14597cc36bbdSChristoph Lameter 		BUG();
14607cc36bbdSChristoph Lameter 	cpumask_copy(cpu_stat_off, cpu_online_mask);
14617cc36bbdSChristoph Lameter 
14627cc36bbdSChristoph Lameter 	schedule_delayed_work(&shepherd,
14637cc36bbdSChristoph Lameter 		round_jiffies_relative(sysctl_stat_interval));
1464d1187ed2SChristoph Lameter }
1465d1187ed2SChristoph Lameter 
1466807a1bd2SToshi Kani static void vmstat_cpu_dead(int node)
1467807a1bd2SToshi Kani {
1468807a1bd2SToshi Kani 	int cpu;
1469807a1bd2SToshi Kani 
1470807a1bd2SToshi Kani 	get_online_cpus();
1471807a1bd2SToshi Kani 	for_each_online_cpu(cpu)
1472807a1bd2SToshi Kani 		if (cpu_to_node(cpu) == node)
1473807a1bd2SToshi Kani 			goto end;
1474807a1bd2SToshi Kani 
1475807a1bd2SToshi Kani 	node_clear_state(node, N_CPU);
1476807a1bd2SToshi Kani end:
1477807a1bd2SToshi Kani 	put_online_cpus();
1478807a1bd2SToshi Kani }
1479807a1bd2SToshi Kani 
1480df9ecabaSChristoph Lameter /*
1481df9ecabaSChristoph Lameter  * Use the cpu notifier to ensure that the thresholds are recalculated
1482df9ecabaSChristoph Lameter  * when necessary.
1483df9ecabaSChristoph Lameter  */
14840db0628dSPaul Gortmaker static int vmstat_cpuup_callback(struct notifier_block *nfb,
1485df9ecabaSChristoph Lameter 		unsigned long action,
1486df9ecabaSChristoph Lameter 		void *hcpu)
1487df9ecabaSChristoph Lameter {
1488d1187ed2SChristoph Lameter 	long cpu = (long)hcpu;
1489d1187ed2SChristoph Lameter 
1490df9ecabaSChristoph Lameter 	switch (action) {
1491d1187ed2SChristoph Lameter 	case CPU_ONLINE:
1492d1187ed2SChristoph Lameter 	case CPU_ONLINE_FROZEN:
14935ee28a44SKAMEZAWA Hiroyuki 		refresh_zone_stat_thresholds();
1494ad596925SChristoph Lameter 		node_set_state(cpu_to_node(cpu), N_CPU);
14957cc36bbdSChristoph Lameter 		cpumask_set_cpu(cpu, cpu_stat_off);
1496d1187ed2SChristoph Lameter 		break;
1497d1187ed2SChristoph Lameter 	case CPU_DOWN_PREPARE:
1498d1187ed2SChristoph Lameter 	case CPU_DOWN_PREPARE_FROZEN:
1499afe2c511STejun Heo 		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
15007cc36bbdSChristoph Lameter 		cpumask_clear_cpu(cpu, cpu_stat_off);
1501d1187ed2SChristoph Lameter 		break;
1502d1187ed2SChristoph Lameter 	case CPU_DOWN_FAILED:
1503d1187ed2SChristoph Lameter 	case CPU_DOWN_FAILED_FROZEN:
15047cc36bbdSChristoph Lameter 		cpumask_set_cpu(cpu, cpu_stat_off);
1505d1187ed2SChristoph Lameter 		break;
1506df9ecabaSChristoph Lameter 	case CPU_DEAD:
15078bb78442SRafael J. Wysocki 	case CPU_DEAD_FROZEN:
1508df9ecabaSChristoph Lameter 		refresh_zone_stat_thresholds();
1509807a1bd2SToshi Kani 		vmstat_cpu_dead(cpu_to_node(cpu));
1510df9ecabaSChristoph Lameter 		break;
1511df9ecabaSChristoph Lameter 	default:
1512df9ecabaSChristoph Lameter 		break;
1513df9ecabaSChristoph Lameter 	}
1514df9ecabaSChristoph Lameter 	return NOTIFY_OK;
1515df9ecabaSChristoph Lameter }
1516df9ecabaSChristoph Lameter 
15170db0628dSPaul Gortmaker static struct notifier_block vmstat_notifier =
1518df9ecabaSChristoph Lameter 	{ &vmstat_cpuup_callback, NULL, 0 };
15198f32f7e5SAlexey Dobriyan #endif
1520df9ecabaSChristoph Lameter 
1521e2fc88d0SAdrian Bunk static int __init setup_vmstat(void)
1522df9ecabaSChristoph Lameter {
15238f32f7e5SAlexey Dobriyan #ifdef CONFIG_SMP
15240be94badSSrivatsa S. Bhat 	cpu_notifier_register_begin();
15250be94badSSrivatsa S. Bhat 	__register_cpu_notifier(&vmstat_notifier);
1526d1187ed2SChristoph Lameter 
15277cc36bbdSChristoph Lameter 	start_shepherd_timer();
15280be94badSSrivatsa S. Bhat 	cpu_notifier_register_done();
15298f32f7e5SAlexey Dobriyan #endif
15308f32f7e5SAlexey Dobriyan #ifdef CONFIG_PROC_FS
15318f32f7e5SAlexey Dobriyan 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
153274e2e8e8SAlexey Dobriyan 	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1533b6aa44abSAlexey Dobriyan 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
15345c9fe628SAlexey Dobriyan 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
15358f32f7e5SAlexey Dobriyan #endif
1536df9ecabaSChristoph Lameter 	return 0;
1537df9ecabaSChristoph Lameter }
1538df9ecabaSChristoph Lameter module_init(setup_vmstat)
1539d7a5752cSMel Gorman 
1540d7a5752cSMel Gorman #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1541d7a5752cSMel Gorman 
1542d7a5752cSMel Gorman /*
1543d7a5752cSMel Gorman  * Return an index indicating how much of the available free memory is
1544d7a5752cSMel Gorman  * unusable for an allocation of the requested size.
1545d7a5752cSMel Gorman  */
1546d7a5752cSMel Gorman static int unusable_free_index(unsigned int order,
1547d7a5752cSMel Gorman 				struct contig_page_info *info)
1548d7a5752cSMel Gorman {
1549d7a5752cSMel Gorman 	/* No free memory is interpreted as all free memory being unusable */
1550d7a5752cSMel Gorman 	if (info->free_pages == 0)
1551d7a5752cSMel Gorman 		return 1000;
1552d7a5752cSMel Gorman 
1553d7a5752cSMel Gorman 	/*
1554d7a5752cSMel Gorman 	 * The index is conceptually a value between 0 and 1; return it
1555d7a5752cSMel Gorman 	 * scaled to 3 decimal places, i.e. an integer between 0 and 1000.
1556d7a5752cSMel Gorman 	 *
1557d7a5752cSMel Gorman 	 * 0 => no fragmentation
1558d7a5752cSMel Gorman 	 * 1 => high fragmentation
1559d7a5752cSMel Gorman 	 */
1560d7a5752cSMel Gorman 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1561d7a5752cSMel Gorman 
1562d7a5752cSMel Gorman }
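/*
 * Worked example (numbers assumed): for order = 2, free_pages = 1000 and
 * free_blocks_suitable = 100 (i.e. 100 << 2 = 400 free pages sit in
 * blocks big enough for the request):
 *
 *	(1000 - (100 << 2)) * 1000 / 1000 = 600
 *
 * which unusable_show_print() renders as "0.600": 60% of the free memory
 * is unusable for an order-2 allocation.
 */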
1563d7a5752cSMel Gorman 
1564d7a5752cSMel Gorman static void unusable_show_print(struct seq_file *m,
1565d7a5752cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
1566d7a5752cSMel Gorman {
1567d7a5752cSMel Gorman 	unsigned int order;
1568d7a5752cSMel Gorman 	int index;
1569d7a5752cSMel Gorman 	struct contig_page_info info;
1570d7a5752cSMel Gorman 
1571d7a5752cSMel Gorman 	seq_printf(m, "Node %d, zone %8s ",
1572d7a5752cSMel Gorman 				pgdat->node_id,
1573d7a5752cSMel Gorman 				zone->name);
1574d7a5752cSMel Gorman 	for (order = 0; order < MAX_ORDER; ++order) {
1575d7a5752cSMel Gorman 		fill_contig_page_info(zone, order, &info);
1576d7a5752cSMel Gorman 		index = unusable_free_index(order, &info);
1577d7a5752cSMel Gorman 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1578d7a5752cSMel Gorman 	}
1579d7a5752cSMel Gorman 
1580d7a5752cSMel Gorman 	seq_putc(m, '\n');
1581d7a5752cSMel Gorman }
1582d7a5752cSMel Gorman 
1583d7a5752cSMel Gorman /*
1584d7a5752cSMel Gorman  * Display unusable free space index
1585d7a5752cSMel Gorman  *
1586d7a5752cSMel Gorman  * The unusable free space index measures how much of the available free
1587d7a5752cSMel Gorman  * memory cannot be used to satisfy an allocation of a given size and is a
1588d7a5752cSMel Gorman  * value between 0 and 1. The higher the value, the more of the free memory is
1589d7a5752cSMel Gorman  * unusable and, by implication, the worse the external fragmentation is. This
1590d7a5752cSMel Gorman  * can be expressed as a percentage by multiplying by 100.
1591d7a5752cSMel Gorman  */
1592d7a5752cSMel Gorman static int unusable_show(struct seq_file *m, void *arg)
1593d7a5752cSMel Gorman {
1594d7a5752cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1595d7a5752cSMel Gorman 
1596d7a5752cSMel Gorman 	/* check memoryless node */
1597a47b53c5SLai Jiangshan 	if (!node_state(pgdat->node_id, N_MEMORY))
1598d7a5752cSMel Gorman 		return 0;
1599d7a5752cSMel Gorman 
1600d7a5752cSMel Gorman 	walk_zones_in_node(m, pgdat, unusable_show_print);
1601d7a5752cSMel Gorman 
1602d7a5752cSMel Gorman 	return 0;
1603d7a5752cSMel Gorman }
1604d7a5752cSMel Gorman 
1605d7a5752cSMel Gorman static const struct seq_operations unusable_op = {
1606d7a5752cSMel Gorman 	.start	= frag_start,
1607d7a5752cSMel Gorman 	.next	= frag_next,
1608d7a5752cSMel Gorman 	.stop	= frag_stop,
1609d7a5752cSMel Gorman 	.show	= unusable_show,
1610d7a5752cSMel Gorman };
1611d7a5752cSMel Gorman 
1612d7a5752cSMel Gorman static int unusable_open(struct inode *inode, struct file *file)
1613d7a5752cSMel Gorman {
1614d7a5752cSMel Gorman 	return seq_open(file, &unusable_op);
1615d7a5752cSMel Gorman }
1616d7a5752cSMel Gorman 
1617d7a5752cSMel Gorman static const struct file_operations unusable_file_ops = {
1618d7a5752cSMel Gorman 	.open		= unusable_open,
1619d7a5752cSMel Gorman 	.read		= seq_read,
1620d7a5752cSMel Gorman 	.llseek		= seq_lseek,
1621d7a5752cSMel Gorman 	.release	= seq_release,
1622d7a5752cSMel Gorman };
1623d7a5752cSMel Gorman 
1624f1a5ab12SMel Gorman static void extfrag_show_print(struct seq_file *m,
1625f1a5ab12SMel Gorman 					pg_data_t *pgdat, struct zone *zone)
1626f1a5ab12SMel Gorman {
1627f1a5ab12SMel Gorman 	unsigned int order;
1628f1a5ab12SMel Gorman 	int index;
1629f1a5ab12SMel Gorman 
1630f1a5ab12SMel Gorman 	/* Alloc on stack as interrupts are disabled for zone walk */
1631f1a5ab12SMel Gorman 	struct contig_page_info info;
1632f1a5ab12SMel Gorman 
1633f1a5ab12SMel Gorman 	seq_printf(m, "Node %d, zone %8s ",
1634f1a5ab12SMel Gorman 				pgdat->node_id,
1635f1a5ab12SMel Gorman 				zone->name);
1636f1a5ab12SMel Gorman 	for (order = 0; order < MAX_ORDER; ++order) {
1637f1a5ab12SMel Gorman 		fill_contig_page_info(zone, order, &info);
163856de7263SMel Gorman 		index = __fragmentation_index(order, &info);
1639f1a5ab12SMel Gorman 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1640f1a5ab12SMel Gorman 	}
1641f1a5ab12SMel Gorman 
1642f1a5ab12SMel Gorman 	seq_putc(m, '\n');
1643f1a5ab12SMel Gorman }
1644f1a5ab12SMel Gorman 
1645f1a5ab12SMel Gorman /*
1646f1a5ab12SMel Gorman  * Display the fragmentation index for each order at which an allocation would fail
1647f1a5ab12SMel Gorman  */
1648f1a5ab12SMel Gorman static int extfrag_show(struct seq_file *m, void *arg)
1649f1a5ab12SMel Gorman {
1650f1a5ab12SMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1651f1a5ab12SMel Gorman 
1652f1a5ab12SMel Gorman 	walk_zones_in_node(m, pgdat, extfrag_show_print);
1653f1a5ab12SMel Gorman 
1654f1a5ab12SMel Gorman 	return 0;
1655f1a5ab12SMel Gorman }
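/*
 * Reading the index: a value near 0 suggests an allocation failure at
 * that order would stem from a plain lack of free memory, while a value
 * approaching 1 (printed as thousandths) points at external
 * fragmentation, which is the case memory compaction can help with. The
 * formula itself lives in __fragmentation_index() earlier in this file.
 */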
1656f1a5ab12SMel Gorman 
1657f1a5ab12SMel Gorman static const struct seq_operations extfrag_op = {
1658f1a5ab12SMel Gorman 	.start	= frag_start,
1659f1a5ab12SMel Gorman 	.next	= frag_next,
1660f1a5ab12SMel Gorman 	.stop	= frag_stop,
1661f1a5ab12SMel Gorman 	.show	= extfrag_show,
1662f1a5ab12SMel Gorman };
1663f1a5ab12SMel Gorman 
1664f1a5ab12SMel Gorman static int extfrag_open(struct inode *inode, struct file *file)
1665f1a5ab12SMel Gorman {
1666f1a5ab12SMel Gorman 	return seq_open(file, &extfrag_op);
1667f1a5ab12SMel Gorman }
1668f1a5ab12SMel Gorman 
1669f1a5ab12SMel Gorman static const struct file_operations extfrag_file_ops = {
1670f1a5ab12SMel Gorman 	.open		= extfrag_open,
1671f1a5ab12SMel Gorman 	.read		= seq_read,
1672f1a5ab12SMel Gorman 	.llseek		= seq_lseek,
1673f1a5ab12SMel Gorman 	.release	= seq_release,
1674f1a5ab12SMel Gorman };
1675f1a5ab12SMel Gorman 
1676d7a5752cSMel Gorman static int __init extfrag_debug_init(void)
1677d7a5752cSMel Gorman {
1678bde8bd8aSSasikantha babu 	struct dentry *extfrag_debug_root;
1679bde8bd8aSSasikantha babu 
1680d7a5752cSMel Gorman 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1681d7a5752cSMel Gorman 	if (!extfrag_debug_root)
1682d7a5752cSMel Gorman 		return -ENOMEM;
1683d7a5752cSMel Gorman 
1684d7a5752cSMel Gorman 	if (!debugfs_create_file("unusable_index", 0444,
1685d7a5752cSMel Gorman 			extfrag_debug_root, NULL, &unusable_file_ops))
1686bde8bd8aSSasikantha babu 		goto fail;
1687d7a5752cSMel Gorman 
1688f1a5ab12SMel Gorman 	if (!debugfs_create_file("extfrag_index", 0444,
1689f1a5ab12SMel Gorman 			extfrag_debug_root, NULL, &extfrag_file_ops))
1690bde8bd8aSSasikantha babu 		goto fail;
1691f1a5ab12SMel Gorman 
1692d7a5752cSMel Gorman 	return 0;
1693bde8bd8aSSasikantha babu fail:
1694bde8bd8aSSasikantha babu 	debugfs_remove_recursive(extfrag_debug_root);
1695bde8bd8aSSasikantha babu 	return -ENOMEM;
1696d7a5752cSMel Gorman }
1697d7a5752cSMel Gorman 
1698d7a5752cSMel Gorman module_init(extfrag_debug_init);
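/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug): the
 * two files created above appear as
 *
 *	/sys/kernel/debug/extfrag/unusable_index
 *	/sys/kernel/debug/extfrag/extfrag_index
 *
 * each with one row per node/zone and one column per order, e.g.
 * (values assumed):
 *
 *	Node 0, zone   Normal 0.000 0.054 0.211 ...
 */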
1699d7a5752cSMel Gorman #endif
1700