xref: /linux/mm/vmstat.c (revision 397487db696cae0b026a474a5cd66f4e372995e6)
1f6ac2354SChristoph Lameter /*
2f6ac2354SChristoph Lameter  *  linux/mm/vmstat.c
3f6ac2354SChristoph Lameter  *
4f6ac2354SChristoph Lameter  *  Manages VM statistics
5f6ac2354SChristoph Lameter  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
62244b95aSChristoph Lameter  *
72244b95aSChristoph Lameter  *  zoned VM statistics
82244b95aSChristoph Lameter  *  Copyright (C) 2006 Silicon Graphics, Inc.,
92244b95aSChristoph Lameter  *		Christoph Lameter <christoph@lameter.com>
10f6ac2354SChristoph Lameter  */
118f32f7e5SAlexey Dobriyan #include <linux/fs.h>
12f6ac2354SChristoph Lameter #include <linux/mm.h>
134e950f6fSAlexey Dobriyan #include <linux/err.h>
142244b95aSChristoph Lameter #include <linux/module.h>
155a0e3ad6STejun Heo #include <linux/slab.h>
16df9ecabaSChristoph Lameter #include <linux/cpu.h>
17c748e134SAdrian Bunk #include <linux/vmstat.h>
18e8edc6e0SAlexey Dobriyan #include <linux/sched.h>
19f1a5ab12SMel Gorman #include <linux/math64.h>
2079da826aSMichael Rubin #include <linux/writeback.h>
2136deb0beSNamhyung Kim #include <linux/compaction.h>
22f6ac2354SChristoph Lameter 
23f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
24f8891e5eSChristoph Lameter DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
25f8891e5eSChristoph Lameter EXPORT_PER_CPU_SYMBOL(vm_event_states);
26f8891e5eSChristoph Lameter 
2731f961a8SMinchan Kim static void sum_vm_events(unsigned long *ret)
28f8891e5eSChristoph Lameter {
299eccf2a8SChristoph Lameter 	int cpu;
30f8891e5eSChristoph Lameter 	int i;
31f8891e5eSChristoph Lameter 
32f8891e5eSChristoph Lameter 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
33f8891e5eSChristoph Lameter 
3431f961a8SMinchan Kim 	for_each_online_cpu(cpu) {
35f8891e5eSChristoph Lameter 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
36f8891e5eSChristoph Lameter 
37f8891e5eSChristoph Lameter 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
38f8891e5eSChristoph Lameter 			ret[i] += this->event[i];
39f8891e5eSChristoph Lameter 	}
40f8891e5eSChristoph Lameter }
41f8891e5eSChristoph Lameter 
42f8891e5eSChristoph Lameter /*
43f8891e5eSChristoph Lameter  * Accumulate the vm event counters across all CPUs.
44f8891e5eSChristoph Lameter  * The result is unavoidably approximate - it can change
45f8891e5eSChristoph Lameter  * during and after execution of this function.
46f8891e5eSChristoph Lameter  */
47f8891e5eSChristoph Lameter void all_vm_events(unsigned long *ret)
48f8891e5eSChristoph Lameter {
49b5be1132SKOSAKI Motohiro 	get_online_cpus();
5031f961a8SMinchan Kim 	sum_vm_events(ret);
51b5be1132SKOSAKI Motohiro 	put_online_cpus();
52f8891e5eSChristoph Lameter }
5332dd66fcSHeiko Carstens EXPORT_SYMBOL_GPL(all_vm_events);
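
/*
 * Illustrative sketch (not part of the original file): a caller snapshots
 * the event counters into an NR_VM_EVENT_ITEMS-sized array and diffs two
 * snapshots. The helper name and the one-second msleep() interval are
 * assumptions made up for this example.
 */
#if 0
static unsigned long sample_pgpgin_delta(void)
{
	unsigned long before[NR_VM_EVENT_ITEMS];
	unsigned long after[NR_VM_EVENT_ITEMS];

	all_vm_events(before);
	msleep(1000);	/* hypothetical one-second sample interval */
	all_vm_events(after);

	/* counters are monotonic, so the difference is the event count */
	return after[PGPGIN] - before[PGPGIN];
}
#endif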
54f8891e5eSChristoph Lameter 
55f8891e5eSChristoph Lameter #ifdef CONFIG_HOTPLUG
56f8891e5eSChristoph Lameter /*
57f8891e5eSChristoph Lameter  * Fold the foreign cpu events into our own.
58f8891e5eSChristoph Lameter  *
59f8891e5eSChristoph Lameter  * This adds the foreign cpu's events to the current processor's
60f8891e5eSChristoph Lameter  * counters but keeps the global counts constant.
61f8891e5eSChristoph Lameter  */
62f8891e5eSChristoph Lameter void vm_events_fold_cpu(int cpu)
63f8891e5eSChristoph Lameter {
64f8891e5eSChristoph Lameter 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
65f8891e5eSChristoph Lameter 	int i;
66f8891e5eSChristoph Lameter 
67f8891e5eSChristoph Lameter 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
68f8891e5eSChristoph Lameter 		count_vm_events(i, fold_state->event[i]);
69f8891e5eSChristoph Lameter 		fold_state->event[i] = 0;
70f8891e5eSChristoph Lameter 	}
71f8891e5eSChristoph Lameter }
72f8891e5eSChristoph Lameter #endif /* CONFIG_HOTPLUG */
73f8891e5eSChristoph Lameter 
74f8891e5eSChristoph Lameter #endif /* CONFIG_VM_EVENT_COUNTERS */
75f8891e5eSChristoph Lameter 
762244b95aSChristoph Lameter /*
772244b95aSChristoph Lameter  * Manage combined zone based / global counters
782244b95aSChristoph Lameter  *
792244b95aSChristoph Lameter  * vm_stat contains the global counters
802244b95aSChristoph Lameter  */
81a1cb2c60SDimitri Sivanich atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
822244b95aSChristoph Lameter EXPORT_SYMBOL(vm_stat);
832244b95aSChristoph Lameter 
842244b95aSChristoph Lameter #ifdef CONFIG_SMP
852244b95aSChristoph Lameter 
86b44129b3SMel Gorman int calculate_pressure_threshold(struct zone *zone)
8788f5acf8SMel Gorman {
8888f5acf8SMel Gorman 	int threshold;
8988f5acf8SMel Gorman 	int watermark_distance;
9088f5acf8SMel Gorman 
9188f5acf8SMel Gorman 	/*
9288f5acf8SMel Gorman 	 * As vmstats are not up to date, there is drift between the estimated
9388f5acf8SMel Gorman 	 * and real values. For high thresholds and a high number of CPUs, it
9488f5acf8SMel Gorman 	 * is possible for the min watermark to be breached while the estimated
9588f5acf8SMel Gorman 	 * value looks fine. The pressure threshold is a reduced value such
9688f5acf8SMel Gorman 	 * that even the maximum amount of drift will not accidentally breach
9788f5acf8SMel Gorman 	 * the min watermark.
9888f5acf8SMel Gorman 	 */
9988f5acf8SMel Gorman 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
10088f5acf8SMel Gorman 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
10188f5acf8SMel Gorman 
10288f5acf8SMel Gorman 	/*
10388f5acf8SMel Gorman 	 * Maximum threshold is 125
10488f5acf8SMel Gorman 	 */
10588f5acf8SMel Gorman 	threshold = min(125, threshold);
10688f5acf8SMel Gorman 
10788f5acf8SMel Gorman 	return threshold;
10888f5acf8SMel Gorman }
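
/*
 * Worked example (illustrative, numbers assumed): with a gap of
 * low_wmark - min_wmark = 512 pages and 8 online CPUs, the pressure
 * threshold is max(1, 512 / 8) = 64, below the 125 cap. Even with every
 * CPU's per-cpu diff at its limit, the total drift is 8 * 64 = 512 pages,
 * so a zone whose estimate sits at the low watermark cannot actually be
 * below the min watermark.
 */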
10988f5acf8SMel Gorman 
110b44129b3SMel Gorman int calculate_normal_threshold(struct zone *zone)
111df9ecabaSChristoph Lameter {
112df9ecabaSChristoph Lameter 	int threshold;
113df9ecabaSChristoph Lameter 	int mem;	/* memory in 128 MB units */
1142244b95aSChristoph Lameter 
1152244b95aSChristoph Lameter 	/*
116df9ecabaSChristoph Lameter 	 * The threshold scales with the number of processors and the amount
117df9ecabaSChristoph Lameter 	 * of memory per zone. More memory means that we can defer updates for
118df9ecabaSChristoph Lameter 	 * longer; more processors could lead to more contention.
119df9ecabaSChristoph Lameter 	 * fls() is used to have a cheap way of logarithmic scaling.
1202244b95aSChristoph Lameter 	 *
121df9ecabaSChristoph Lameter 	 * Some sample thresholds:
122df9ecabaSChristoph Lameter 	 *
123df9ecabaSChristoph Lameter 	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
124df9ecabaSChristoph Lameter 	 * ------------------------------------------------------------------
125df9ecabaSChristoph Lameter 	 * 8		1		1	0.9-1 GB	4
126df9ecabaSChristoph Lameter 	 * 16		2		2	0.9-1 GB	4
127df9ecabaSChristoph Lameter 	 * 20 		2		2	1-2 GB		5
128df9ecabaSChristoph Lameter 	 * 24		2		2	2-4 GB		6
129df9ecabaSChristoph Lameter 	 * 28		2		2	4-8 GB		7
130df9ecabaSChristoph Lameter 	 * 32		2		2	8-16 GB		8
131df9ecabaSChristoph Lameter 	 * 4		2		2	<128M		1
132df9ecabaSChristoph Lameter 	 * 30		4		3	2-4 GB		5
133df9ecabaSChristoph Lameter 	 * 48		4		3	8-16 GB		8
134df9ecabaSChristoph Lameter 	 * 32		8		4	1-2 GB		4
135df9ecabaSChristoph Lameter 	 * 32		8		4	0.9-1GB		4
136df9ecabaSChristoph Lameter 	 * 10		16		5	<128M		1
137df9ecabaSChristoph Lameter 	 * 40		16		5	900M		4
138df9ecabaSChristoph Lameter 	 * 70		64		7	2-4 GB		5
139df9ecabaSChristoph Lameter 	 * 84		64		7	4-8 GB		6
140df9ecabaSChristoph Lameter 	 * 108		512		9	4-8 GB		6
141df9ecabaSChristoph Lameter 	 * 125		1024		10	8-16 GB		8
142df9ecabaSChristoph Lameter 	 * 125		1024		10	16-32 GB	9
1432244b95aSChristoph Lameter 	 */
144df9ecabaSChristoph Lameter 
145df9ecabaSChristoph Lameter 	mem = zone->present_pages >> (27 - PAGE_SHIFT);
146df9ecabaSChristoph Lameter 
147df9ecabaSChristoph Lameter 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
148df9ecabaSChristoph Lameter 
149df9ecabaSChristoph Lameter 	/*
150df9ecabaSChristoph Lameter 	 * Maximum threshold is 125
151df9ecabaSChristoph Lameter 	 */
152df9ecabaSChristoph Lameter 	threshold = min(125, threshold);
153df9ecabaSChristoph Lameter 
154df9ecabaSChristoph Lameter 	return threshold;
155df9ecabaSChristoph Lameter }
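
/*
 * Worked example (illustrative): with 2 online CPUs and a 2-4 GB zone,
 * mem = present_pages >> (27 - PAGE_SHIFT) lies in 16..31 (128 MB units),
 * so fls(mem) = 5 and fls(2) = 2, giving
 * threshold = 2 * 2 * (1 + 5) = 24, matching the table above.
 */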
156df9ecabaSChristoph Lameter 
157df9ecabaSChristoph Lameter /*
158df9ecabaSChristoph Lameter  * Refresh the thresholds for each zone.
159df9ecabaSChristoph Lameter  */
160a6cccdc3SKOSAKI Motohiro void refresh_zone_stat_thresholds(void)
1612244b95aSChristoph Lameter {
162df9ecabaSChristoph Lameter 	struct zone *zone;
163df9ecabaSChristoph Lameter 	int cpu;
164df9ecabaSChristoph Lameter 	int threshold;
165df9ecabaSChristoph Lameter 
166ee99c71cSKOSAKI Motohiro 	for_each_populated_zone(zone) {
167aa454840SChristoph Lameter 		unsigned long max_drift, tolerate_drift;
168aa454840SChristoph Lameter 
169b44129b3SMel Gorman 		threshold = calculate_normal_threshold(zone);
170df9ecabaSChristoph Lameter 
171df9ecabaSChristoph Lameter 		for_each_online_cpu(cpu)
17299dcc3e5SChristoph Lameter 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
17399dcc3e5SChristoph Lameter 							= threshold;
174aa454840SChristoph Lameter 
175aa454840SChristoph Lameter 		/*
176aa454840SChristoph Lameter 		 * Only set percpu_drift_mark if there is a danger that
177aa454840SChristoph Lameter 		 * NR_FREE_PAGES reports the low watermark is ok when in fact
178aa454840SChristoph Lameter 		 * the min watermark could be breached by an allocation
179aa454840SChristoph Lameter 		 */
180aa454840SChristoph Lameter 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
181aa454840SChristoph Lameter 		max_drift = num_online_cpus() * threshold;
182aa454840SChristoph Lameter 		if (max_drift > tolerate_drift)
183aa454840SChristoph Lameter 			zone->percpu_drift_mark = high_wmark_pages(zone) +
184aa454840SChristoph Lameter 					max_drift;
185df9ecabaSChristoph Lameter 	}
1862244b95aSChristoph Lameter }
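
/*
 * Worked example (illustrative, numbers assumed): with threshold = 32 and
 * 8 online CPUs, max_drift is 256 pages. If low_wmark - min_wmark is only
 * 128 pages, the NR_FREE_PAGES estimate can overstate reality by more than
 * the watermark gap, so percpu_drift_mark is set to high_wmark + 256;
 * readers below that mark must sum in the per-cpu diffs for a precise
 * value (see zone_page_state_snapshot()).
 */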
1872244b95aSChristoph Lameter 
188b44129b3SMel Gorman void set_pgdat_percpu_threshold(pg_data_t *pgdat,
189b44129b3SMel Gorman 				int (*calculate_pressure)(struct zone *))
19088f5acf8SMel Gorman {
19188f5acf8SMel Gorman 	struct zone *zone;
19288f5acf8SMel Gorman 	int cpu;
19388f5acf8SMel Gorman 	int threshold;
19488f5acf8SMel Gorman 	int i;
19588f5acf8SMel Gorman 
19688f5acf8SMel Gorman 	for (i = 0; i < pgdat->nr_zones; i++) {
19788f5acf8SMel Gorman 		zone = &pgdat->node_zones[i];
19888f5acf8SMel Gorman 		if (!zone->percpu_drift_mark)
19988f5acf8SMel Gorman 			continue;
20088f5acf8SMel Gorman 
201b44129b3SMel Gorman 		threshold = (*calculate_pressure)(zone);
202b44129b3SMel Gorman 		for_each_possible_cpu(cpu)
20388f5acf8SMel Gorman 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
20488f5acf8SMel Gorman 							= threshold;
20588f5acf8SMel Gorman 	}
20688f5acf8SMel Gorman }
20788f5acf8SMel Gorman 
2082244b95aSChristoph Lameter /*
2092244b95aSChristoph Lameter  * For use when we know that interrupts are disabled.
2102244b95aSChristoph Lameter  */
2112244b95aSChristoph Lameter void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
2122244b95aSChristoph Lameter 				int delta)
2132244b95aSChristoph Lameter {
21412938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
21512938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
2162244b95aSChristoph Lameter 	long x;
21712938a92SChristoph Lameter 	long t;
2182244b95aSChristoph Lameter 
21912938a92SChristoph Lameter 	x = delta + __this_cpu_read(*p);
2202244b95aSChristoph Lameter 
22112938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
22212938a92SChristoph Lameter 
22312938a92SChristoph Lameter 	if (unlikely(x > t || x < -t)) {
2242244b95aSChristoph Lameter 		zone_page_state_add(x, zone, item);
2252244b95aSChristoph Lameter 		x = 0;
2262244b95aSChristoph Lameter 	}
22712938a92SChristoph Lameter 	__this_cpu_write(*p, x);
2282244b95aSChristoph Lameter }
2292244b95aSChristoph Lameter EXPORT_SYMBOL(__mod_zone_page_state);
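
/*
 * Worked example (illustrative, numbers assumed): with stat_threshold
 * t = 32, a per-cpu diff of 30 and delta = +5, x becomes 35 > t, so all
 * 35 pages are folded into the zone and global counters and the per-cpu
 * diff resets to 0. Deltas that keep |x| <= t touch only the per-cpu
 * counter and remain invisible to readers until the next fold.
 */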
2302244b95aSChristoph Lameter 
2312244b95aSChristoph Lameter /*
2322244b95aSChristoph Lameter  * Optimized increment and decrement functions.
2332244b95aSChristoph Lameter  *
2342244b95aSChristoph Lameter  * These are only for a single page and therefore can take a struct page *
2352244b95aSChristoph Lameter  * argument instead of struct zone *. This allows the inclusion of the code
2362244b95aSChristoph Lameter  * generated for page_zone(page) into the optimized functions.
2372244b95aSChristoph Lameter  *
2382244b95aSChristoph Lameter  * No overflow check is necessary and therefore the differential can be
2392244b95aSChristoph Lameter  * incremented or decremented in place which may allow the compiler to
2402244b95aSChristoph Lameter  * generate better code.
2412244b95aSChristoph Lameter  * The increment or decrement is known and therefore one boundary check can
2422244b95aSChristoph Lameter  * be omitted.
2432244b95aSChristoph Lameter  *
244df9ecabaSChristoph Lameter  * NOTE: These functions are very performance sensitive. Change only
245df9ecabaSChristoph Lameter  * with care.
246df9ecabaSChristoph Lameter  *
2472244b95aSChristoph Lameter  * Some processors have inc/dec instructions that are atomic vs an interrupt.
2482244b95aSChristoph Lameter  * However, the code must first determine the differential location in a zone
2492244b95aSChristoph Lameter  * based on the processor number and then inc/dec the counter. There is no
2502244b95aSChristoph Lameter  * guarantee without disabling preemption that the processor will not change
2512244b95aSChristoph Lameter  * in between and therefore the atomicity vs. interrupt cannot be exploited
2522244b95aSChristoph Lameter  * in a useful way here.
2532244b95aSChristoph Lameter  */
254c8785385SChristoph Lameter void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
2552244b95aSChristoph Lameter {
25612938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
25712938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
25812938a92SChristoph Lameter 	s8 v, t;
2592244b95aSChristoph Lameter 
260908ee0f1SChristoph Lameter 	v = __this_cpu_inc_return(*p);
26112938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
26212938a92SChristoph Lameter 	if (unlikely(v > t)) {
26312938a92SChristoph Lameter 		s8 overstep = t >> 1;
2642244b95aSChristoph Lameter 
26512938a92SChristoph Lameter 		zone_page_state_add(v + overstep, zone, item);
26612938a92SChristoph Lameter 		__this_cpu_write(*p, -overstep);
2672244b95aSChristoph Lameter 	}
2682244b95aSChristoph Lameter }
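
/*
 * Worked example (illustrative): with t = 32, the increment that takes the
 * per-cpu diff to 33 triggers the slow path: overstep = t >> 1 = 16, so
 * 33 + 16 = 49 is added to the zone counter and the diff is set to -16.
 * The next 48 increments on this cpu then stay purely local.
 */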
269ca889e6cSChristoph Lameter 
270ca889e6cSChristoph Lameter void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
271ca889e6cSChristoph Lameter {
272ca889e6cSChristoph Lameter 	__inc_zone_state(page_zone(page), item);
273ca889e6cSChristoph Lameter }
2742244b95aSChristoph Lameter EXPORT_SYMBOL(__inc_zone_page_state);
2752244b95aSChristoph Lameter 
276c8785385SChristoph Lameter void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
2772244b95aSChristoph Lameter {
27812938a92SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
27912938a92SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
28012938a92SChristoph Lameter 	s8 v, t;
2812244b95aSChristoph Lameter 
282908ee0f1SChristoph Lameter 	v = __this_cpu_dec_return(*p);
28312938a92SChristoph Lameter 	t = __this_cpu_read(pcp->stat_threshold);
28412938a92SChristoph Lameter 	if (unlikely(v < -t)) {
28512938a92SChristoph Lameter 		s8 overstep = t >> 1;
2862244b95aSChristoph Lameter 
28712938a92SChristoph Lameter 		zone_page_state_add(v - overstep, zone, item);
28812938a92SChristoph Lameter 		__this_cpu_write(*p, overstep);
2892244b95aSChristoph Lameter 	}
2902244b95aSChristoph Lameter }
291c8785385SChristoph Lameter 
292c8785385SChristoph Lameter void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
293c8785385SChristoph Lameter {
294c8785385SChristoph Lameter 	__dec_zone_state(page_zone(page), item);
295c8785385SChristoph Lameter }
2962244b95aSChristoph Lameter EXPORT_SYMBOL(__dec_zone_page_state);
2972244b95aSChristoph Lameter 
2984156153cSHeiko Carstens #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
2997c839120SChristoph Lameter /*
3007c839120SChristoph Lameter  * If we have cmpxchg_local support then we do not need to incur the overhead
3017c839120SChristoph Lameter  * that comes with local_irq_save/restore when we use this_cpu_cmpxchg.
3027c839120SChristoph Lameter  *
3037c839120SChristoph Lameter  * mod_state() modifies the zone counter state through atomic per cpu
3047c839120SChristoph Lameter  * operations.
3057c839120SChristoph Lameter  *
3067c839120SChristoph Lameter  * Overstep mode specifies how overstep should be handled:
3077c839120SChristoph Lameter  *     0       No overstepping
3087c839120SChristoph Lameter  *     1       Overstepping half of threshold
3097c839120SChristoph Lameter  *     -1      Overstepping minus half of threshold
3107c839120SChristoph Lameter  */
3117c839120SChristoph Lameter static inline void mod_state(struct zone *zone,
3127c839120SChristoph Lameter        enum zone_stat_item item, int delta, int overstep_mode)
3137c839120SChristoph Lameter {
3147c839120SChristoph Lameter 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
3157c839120SChristoph Lameter 	s8 __percpu *p = pcp->vm_stat_diff + item;
3167c839120SChristoph Lameter 	long o, n, t, z;
3177c839120SChristoph Lameter 
3187c839120SChristoph Lameter 	do {
3197c839120SChristoph Lameter 		z = 0;  /* overflow to zone counters */
3207c839120SChristoph Lameter 
3217c839120SChristoph Lameter 		/*
3227c839120SChristoph Lameter 		 * The fetching of the stat_threshold is racy. We may apply
3237c839120SChristoph Lameter 		 * a counter threshold to the wrong cpu if we get
324d3bc2367SChristoph Lameter 		 * rescheduled while executing here. However, the next
325d3bc2367SChristoph Lameter 		 * counter update will apply the threshold again and
326d3bc2367SChristoph Lameter 		 * therefore bring the counter under the threshold again.
327d3bc2367SChristoph Lameter 		 *
328d3bc2367SChristoph Lameter 		 * Most of the time the thresholds are the same anyway
329d3bc2367SChristoph Lameter 		 * for all cpus in a zone.
3307c839120SChristoph Lameter 		 */
3317c839120SChristoph Lameter 		t = this_cpu_read(pcp->stat_threshold);
3327c839120SChristoph Lameter 
3337c839120SChristoph Lameter 		o = this_cpu_read(*p);
3347c839120SChristoph Lameter 		n = delta + o;
3357c839120SChristoph Lameter 
3367c839120SChristoph Lameter 		if (n > t || n < -t) {
3377c839120SChristoph Lameter 			int os = overstep_mode * (t >> 1);
3387c839120SChristoph Lameter 
3397c839120SChristoph Lameter 			/* Overflow must be added to zone counters */
3407c839120SChristoph Lameter 			z = n + os;
3417c839120SChristoph Lameter 			n = -os;
3427c839120SChristoph Lameter 		}
3437c839120SChristoph Lameter 	} while (this_cpu_cmpxchg(*p, o, n) != o);
3447c839120SChristoph Lameter 
3457c839120SChristoph Lameter 	if (z)
3467c839120SChristoph Lameter 		zone_page_state_add(z, zone, item);
3477c839120SChristoph Lameter }
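
/*
 * Worked example (illustrative): mod_state(zone, item, 40, 1) with t = 32
 * and a per-cpu diff of o = 0 computes n = 40 > t, so os = 16, z = 56 and
 * n = -16. If the this_cpu_cmpxchg() succeeds, the local diff becomes -16
 * and 56 goes to the zone and global counters (net +40); if another update
 * raced in, the loop retries against the fresh value.
 */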
3487c839120SChristoph Lameter 
3497c839120SChristoph Lameter void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
3507c839120SChristoph Lameter 					int delta)
3517c839120SChristoph Lameter {
3527c839120SChristoph Lameter 	mod_state(zone, item, delta, 0);
3537c839120SChristoph Lameter }
3547c839120SChristoph Lameter EXPORT_SYMBOL(mod_zone_page_state);
3557c839120SChristoph Lameter 
3567c839120SChristoph Lameter void inc_zone_state(struct zone *zone, enum zone_stat_item item)
3577c839120SChristoph Lameter {
3587c839120SChristoph Lameter 	mod_state(zone, item, 1, 1);
3597c839120SChristoph Lameter }
3607c839120SChristoph Lameter 
3617c839120SChristoph Lameter void inc_zone_page_state(struct page *page, enum zone_stat_item item)
3627c839120SChristoph Lameter {
3637c839120SChristoph Lameter 	mod_state(page_zone(page), item, 1, 1);
3647c839120SChristoph Lameter }
3657c839120SChristoph Lameter EXPORT_SYMBOL(inc_zone_page_state);
3667c839120SChristoph Lameter 
3677c839120SChristoph Lameter void dec_zone_page_state(struct page *page, enum zone_stat_item item)
3687c839120SChristoph Lameter {
3697c839120SChristoph Lameter 	mod_state(page_zone(page), item, -1, -1);
3707c839120SChristoph Lameter }
3717c839120SChristoph Lameter EXPORT_SYMBOL(dec_zone_page_state);
3727c839120SChristoph Lameter #else
3737c839120SChristoph Lameter /*
3747c839120SChristoph Lameter  * Use interrupt disable to serialize counter updates
3757c839120SChristoph Lameter  */
3767c839120SChristoph Lameter void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
3777c839120SChristoph Lameter 					int delta)
3787c839120SChristoph Lameter {
3797c839120SChristoph Lameter 	unsigned long flags;
3807c839120SChristoph Lameter 
3817c839120SChristoph Lameter 	local_irq_save(flags);
3827c839120SChristoph Lameter 	__mod_zone_page_state(zone, item, delta);
3837c839120SChristoph Lameter 	local_irq_restore(flags);
3847c839120SChristoph Lameter }
3857c839120SChristoph Lameter EXPORT_SYMBOL(mod_zone_page_state);
3867c839120SChristoph Lameter 
387ca889e6cSChristoph Lameter void inc_zone_state(struct zone *zone, enum zone_stat_item item)
388ca889e6cSChristoph Lameter {
389ca889e6cSChristoph Lameter 	unsigned long flags;
390ca889e6cSChristoph Lameter 
391ca889e6cSChristoph Lameter 	local_irq_save(flags);
392ca889e6cSChristoph Lameter 	__inc_zone_state(zone, item);
393ca889e6cSChristoph Lameter 	local_irq_restore(flags);
394ca889e6cSChristoph Lameter }
395ca889e6cSChristoph Lameter 
3962244b95aSChristoph Lameter void inc_zone_page_state(struct page *page, enum zone_stat_item item)
3972244b95aSChristoph Lameter {
3982244b95aSChristoph Lameter 	unsigned long flags;
3992244b95aSChristoph Lameter 	struct zone *zone;
4002244b95aSChristoph Lameter 
4012244b95aSChristoph Lameter 	zone = page_zone(page);
4022244b95aSChristoph Lameter 	local_irq_save(flags);
403ca889e6cSChristoph Lameter 	__inc_zone_state(zone, item);
4042244b95aSChristoph Lameter 	local_irq_restore(flags);
4052244b95aSChristoph Lameter }
4062244b95aSChristoph Lameter EXPORT_SYMBOL(inc_zone_page_state);
4072244b95aSChristoph Lameter 
4082244b95aSChristoph Lameter void dec_zone_page_state(struct page *page, enum zone_stat_item item)
4092244b95aSChristoph Lameter {
4102244b95aSChristoph Lameter 	unsigned long flags;
4112244b95aSChristoph Lameter 
4122244b95aSChristoph Lameter 	local_irq_save(flags);
413a302eb4eSChristoph Lameter 	__dec_zone_page_state(page, item);
4142244b95aSChristoph Lameter 	local_irq_restore(flags);
4152244b95aSChristoph Lameter }
4162244b95aSChristoph Lameter EXPORT_SYMBOL(dec_zone_page_state);
4177c839120SChristoph Lameter #endif
4182244b95aSChristoph Lameter 
4192244b95aSChristoph Lameter /*
4202244b95aSChristoph Lameter  * Update the zone counters for one cpu.
4214037d452SChristoph Lameter  *
422a7f75e25SChristoph Lameter  * The cpu specified must be either the current cpu or a processor that
423a7f75e25SChristoph Lameter  * is not online. If it is the current cpu then the execution thread must
424a7f75e25SChristoph Lameter  * be pinned to the current cpu.
425a7f75e25SChristoph Lameter  *
4264037d452SChristoph Lameter  * Note that refresh_cpu_vm_stats strives to only access
4274037d452SChristoph Lameter  * node local memory. The per cpu pagesets on remote zones are placed
4284037d452SChristoph Lameter  * in the memory local to the processor using that pageset. So the
4294037d452SChristoph Lameter  * loop over all zones will access a series of cachelines local to
4304037d452SChristoph Lameter  * the processor.
4314037d452SChristoph Lameter  *
4324037d452SChristoph Lameter  * The call to zone_page_state_add updates the cachelines with the
4334037d452SChristoph Lameter  * statistics in the remote zone struct as well as the global cachelines
4344037d452SChristoph Lameter  * with the global counters. These could cause remote node cache line
4354037d452SChristoph Lameter  * bouncing and should therefore only be done when necessary.
4362244b95aSChristoph Lameter  */
4372244b95aSChristoph Lameter void refresh_cpu_vm_stats(int cpu)
4382244b95aSChristoph Lameter {
4392244b95aSChristoph Lameter 	struct zone *zone;
4402244b95aSChristoph Lameter 	int i;
441a7f75e25SChristoph Lameter 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
4422244b95aSChristoph Lameter 
443ee99c71cSKOSAKI Motohiro 	for_each_populated_zone(zone) {
4444037d452SChristoph Lameter 		struct per_cpu_pageset *p;
4452244b95aSChristoph Lameter 
44699dcc3e5SChristoph Lameter 		p = per_cpu_ptr(zone->pageset, cpu);
4472244b95aSChristoph Lameter 
4482244b95aSChristoph Lameter 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
4494037d452SChristoph Lameter 			if (p->vm_stat_diff[i]) {
450a7f75e25SChristoph Lameter 				unsigned long flags;
451a7f75e25SChristoph Lameter 				int v;
452a7f75e25SChristoph Lameter 
4532244b95aSChristoph Lameter 				local_irq_save(flags);
454a7f75e25SChristoph Lameter 				v = p->vm_stat_diff[i];
4554037d452SChristoph Lameter 				p->vm_stat_diff[i] = 0;
456a7f75e25SChristoph Lameter 				local_irq_restore(flags);
457a7f75e25SChristoph Lameter 				atomic_long_add(v, &zone->vm_stat[i]);
458a7f75e25SChristoph Lameter 				global_diff[i] += v;
4594037d452SChristoph Lameter #ifdef CONFIG_NUMA
4604037d452SChristoph Lameter 				/* 3 seconds idle till flush */
4614037d452SChristoph Lameter 				p->expire = 3;
4624037d452SChristoph Lameter #endif
4632244b95aSChristoph Lameter 			}
464468fd62eSDimitri Sivanich 		cond_resched();
4654037d452SChristoph Lameter #ifdef CONFIG_NUMA
4664037d452SChristoph Lameter 		/*
4674037d452SChristoph Lameter 		 * Deal with draining the remote pageset of this
4684037d452SChristoph Lameter 		 * processor
4694037d452SChristoph Lameter 		 *
4704037d452SChristoph Lameter 		 * Check if there are pages remaining in this pageset;
4714037d452SChristoph Lameter 		 * if not then there is nothing to expire.
4724037d452SChristoph Lameter 		 */
4733dfa5721SChristoph Lameter 		if (!p->expire || !p->pcp.count)
4744037d452SChristoph Lameter 			continue;
4754037d452SChristoph Lameter 
4764037d452SChristoph Lameter 		/*
4774037d452SChristoph Lameter 		 * We never drain zones local to this processor.
4784037d452SChristoph Lameter 		 */
4794037d452SChristoph Lameter 		if (zone_to_nid(zone) == numa_node_id()) {
4804037d452SChristoph Lameter 			p->expire = 0;
4814037d452SChristoph Lameter 			continue;
4824037d452SChristoph Lameter 		}
4834037d452SChristoph Lameter 
4844037d452SChristoph Lameter 		p->expire--;
4854037d452SChristoph Lameter 		if (p->expire)
4864037d452SChristoph Lameter 			continue;
4874037d452SChristoph Lameter 
4883dfa5721SChristoph Lameter 		if (p->pcp.count)
4893dfa5721SChristoph Lameter 			drain_zone_pages(zone, &p->pcp);
4904037d452SChristoph Lameter #endif
4912244b95aSChristoph Lameter 	}
492a7f75e25SChristoph Lameter 
493a7f75e25SChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
494a7f75e25SChristoph Lameter 		if (global_diff[i])
495a7f75e25SChristoph Lameter 			atomic_long_add(global_diff[i], &vm_stat[i]);
4962244b95aSChristoph Lameter }
4972244b95aSChristoph Lameter 
4985a883813SMinchan Kim void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
4995a883813SMinchan Kim {
5005a883813SMinchan Kim 	int i;
5015a883813SMinchan Kim 
5025a883813SMinchan Kim 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
5035a883813SMinchan Kim 		if (pset->vm_stat_diff[i]) {
5045a883813SMinchan Kim 			int v = pset->vm_stat_diff[i];
5055a883813SMinchan Kim 			pset->vm_stat_diff[i] = 0;
5065a883813SMinchan Kim 			atomic_long_add(v, &zone->vm_stat[i]);
5075a883813SMinchan Kim 			atomic_long_add(v, &vm_stat[i]);
5085a883813SMinchan Kim 		}
5095a883813SMinchan Kim }
5102244b95aSChristoph Lameter #endif
5112244b95aSChristoph Lameter 
512ca889e6cSChristoph Lameter #ifdef CONFIG_NUMA
513ca889e6cSChristoph Lameter /*
514ca889e6cSChristoph Lameter  * zonelist = the list of zones passed to the allocator
515ca889e6cSChristoph Lameter  * z 	    = the zone from which the allocation occurred.
516ca889e6cSChristoph Lameter  *
517ca889e6cSChristoph Lameter  * Must be called with interrupts disabled.
51878afd561SAndi Kleen  *
51978afd561SAndi Kleen  * When __GFP_OTHER_NODE is set, assume the node of the preferred
52078afd561SAndi Kleen  * zone is the local node. This is useful for daemons that allocate
52178afd561SAndi Kleen  * memory on behalf of other processes.
522ca889e6cSChristoph Lameter  */
52378afd561SAndi Kleen void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
524ca889e6cSChristoph Lameter {
52518ea7e71SMel Gorman 	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
526ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_HIT);
527ca889e6cSChristoph Lameter 	} else {
528ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_MISS);
52918ea7e71SMel Gorman 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
530ca889e6cSChristoph Lameter 	}
53178afd561SAndi Kleen 	if (z->node == ((flags & __GFP_OTHER_NODE) ?
53278afd561SAndi Kleen 			preferred_zone->node : numa_node_id()))
533ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_LOCAL);
534ca889e6cSChristoph Lameter 	else
535ca889e6cSChristoph Lameter 		__inc_zone_state(z, NUMA_OTHER);
536ca889e6cSChristoph Lameter }
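
/*
 * Worked example (illustrative): a task running on node 0 prefers a
 * node 0 zone but is satisfied from a node 1 zone. The node 1 zone gets
 * NUMA_MISS, the preferred node 0 zone gets NUMA_FOREIGN, and because
 * node 1 != numa_node_id() the node 1 zone also gets NUMA_OTHER.
 */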
537ca889e6cSChristoph Lameter #endif
538ca889e6cSChristoph Lameter 
539d7a5752cSMel Gorman #ifdef CONFIG_COMPACTION
54036deb0beSNamhyung Kim 
541d7a5752cSMel Gorman struct contig_page_info {
542d7a5752cSMel Gorman 	unsigned long free_pages;
543d7a5752cSMel Gorman 	unsigned long free_blocks_total;
544d7a5752cSMel Gorman 	unsigned long free_blocks_suitable;
545d7a5752cSMel Gorman };
546d7a5752cSMel Gorman 
547d7a5752cSMel Gorman /*
548d7a5752cSMel Gorman  * Calculate the number of free pages in a zone, how many contiguous
549d7a5752cSMel Gorman  * pages are free and how many are large enough to satisfy an allocation of
550d7a5752cSMel Gorman  * the target size. Note that this function makes no attempt to estimate
551d7a5752cSMel Gorman  * how many suitable free blocks there *might* be if MOVABLE pages were
552d7a5752cSMel Gorman  * migrated. Calculating that is possible, but expensive and can be
553d7a5752cSMel Gorman  * figured out from userspace.
554d7a5752cSMel Gorman  */
555d7a5752cSMel Gorman static void fill_contig_page_info(struct zone *zone,
556d7a5752cSMel Gorman 				unsigned int suitable_order,
557d7a5752cSMel Gorman 				struct contig_page_info *info)
558d7a5752cSMel Gorman {
559d7a5752cSMel Gorman 	unsigned int order;
560d7a5752cSMel Gorman 
561d7a5752cSMel Gorman 	info->free_pages = 0;
562d7a5752cSMel Gorman 	info->free_blocks_total = 0;
563d7a5752cSMel Gorman 	info->free_blocks_suitable = 0;
564d7a5752cSMel Gorman 
565d7a5752cSMel Gorman 	for (order = 0; order < MAX_ORDER; order++) {
566d7a5752cSMel Gorman 		unsigned long blocks;
567d7a5752cSMel Gorman 
568d7a5752cSMel Gorman 		/* Count number of free blocks */
569d7a5752cSMel Gorman 		blocks = zone->free_area[order].nr_free;
570d7a5752cSMel Gorman 		info->free_blocks_total += blocks;
571d7a5752cSMel Gorman 
572d7a5752cSMel Gorman 		/* Count free base pages */
573d7a5752cSMel Gorman 		info->free_pages += blocks << order;
574d7a5752cSMel Gorman 
575d7a5752cSMel Gorman 		/* Count the suitable free blocks */
576d7a5752cSMel Gorman 		if (order >= suitable_order)
577d7a5752cSMel Gorman 			info->free_blocks_suitable += blocks <<
578d7a5752cSMel Gorman 						(order - suitable_order);
579d7a5752cSMel Gorman 	}
580d7a5752cSMel Gorman }
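
/*
 * Worked example (illustrative, free lists assumed): a zone with
 * nr_free = 10 at order 0, 4 at order 1 and 1 at order 2, queried with
 * suitable_order = 1, yields free_pages = 10 + 8 + 4 = 22,
 * free_blocks_total = 15 and free_blocks_suitable = 4 + (1 << 1) = 6.
 */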
581f1a5ab12SMel Gorman 
582f1a5ab12SMel Gorman /*
583f1a5ab12SMel Gorman  * A fragmentation index only makes sense if an allocation of a requested
584f1a5ab12SMel Gorman  * size would fail. If that is true, the fragmentation index indicates
585f1a5ab12SMel Gorman  * whether external fragmentation or a lack of memory was the problem.
586f1a5ab12SMel Gorman  * The value can be used to determine if page reclaim or compaction
587f1a5ab12SMel Gorman  * should be used.
588f1a5ab12SMel Gorman  */
58956de7263SMel Gorman static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
590f1a5ab12SMel Gorman {
591f1a5ab12SMel Gorman 	unsigned long requested = 1UL << order;
592f1a5ab12SMel Gorman 
593f1a5ab12SMel Gorman 	if (!info->free_blocks_total)
594f1a5ab12SMel Gorman 		return 0;
595f1a5ab12SMel Gorman 
596f1a5ab12SMel Gorman 	/* Fragmentation index only makes sense when a request would fail */
597f1a5ab12SMel Gorman 	if (info->free_blocks_suitable)
598f1a5ab12SMel Gorman 		return -1000;
599f1a5ab12SMel Gorman 
600f1a5ab12SMel Gorman 	/*
601f1a5ab12SMel Gorman 	 * The index is between 0 and 1, so return it to 3 decimal places
602f1a5ab12SMel Gorman 	 *
603f1a5ab12SMel Gorman 	 * 0 => allocation would fail due to lack of memory
604f1a5ab12SMel Gorman 	 * 1 => allocation would fail due to fragmentation
605f1a5ab12SMel Gorman 	 */
606f1a5ab12SMel Gorman 	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
607f1a5ab12SMel Gorman }
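
/*
 * Worked example (illustrative): an order-2 request (requested = 4)
 * against 100 free pages that are all order-0 blocks gives
 * free_blocks_total = 100 and free_blocks_suitable = 0, so the index is
 * 1000 - (1000 + 100 * 1000 / 4) / 100 = 1000 - 260 = 740, i.e. 0.740:
 * the failure is mostly due to fragmentation rather than lack of memory.
 */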
60856de7263SMel Gorman 
60956de7263SMel Gorman /* Same as __fragmentation_index but allocates contig_page_info on stack */
61056de7263SMel Gorman int fragmentation_index(struct zone *zone, unsigned int order)
61156de7263SMel Gorman {
61256de7263SMel Gorman 	struct contig_page_info info;
61356de7263SMel Gorman 
61456de7263SMel Gorman 	fill_contig_page_info(zone, order, &info);
61556de7263SMel Gorman 	return __fragmentation_index(order, &info);
61656de7263SMel Gorman }
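
/*
 * Illustrative usage sketch (assumed caller, modelled on the compaction
 * code): the index can gate whether compaction is worth attempting for a
 * given order; sysctl_extfrag_threshold and COMPACT_SKIPPED come from
 * mm/compaction.c and linux/compaction.h.
 */
#if 0
	int fragindex = fragmentation_index(zone, order);

	/* -1000 means an allocation of this order would succeed outright */
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;	/* low memory, not fragmentation */
#endif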
617d7a5752cSMel Gorman #endif
618d7a5752cSMel Gorman 
619d7a5752cSMel Gorman #if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
6208f32f7e5SAlexey Dobriyan #include <linux/proc_fs.h>
621f6ac2354SChristoph Lameter #include <linux/seq_file.h>
622f6ac2354SChristoph Lameter 
623467c996cSMel Gorman static char * const migratetype_names[MIGRATE_TYPES] = {
624467c996cSMel Gorman 	"Unmovable",
625467c996cSMel Gorman 	"Reclaimable",
626467c996cSMel Gorman 	"Movable",
627467c996cSMel Gorman 	"Reserve",
62847118af0SMichal Nazarewicz #ifdef CONFIG_CMA
62947118af0SMichal Nazarewicz 	"CMA",
63047118af0SMichal Nazarewicz #endif
63191446b06SKOSAKI Motohiro 	"Isolate",
632467c996cSMel Gorman };
633467c996cSMel Gorman 
634f6ac2354SChristoph Lameter static void *frag_start(struct seq_file *m, loff_t *pos)
635f6ac2354SChristoph Lameter {
636f6ac2354SChristoph Lameter 	pg_data_t *pgdat;
637f6ac2354SChristoph Lameter 	loff_t node = *pos;
638f6ac2354SChristoph Lameter 	for (pgdat = first_online_pgdat();
639f6ac2354SChristoph Lameter 	     pgdat && node;
640f6ac2354SChristoph Lameter 	     pgdat = next_online_pgdat(pgdat))
641f6ac2354SChristoph Lameter 		--node;
642f6ac2354SChristoph Lameter 
643f6ac2354SChristoph Lameter 	return pgdat;
644f6ac2354SChristoph Lameter }
645f6ac2354SChristoph Lameter 
646f6ac2354SChristoph Lameter static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
647f6ac2354SChristoph Lameter {
648f6ac2354SChristoph Lameter 	pg_data_t *pgdat = (pg_data_t *)arg;
649f6ac2354SChristoph Lameter 
650f6ac2354SChristoph Lameter 	(*pos)++;
651f6ac2354SChristoph Lameter 	return next_online_pgdat(pgdat);
652f6ac2354SChristoph Lameter }
653f6ac2354SChristoph Lameter 
654f6ac2354SChristoph Lameter static void frag_stop(struct seq_file *m, void *arg)
655f6ac2354SChristoph Lameter {
656f6ac2354SChristoph Lameter }
657f6ac2354SChristoph Lameter 
658467c996cSMel Gorman /* Walk all the zones in a node and print using a callback */
659467c996cSMel Gorman static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
660467c996cSMel Gorman 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
661f6ac2354SChristoph Lameter {
662f6ac2354SChristoph Lameter 	struct zone *zone;
663f6ac2354SChristoph Lameter 	struct zone *node_zones = pgdat->node_zones;
664f6ac2354SChristoph Lameter 	unsigned long flags;
665f6ac2354SChristoph Lameter 
666f6ac2354SChristoph Lameter 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
667f6ac2354SChristoph Lameter 		if (!populated_zone(zone))
668f6ac2354SChristoph Lameter 			continue;
669f6ac2354SChristoph Lameter 
670f6ac2354SChristoph Lameter 		spin_lock_irqsave(&zone->lock, flags);
671467c996cSMel Gorman 		print(m, pgdat, zone);
672467c996cSMel Gorman 		spin_unlock_irqrestore(&zone->lock, flags);
673467c996cSMel Gorman 	}
674467c996cSMel Gorman }
675d7a5752cSMel Gorman #endif
676467c996cSMel Gorman 
6770d6617c7SDavid Rientjes #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
678fa25c503SKOSAKI Motohiro #ifdef CONFIG_ZONE_DMA
679fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA(xx) xx "_dma",
680fa25c503SKOSAKI Motohiro #else
681fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA(xx)
682fa25c503SKOSAKI Motohiro #endif
683fa25c503SKOSAKI Motohiro 
684fa25c503SKOSAKI Motohiro #ifdef CONFIG_ZONE_DMA32
685fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA32(xx) xx "_dma32",
686fa25c503SKOSAKI Motohiro #else
687fa25c503SKOSAKI Motohiro #define TEXT_FOR_DMA32(xx)
688fa25c503SKOSAKI Motohiro #endif
689fa25c503SKOSAKI Motohiro 
690fa25c503SKOSAKI Motohiro #ifdef CONFIG_HIGHMEM
691fa25c503SKOSAKI Motohiro #define TEXT_FOR_HIGHMEM(xx) xx "_high",
692fa25c503SKOSAKI Motohiro #else
693fa25c503SKOSAKI Motohiro #define TEXT_FOR_HIGHMEM(xx)
694fa25c503SKOSAKI Motohiro #endif
695fa25c503SKOSAKI Motohiro 
696fa25c503SKOSAKI Motohiro #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
697fa25c503SKOSAKI Motohiro 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
698fa25c503SKOSAKI Motohiro 
699fa25c503SKOSAKI Motohiro const char * const vmstat_text[] = {
700fa25c503SKOSAKI Motohiro 	/* Zoned VM counters */
701fa25c503SKOSAKI Motohiro 	"nr_free_pages",
702fa25c503SKOSAKI Motohiro 	"nr_inactive_anon",
703fa25c503SKOSAKI Motohiro 	"nr_active_anon",
704fa25c503SKOSAKI Motohiro 	"nr_inactive_file",
705fa25c503SKOSAKI Motohiro 	"nr_active_file",
706fa25c503SKOSAKI Motohiro 	"nr_unevictable",
707fa25c503SKOSAKI Motohiro 	"nr_mlock",
708fa25c503SKOSAKI Motohiro 	"nr_anon_pages",
709fa25c503SKOSAKI Motohiro 	"nr_mapped",
710fa25c503SKOSAKI Motohiro 	"nr_file_pages",
711fa25c503SKOSAKI Motohiro 	"nr_dirty",
712fa25c503SKOSAKI Motohiro 	"nr_writeback",
713fa25c503SKOSAKI Motohiro 	"nr_slab_reclaimable",
714fa25c503SKOSAKI Motohiro 	"nr_slab_unreclaimable",
715fa25c503SKOSAKI Motohiro 	"nr_page_table_pages",
716fa25c503SKOSAKI Motohiro 	"nr_kernel_stack",
717fa25c503SKOSAKI Motohiro 	"nr_unstable",
718fa25c503SKOSAKI Motohiro 	"nr_bounce",
719fa25c503SKOSAKI Motohiro 	"nr_vmscan_write",
72049ea7eb6SMel Gorman 	"nr_vmscan_immediate_reclaim",
721fa25c503SKOSAKI Motohiro 	"nr_writeback_temp",
722fa25c503SKOSAKI Motohiro 	"nr_isolated_anon",
723fa25c503SKOSAKI Motohiro 	"nr_isolated_file",
724fa25c503SKOSAKI Motohiro 	"nr_shmem",
725fa25c503SKOSAKI Motohiro 	"nr_dirtied",
726fa25c503SKOSAKI Motohiro 	"nr_written",
727fa25c503SKOSAKI Motohiro 
728fa25c503SKOSAKI Motohiro #ifdef CONFIG_NUMA
729fa25c503SKOSAKI Motohiro 	"numa_hit",
730fa25c503SKOSAKI Motohiro 	"numa_miss",
731fa25c503SKOSAKI Motohiro 	"numa_foreign",
732fa25c503SKOSAKI Motohiro 	"numa_interleave",
733fa25c503SKOSAKI Motohiro 	"numa_local",
734fa25c503SKOSAKI Motohiro 	"numa_other",
735fa25c503SKOSAKI Motohiro #endif
736fa25c503SKOSAKI Motohiro 	"nr_anon_transparent_hugepages",
737d1ce749aSBartlomiej Zolnierkiewicz 	"nr_free_cma",
738fa25c503SKOSAKI Motohiro 	"nr_dirty_threshold",
739fa25c503SKOSAKI Motohiro 	"nr_dirty_background_threshold",
740fa25c503SKOSAKI Motohiro 
741fa25c503SKOSAKI Motohiro #ifdef CONFIG_VM_EVENT_COUNTERS
742fa25c503SKOSAKI Motohiro 	"pgpgin",
743fa25c503SKOSAKI Motohiro 	"pgpgout",
744fa25c503SKOSAKI Motohiro 	"pswpin",
745fa25c503SKOSAKI Motohiro 	"pswpout",
746fa25c503SKOSAKI Motohiro 
747fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgalloc")
748fa25c503SKOSAKI Motohiro 
749fa25c503SKOSAKI Motohiro 	"pgfree",
750fa25c503SKOSAKI Motohiro 	"pgactivate",
751fa25c503SKOSAKI Motohiro 	"pgdeactivate",
752fa25c503SKOSAKI Motohiro 
753fa25c503SKOSAKI Motohiro 	"pgfault",
754fa25c503SKOSAKI Motohiro 	"pgmajfault",
755fa25c503SKOSAKI Motohiro 
756fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgrefill")
757904249aaSYing Han 	TEXTS_FOR_ZONES("pgsteal_kswapd")
758904249aaSYing Han 	TEXTS_FOR_ZONES("pgsteal_direct")
759fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgscan_kswapd")
760fa25c503SKOSAKI Motohiro 	TEXTS_FOR_ZONES("pgscan_direct")
76168243e76SMel Gorman 	"pgscan_direct_throttle",
762fa25c503SKOSAKI Motohiro 
763fa25c503SKOSAKI Motohiro #ifdef CONFIG_NUMA
764fa25c503SKOSAKI Motohiro 	"zone_reclaim_failed",
765fa25c503SKOSAKI Motohiro #endif
766fa25c503SKOSAKI Motohiro 	"pginodesteal",
767fa25c503SKOSAKI Motohiro 	"slabs_scanned",
768fa25c503SKOSAKI Motohiro 	"kswapd_inodesteal",
769fa25c503SKOSAKI Motohiro 	"kswapd_low_wmark_hit_quickly",
770fa25c503SKOSAKI Motohiro 	"kswapd_high_wmark_hit_quickly",
771fa25c503SKOSAKI Motohiro 	"kswapd_skip_congestion_wait",
772fa25c503SKOSAKI Motohiro 	"pageoutrun",
773fa25c503SKOSAKI Motohiro 	"allocstall",
774fa25c503SKOSAKI Motohiro 
775fa25c503SKOSAKI Motohiro 	"pgrotated",
776fa25c503SKOSAKI Motohiro 
7775647bc29SMel Gorman #ifdef CONFIG_MIGRATION
7785647bc29SMel Gorman 	"pgmigrate_success",
7795647bc29SMel Gorman 	"pgmigrate_fail",
7805647bc29SMel Gorman #endif
781fa25c503SKOSAKI Motohiro #ifdef CONFIG_COMPACTION
782*397487dbSMel Gorman 	"compact_migrate_scanned",
783*397487dbSMel Gorman 	"compact_free_scanned",
784*397487dbSMel Gorman 	"compact_isolated",
785fa25c503SKOSAKI Motohiro 	"compact_stall",
786fa25c503SKOSAKI Motohiro 	"compact_fail",
787fa25c503SKOSAKI Motohiro 	"compact_success",
788fa25c503SKOSAKI Motohiro #endif
789fa25c503SKOSAKI Motohiro 
790fa25c503SKOSAKI Motohiro #ifdef CONFIG_HUGETLB_PAGE
791fa25c503SKOSAKI Motohiro 	"htlb_buddy_alloc_success",
792fa25c503SKOSAKI Motohiro 	"htlb_buddy_alloc_fail",
793fa25c503SKOSAKI Motohiro #endif
794fa25c503SKOSAKI Motohiro 	"unevictable_pgs_culled",
795fa25c503SKOSAKI Motohiro 	"unevictable_pgs_scanned",
796fa25c503SKOSAKI Motohiro 	"unevictable_pgs_rescued",
797fa25c503SKOSAKI Motohiro 	"unevictable_pgs_mlocked",
798fa25c503SKOSAKI Motohiro 	"unevictable_pgs_munlocked",
799fa25c503SKOSAKI Motohiro 	"unevictable_pgs_cleared",
800fa25c503SKOSAKI Motohiro 	"unevictable_pgs_stranded",
801fa25c503SKOSAKI Motohiro 
802fa25c503SKOSAKI Motohiro #ifdef CONFIG_TRANSPARENT_HUGEPAGE
803fa25c503SKOSAKI Motohiro 	"thp_fault_alloc",
804fa25c503SKOSAKI Motohiro 	"thp_fault_fallback",
805fa25c503SKOSAKI Motohiro 	"thp_collapse_alloc",
806fa25c503SKOSAKI Motohiro 	"thp_collapse_alloc_failed",
807fa25c503SKOSAKI Motohiro 	"thp_split",
808fa25c503SKOSAKI Motohiro #endif
809fa25c503SKOSAKI Motohiro 
810fa25c503SKOSAKI Motohiro #endif /* CONFIG_VM_EVENT_COUNTERS */
811fa25c503SKOSAKI Motohiro };
8120d6617c7SDavid Rientjes #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
813fa25c503SKOSAKI Motohiro 
814fa25c503SKOSAKI Motohiro 
815d7a5752cSMel Gorman #ifdef CONFIG_PROC_FS
816467c996cSMel Gorman static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
817467c996cSMel Gorman 						struct zone *zone)
818467c996cSMel Gorman {
819467c996cSMel Gorman 	int order;
820467c996cSMel Gorman 
821f6ac2354SChristoph Lameter 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
822f6ac2354SChristoph Lameter 	for (order = 0; order < MAX_ORDER; ++order)
823f6ac2354SChristoph Lameter 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
824f6ac2354SChristoph Lameter 	seq_putc(m, '\n');
825f6ac2354SChristoph Lameter }
826467c996cSMel Gorman 
827467c996cSMel Gorman /*
828467c996cSMel Gorman  * This walks the free areas for each zone.
829467c996cSMel Gorman  */
830467c996cSMel Gorman static int frag_show(struct seq_file *m, void *arg)
831467c996cSMel Gorman {
832467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
833467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, frag_show_print);
834467c996cSMel Gorman 	return 0;
835467c996cSMel Gorman }
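
/*
 * Illustrative /proc/buddyinfo output line (values assumed): one line per
 * populated zone, listing nr_free for each order from 0 to MAX_ORDER-1:
 *
 *	Node 0, zone   Normal    124     57     18      9      4 ...
 */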
836467c996cSMel Gorman 
837467c996cSMel Gorman static void pagetypeinfo_showfree_print(struct seq_file *m,
838467c996cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
839467c996cSMel Gorman {
840467c996cSMel Gorman 	int order, mtype;
841467c996cSMel Gorman 
842467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
843467c996cSMel Gorman 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
844467c996cSMel Gorman 					pgdat->node_id,
845467c996cSMel Gorman 					zone->name,
846467c996cSMel Gorman 					migratetype_names[mtype]);
847467c996cSMel Gorman 		for (order = 0; order < MAX_ORDER; ++order) {
848467c996cSMel Gorman 			unsigned long freecount = 0;
849467c996cSMel Gorman 			struct free_area *area;
850467c996cSMel Gorman 			struct list_head *curr;
851467c996cSMel Gorman 
852467c996cSMel Gorman 			area = &(zone->free_area[order]);
853467c996cSMel Gorman 
854467c996cSMel Gorman 			list_for_each(curr, &area->free_list[mtype])
855467c996cSMel Gorman 				freecount++;
856467c996cSMel Gorman 			seq_printf(m, "%6lu ", freecount);
857467c996cSMel Gorman 		}
858467c996cSMel Gorman 		seq_putc(m, '\n');
859467c996cSMel Gorman 	}
860467c996cSMel Gorman }
861467c996cSMel Gorman 
862467c996cSMel Gorman /* Print out the free pages at each order for each migratetype */
863467c996cSMel Gorman static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
864467c996cSMel Gorman {
865467c996cSMel Gorman 	int order;
866467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
867467c996cSMel Gorman 
868467c996cSMel Gorman 	/* Print header */
869467c996cSMel Gorman 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
870467c996cSMel Gorman 	for (order = 0; order < MAX_ORDER; ++order)
871467c996cSMel Gorman 		seq_printf(m, "%6d ", order);
872467c996cSMel Gorman 	seq_putc(m, '\n');
873467c996cSMel Gorman 
874467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
875467c996cSMel Gorman 
876467c996cSMel Gorman 	return 0;
877467c996cSMel Gorman }
878467c996cSMel Gorman 
879467c996cSMel Gorman static void pagetypeinfo_showblockcount_print(struct seq_file *m,
880467c996cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
881467c996cSMel Gorman {
882467c996cSMel Gorman 	int mtype;
883467c996cSMel Gorman 	unsigned long pfn;
884467c996cSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
885467c996cSMel Gorman 	unsigned long end_pfn = start_pfn + zone->spanned_pages;
886467c996cSMel Gorman 	unsigned long count[MIGRATE_TYPES] = { 0, };
887467c996cSMel Gorman 
888467c996cSMel Gorman 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
889467c996cSMel Gorman 		struct page *page;
890467c996cSMel Gorman 
891467c996cSMel Gorman 		if (!pfn_valid(pfn))
892467c996cSMel Gorman 			continue;
893467c996cSMel Gorman 
894467c996cSMel Gorman 		page = pfn_to_page(pfn);
895eb33575cSMel Gorman 
896eb33575cSMel Gorman 		/* Watch for unexpected holes punched in the memmap */
897eb33575cSMel Gorman 		if (!memmap_valid_within(pfn, page, zone))
898e80d6a24SMel Gorman 			continue;
899eb33575cSMel Gorman 
900467c996cSMel Gorman 		mtype = get_pageblock_migratetype(page);
901467c996cSMel Gorman 
902e80d6a24SMel Gorman 		if (mtype < MIGRATE_TYPES)
903467c996cSMel Gorman 			count[mtype]++;
904467c996cSMel Gorman 	}
905467c996cSMel Gorman 
906467c996cSMel Gorman 	/* Print counts */
907467c996cSMel Gorman 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
908467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
909467c996cSMel Gorman 		seq_printf(m, "%12lu ", count[mtype]);
910467c996cSMel Gorman 	seq_putc(m, '\n');
911467c996cSMel Gorman }
912467c996cSMel Gorman 
913467c996cSMel Gorman /* Print out the number of pageblocks for each migratetype */
914467c996cSMel Gorman static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
915467c996cSMel Gorman {
916467c996cSMel Gorman 	int mtype;
917467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
918467c996cSMel Gorman 
919467c996cSMel Gorman 	seq_printf(m, "\n%-23s", "Number of blocks type ");
920467c996cSMel Gorman 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
921467c996cSMel Gorman 		seq_printf(m, "%12s ", migratetype_names[mtype]);
922467c996cSMel Gorman 	seq_putc(m, '\n');
923467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
924467c996cSMel Gorman 
925467c996cSMel Gorman 	return 0;
926467c996cSMel Gorman }
927467c996cSMel Gorman 
928467c996cSMel Gorman /*
929467c996cSMel Gorman  * This prints out statistics in relation to grouping pages by mobility.
930467c996cSMel Gorman  * It is expensive to collect so do not constantly read the file.
931467c996cSMel Gorman  */
932467c996cSMel Gorman static int pagetypeinfo_show(struct seq_file *m, void *arg)
933467c996cSMel Gorman {
934467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
935467c996cSMel Gorman 
93641b25a37SKOSAKI Motohiro 	/* check memoryless node */
93741b25a37SKOSAKI Motohiro 	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
93841b25a37SKOSAKI Motohiro 		return 0;
93941b25a37SKOSAKI Motohiro 
940467c996cSMel Gorman 	seq_printf(m, "Page block order: %d\n", pageblock_order);
941467c996cSMel Gorman 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
942467c996cSMel Gorman 	seq_putc(m, '\n');
943467c996cSMel Gorman 	pagetypeinfo_showfree(m, pgdat);
944467c996cSMel Gorman 	pagetypeinfo_showblockcount(m, pgdat);
945467c996cSMel Gorman 
946f6ac2354SChristoph Lameter 	return 0;
947f6ac2354SChristoph Lameter }
948f6ac2354SChristoph Lameter 
9498f32f7e5SAlexey Dobriyan static const struct seq_operations fragmentation_op = {
950f6ac2354SChristoph Lameter 	.start	= frag_start,
951f6ac2354SChristoph Lameter 	.next	= frag_next,
952f6ac2354SChristoph Lameter 	.stop	= frag_stop,
953f6ac2354SChristoph Lameter 	.show	= frag_show,
954f6ac2354SChristoph Lameter };
955f6ac2354SChristoph Lameter 
9568f32f7e5SAlexey Dobriyan static int fragmentation_open(struct inode *inode, struct file *file)
9578f32f7e5SAlexey Dobriyan {
9588f32f7e5SAlexey Dobriyan 	return seq_open(file, &fragmentation_op);
9598f32f7e5SAlexey Dobriyan }
9608f32f7e5SAlexey Dobriyan 
9618f32f7e5SAlexey Dobriyan static const struct file_operations fragmentation_file_operations = {
9628f32f7e5SAlexey Dobriyan 	.open		= fragmentation_open,
9638f32f7e5SAlexey Dobriyan 	.read		= seq_read,
9648f32f7e5SAlexey Dobriyan 	.llseek		= seq_lseek,
9658f32f7e5SAlexey Dobriyan 	.release	= seq_release,
9668f32f7e5SAlexey Dobriyan };
9678f32f7e5SAlexey Dobriyan 
96874e2e8e8SAlexey Dobriyan static const struct seq_operations pagetypeinfo_op = {
969467c996cSMel Gorman 	.start	= frag_start,
970467c996cSMel Gorman 	.next	= frag_next,
971467c996cSMel Gorman 	.stop	= frag_stop,
972467c996cSMel Gorman 	.show	= pagetypeinfo_show,
973467c996cSMel Gorman };
974467c996cSMel Gorman 
97574e2e8e8SAlexey Dobriyan static int pagetypeinfo_open(struct inode *inode, struct file *file)
97674e2e8e8SAlexey Dobriyan {
97774e2e8e8SAlexey Dobriyan 	return seq_open(file, &pagetypeinfo_op);
97874e2e8e8SAlexey Dobriyan }
97974e2e8e8SAlexey Dobriyan 
98074e2e8e8SAlexey Dobriyan static const struct file_operations pagetypeinfo_file_ops = {
98174e2e8e8SAlexey Dobriyan 	.open		= pagetypeinfo_open,
98274e2e8e8SAlexey Dobriyan 	.read		= seq_read,
98374e2e8e8SAlexey Dobriyan 	.llseek		= seq_lseek,
98474e2e8e8SAlexey Dobriyan 	.release	= seq_release,
98574e2e8e8SAlexey Dobriyan };
98674e2e8e8SAlexey Dobriyan 
987467c996cSMel Gorman static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
988467c996cSMel Gorman 							struct zone *zone)
989f6ac2354SChristoph Lameter {
990f6ac2354SChristoph Lameter 	int i;
991f6ac2354SChristoph Lameter 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
992f6ac2354SChristoph Lameter 	seq_printf(m,
993f6ac2354SChristoph Lameter 		   "\n  pages free     %lu"
994f6ac2354SChristoph Lameter 		   "\n        min      %lu"
995f6ac2354SChristoph Lameter 		   "\n        low      %lu"
996f6ac2354SChristoph Lameter 		   "\n        high     %lu"
99708d9ae7cSWu Fengguang 		   "\n        scanned  %lu"
998f6ac2354SChristoph Lameter 		   "\n        spanned  %lu"
999f6ac2354SChristoph Lameter 		   "\n        present  %lu",
100088f5acf8SMel Gorman 		   zone_page_state(zone, NR_FREE_PAGES),
100141858966SMel Gorman 		   min_wmark_pages(zone),
100241858966SMel Gorman 		   low_wmark_pages(zone),
100341858966SMel Gorman 		   high_wmark_pages(zone),
1004f6ac2354SChristoph Lameter 		   zone->pages_scanned,
1005f6ac2354SChristoph Lameter 		   zone->spanned_pages,
1006f6ac2354SChristoph Lameter 		   zone->present_pages);
10072244b95aSChristoph Lameter 
10082244b95aSChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
10092244b95aSChristoph Lameter 		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
10102244b95aSChristoph Lameter 				zone_page_state(zone, i));
10112244b95aSChristoph Lameter 
1012f6ac2354SChristoph Lameter 	seq_printf(m,
1013f6ac2354SChristoph Lameter 		   "\n        protection: (%lu",
1014f6ac2354SChristoph Lameter 		   zone->lowmem_reserve[0]);
1015f6ac2354SChristoph Lameter 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1016f6ac2354SChristoph Lameter 		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
1017f6ac2354SChristoph Lameter 	seq_printf(m,
1018f6ac2354SChristoph Lameter 		   ")"
1019f6ac2354SChristoph Lameter 		   "\n  pagesets");
1020f6ac2354SChristoph Lameter 	for_each_online_cpu(i) {
1021f6ac2354SChristoph Lameter 		struct per_cpu_pageset *pageset;
1022f6ac2354SChristoph Lameter 
102399dcc3e5SChristoph Lameter 		pageset = per_cpu_ptr(zone->pageset, i);
1024f6ac2354SChristoph Lameter 		seq_printf(m,
10253dfa5721SChristoph Lameter 			   "\n    cpu: %i"
1026f6ac2354SChristoph Lameter 			   "\n              count: %i"
1027f6ac2354SChristoph Lameter 			   "\n              high:  %i"
1028f6ac2354SChristoph Lameter 			   "\n              batch: %i",
10293dfa5721SChristoph Lameter 			   i,
10303dfa5721SChristoph Lameter 			   pageset->pcp.count,
10313dfa5721SChristoph Lameter 			   pageset->pcp.high,
10323dfa5721SChristoph Lameter 			   pageset->pcp.batch);
1033df9ecabaSChristoph Lameter #ifdef CONFIG_SMP
1034df9ecabaSChristoph Lameter 		seq_printf(m, "\n  vm stats threshold: %d",
1035df9ecabaSChristoph Lameter 				pageset->stat_threshold);
1036df9ecabaSChristoph Lameter #endif
1037f6ac2354SChristoph Lameter 	}
1038f6ac2354SChristoph Lameter 	seq_printf(m,
1039f6ac2354SChristoph Lameter 		   "\n  all_unreclaimable: %u"
1040556adecbSRik van Riel 		   "\n  start_pfn:         %lu"
1041556adecbSRik van Riel 		   "\n  inactive_ratio:    %u",
104293e4a89aSKOSAKI Motohiro 		   zone->all_unreclaimable,
1043556adecbSRik van Riel 		   zone->zone_start_pfn,
1044556adecbSRik van Riel 		   zone->inactive_ratio);
1045f6ac2354SChristoph Lameter 	seq_putc(m, '\n');
1046f6ac2354SChristoph Lameter }
1047467c996cSMel Gorman 
1048467c996cSMel Gorman /*
1049467c996cSMel Gorman  * Output information about zones in @pgdat.
1050467c996cSMel Gorman  */
1051467c996cSMel Gorman static int zoneinfo_show(struct seq_file *m, void *arg)
1052467c996cSMel Gorman {
1053467c996cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1054467c996cSMel Gorman 	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1055f6ac2354SChristoph Lameter 	return 0;
1056f6ac2354SChristoph Lameter }
1057f6ac2354SChristoph Lameter 
10585c9fe628SAlexey Dobriyan static const struct seq_operations zoneinfo_op = {
1059f6ac2354SChristoph Lameter 	.start	= frag_start, /* iterate over all zones. The same as in
1060f6ac2354SChristoph Lameter 			       * fragmentation. */
1061f6ac2354SChristoph Lameter 	.next	= frag_next,
1062f6ac2354SChristoph Lameter 	.stop	= frag_stop,
1063f6ac2354SChristoph Lameter 	.show	= zoneinfo_show,
1064f6ac2354SChristoph Lameter };
1065f6ac2354SChristoph Lameter 
10665c9fe628SAlexey Dobriyan static int zoneinfo_open(struct inode *inode, struct file *file)
10675c9fe628SAlexey Dobriyan {
10685c9fe628SAlexey Dobriyan 	return seq_open(file, &zoneinfo_op);
10695c9fe628SAlexey Dobriyan }
10705c9fe628SAlexey Dobriyan 
10715c9fe628SAlexey Dobriyan static const struct file_operations proc_zoneinfo_file_operations = {
10725c9fe628SAlexey Dobriyan 	.open		= zoneinfo_open,
10735c9fe628SAlexey Dobriyan 	.read		= seq_read,
10745c9fe628SAlexey Dobriyan 	.llseek		= seq_lseek,
10755c9fe628SAlexey Dobriyan 	.release	= seq_release,
10765c9fe628SAlexey Dobriyan };
10775c9fe628SAlexey Dobriyan 
107879da826aSMichael Rubin enum writeback_stat_item {
107979da826aSMichael Rubin 	NR_DIRTY_THRESHOLD,
108079da826aSMichael Rubin 	NR_DIRTY_BG_THRESHOLD,
108179da826aSMichael Rubin 	NR_VM_WRITEBACK_STAT_ITEMS,
108279da826aSMichael Rubin };
108379da826aSMichael Rubin 
1084f6ac2354SChristoph Lameter static void *vmstat_start(struct seq_file *m, loff_t *pos)
1085f6ac2354SChristoph Lameter {
10862244b95aSChristoph Lameter 	unsigned long *v;
108779da826aSMichael Rubin 	int i, stat_items_size;
1088f6ac2354SChristoph Lameter 
1089f6ac2354SChristoph Lameter 	if (*pos >= ARRAY_SIZE(vmstat_text))
1090f6ac2354SChristoph Lameter 		return NULL;
109179da826aSMichael Rubin 	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
109279da826aSMichael Rubin 			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1093f6ac2354SChristoph Lameter 
1094f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
109579da826aSMichael Rubin 	stat_items_size += sizeof(struct vm_event_state);
1096f8891e5eSChristoph Lameter #endif
109779da826aSMichael Rubin 
109879da826aSMichael Rubin 	v = kmalloc(stat_items_size, GFP_KERNEL);
10992244b95aSChristoph Lameter 	m->private = v;
11002244b95aSChristoph Lameter 	if (!v)
1101f6ac2354SChristoph Lameter 		return ERR_PTR(-ENOMEM);
11022244b95aSChristoph Lameter 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
11032244b95aSChristoph Lameter 		v[i] = global_page_state(i);
110479da826aSMichael Rubin 	v += NR_VM_ZONE_STAT_ITEMS;
110579da826aSMichael Rubin 
110679da826aSMichael Rubin 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
110779da826aSMichael Rubin 			    v + NR_DIRTY_THRESHOLD);
110879da826aSMichael Rubin 	v += NR_VM_WRITEBACK_STAT_ITEMS;
110979da826aSMichael Rubin 
1110f8891e5eSChristoph Lameter #ifdef CONFIG_VM_EVENT_COUNTERS
111179da826aSMichael Rubin 	all_vm_events(v);
111279da826aSMichael Rubin 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
111379da826aSMichael Rubin 	v[PGPGOUT] /= 2;
1114f8891e5eSChristoph Lameter #endif
1115ff8b16d7SWu Fengguang 	return (unsigned long *)m->private + *pos;
1116f6ac2354SChristoph Lameter }
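/*
 * Layout of the snapshot buffer built above; one unsigned long per line
 * of /proc/vmstat, indexed in lock-step with vmstat_text[]:
 *
 *   v[0 .. NR_VM_ZONE_STAT_ITEMS - 1]        global zone counters
 *   next NR_VM_WRITEBACK_STAT_ITEMS slots    dirty thresholds
 *   next NR_VM_EVENT_ITEMS slots             event counters (if enabled)
 */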
1117f6ac2354SChristoph Lameter 
1118f6ac2354SChristoph Lameter static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1119f6ac2354SChristoph Lameter {
1120f6ac2354SChristoph Lameter 	(*pos)++;
1121f6ac2354SChristoph Lameter 	if (*pos >= ARRAY_SIZE(vmstat_text))
1122f6ac2354SChristoph Lameter 		return NULL;
1123f6ac2354SChristoph Lameter 	return (unsigned long *)m->private + *pos;
1124f6ac2354SChristoph Lameter }
1125f6ac2354SChristoph Lameter 
1126f6ac2354SChristoph Lameter static int vmstat_show(struct seq_file *m, void *arg)
1127f6ac2354SChristoph Lameter {
1128f6ac2354SChristoph Lameter 	unsigned long *l = arg;
1129f6ac2354SChristoph Lameter 	unsigned long off = l - (unsigned long *)m->private;
1130f6ac2354SChristoph Lameter 
1131f6ac2354SChristoph Lameter 	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1132f6ac2354SChristoph Lameter 	return 0;
1133f6ac2354SChristoph Lameter }
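/*
 * A few sample /proc/vmstat lines as produced by vmstat_show() (the
 * values are invented):
 *
 *   nr_free_pages 80245
 *   nr_dirty_threshold 31875
 *   pgpgin 2151440
 *   pgpgout 3571428
 */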
1134f6ac2354SChristoph Lameter 
1135f6ac2354SChristoph Lameter static void vmstat_stop(struct seq_file *m, void *arg)
1136f6ac2354SChristoph Lameter {
1137f6ac2354SChristoph Lameter 	kfree(m->private);
1138f6ac2354SChristoph Lameter 	m->private = NULL;
1139f6ac2354SChristoph Lameter }
1140f6ac2354SChristoph Lameter 
1141b6aa44abSAlexey Dobriyan static const struct seq_operations vmstat_op = {
1142f6ac2354SChristoph Lameter 	.start	= vmstat_start,
1143f6ac2354SChristoph Lameter 	.next	= vmstat_next,
1144f6ac2354SChristoph Lameter 	.stop	= vmstat_stop,
1145f6ac2354SChristoph Lameter 	.show	= vmstat_show,
1146f6ac2354SChristoph Lameter };
1147f6ac2354SChristoph Lameter 
1148b6aa44abSAlexey Dobriyan static int vmstat_open(struct inode *inode, struct file *file)
1149b6aa44abSAlexey Dobriyan {
1150b6aa44abSAlexey Dobriyan 	return seq_open(file, &vmstat_op);
1151b6aa44abSAlexey Dobriyan }
1152b6aa44abSAlexey Dobriyan 
1153b6aa44abSAlexey Dobriyan static const struct file_operations proc_vmstat_file_operations = {
1154b6aa44abSAlexey Dobriyan 	.open		= vmstat_open,
1155b6aa44abSAlexey Dobriyan 	.read		= seq_read,
1156b6aa44abSAlexey Dobriyan 	.llseek		= seq_lseek,
1157b6aa44abSAlexey Dobriyan 	.release	= seq_release,
1158b6aa44abSAlexey Dobriyan };
1159f6ac2354SChristoph Lameter #endif /* CONFIG_PROC_FS */
1160f6ac2354SChristoph Lameter 
1161df9ecabaSChristoph Lameter #ifdef CONFIG_SMP
1162d1187ed2SChristoph Lameter static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
116377461ab3SChristoph Lameter int sysctl_stat_interval __read_mostly = HZ;
1164d1187ed2SChristoph Lameter 
1165d1187ed2SChristoph Lameter static void vmstat_update(struct work_struct *w)
1166d1187ed2SChristoph Lameter {
1167d1187ed2SChristoph Lameter 	refresh_cpu_vm_stats(smp_processor_id());
116877461ab3SChristoph Lameter 	schedule_delayed_work(&__get_cpu_var(vmstat_work),
116998f4ebb2SAnton Blanchard 		round_jiffies_relative(sysctl_stat_interval));
1170d1187ed2SChristoph Lameter }
1171d1187ed2SChristoph Lameter 
117242614fcdSRandy Dunlap static void __cpuinit start_cpu_timer(int cpu)
1173d1187ed2SChristoph Lameter {
11741871e52cSTejun Heo 	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1175d1187ed2SChristoph Lameter 
1176203b42f7STejun Heo 	INIT_DEFERRABLE_WORK(work, vmstat_update);
11771871e52cSTejun Heo 	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1178d1187ed2SChristoph Lameter }
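/*
 * The work item is deferrable, so an idle CPU is not woken up just to
 * fold its counters, and __round_jiffies_relative(HZ, cpu) skews the
 * expiry per CPU so the vmstat timers do not all fire in lock-step.
 */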
1179d1187ed2SChristoph Lameter 
1180df9ecabaSChristoph Lameter /*
1181df9ecabaSChristoph Lameter  * Use the cpu notifier to ensure that the thresholds are recalculated
1182df9ecabaSChristoph Lameter  * when necessary.
1183df9ecabaSChristoph Lameter  */
1184df9ecabaSChristoph Lameter static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
1185df9ecabaSChristoph Lameter 		unsigned long action,
1186df9ecabaSChristoph Lameter 		void *hcpu)
1187df9ecabaSChristoph Lameter {
1188d1187ed2SChristoph Lameter 	long cpu = (long)hcpu;
1189d1187ed2SChristoph Lameter 
1190df9ecabaSChristoph Lameter 	switch (action) {
1191d1187ed2SChristoph Lameter 	case CPU_ONLINE:
1192d1187ed2SChristoph Lameter 	case CPU_ONLINE_FROZEN:
11935ee28a44SKAMEZAWA Hiroyuki 		refresh_zone_stat_thresholds();
1194d1187ed2SChristoph Lameter 		start_cpu_timer(cpu);
1195ad596925SChristoph Lameter 		node_set_state(cpu_to_node(cpu), N_CPU);
1196d1187ed2SChristoph Lameter 		break;
1197d1187ed2SChristoph Lameter 	case CPU_DOWN_PREPARE:
1198d1187ed2SChristoph Lameter 	case CPU_DOWN_PREPARE_FROZEN:
1199afe2c511STejun Heo 		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1200d1187ed2SChristoph Lameter 		per_cpu(vmstat_work, cpu).work.func = NULL;
1201d1187ed2SChristoph Lameter 		break;
1202d1187ed2SChristoph Lameter 	case CPU_DOWN_FAILED:
1203d1187ed2SChristoph Lameter 	case CPU_DOWN_FAILED_FROZEN:
1204d1187ed2SChristoph Lameter 		start_cpu_timer(cpu);
1205d1187ed2SChristoph Lameter 		break;
1206df9ecabaSChristoph Lameter 	case CPU_DEAD:
12078bb78442SRafael J. Wysocki 	case CPU_DEAD_FROZEN:
1208df9ecabaSChristoph Lameter 		refresh_zone_stat_thresholds();
1209df9ecabaSChristoph Lameter 		break;
1210df9ecabaSChristoph Lameter 	default:
1211df9ecabaSChristoph Lameter 		break;
1212df9ecabaSChristoph Lameter 	}
1213df9ecabaSChristoph Lameter 	return NOTIFY_OK;
1214df9ecabaSChristoph Lameter }
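/*
 * Lifecycle summary: a CPU coming online gets fresh thresholds and its
 * own timer; one about to go down has its timer cancelled (work.func is
 * cleared, presumably so a stray requeue would be obvious); once it is
 * dead the thresholds are recomputed for the remaining CPUs.
 */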
1215df9ecabaSChristoph Lameter 
1216df9ecabaSChristoph Lameter static struct notifier_block __cpuinitdata vmstat_notifier =
1217df9ecabaSChristoph Lameter 	{ &vmstat_cpuup_callback, NULL, 0 };
12188f32f7e5SAlexey Dobriyan #endif
1219df9ecabaSChristoph Lameter 
1220e2fc88d0SAdrian Bunk static int __init setup_vmstat(void)
1221df9ecabaSChristoph Lameter {
12228f32f7e5SAlexey Dobriyan #ifdef CONFIG_SMP
1223d1187ed2SChristoph Lameter 	int cpu;
1224d1187ed2SChristoph Lameter 
1225df9ecabaSChristoph Lameter 	register_cpu_notifier(&vmstat_notifier);
1226d1187ed2SChristoph Lameter 
1227d1187ed2SChristoph Lameter 	for_each_online_cpu(cpu)
1228d1187ed2SChristoph Lameter 		start_cpu_timer(cpu);
12298f32f7e5SAlexey Dobriyan #endif
12308f32f7e5SAlexey Dobriyan #ifdef CONFIG_PROC_FS
12318f32f7e5SAlexey Dobriyan 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
123274e2e8e8SAlexey Dobriyan 	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1233b6aa44abSAlexey Dobriyan 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
12345c9fe628SAlexey Dobriyan 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
12358f32f7e5SAlexey Dobriyan #endif
1236df9ecabaSChristoph Lameter 	return 0;
1237df9ecabaSChristoph Lameter }
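/*
 * After boot these show up as /proc/buddyinfo, /proc/pagetypeinfo,
 * /proc/vmstat and /proc/zoneinfo, all world-readable (S_IRUGO).
 */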
1238df9ecabaSChristoph Lameter module_init(setup_vmstat)
1239d7a5752cSMel Gorman 
1240d7a5752cSMel Gorman #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1241d7a5752cSMel Gorman #include <linux/debugfs.h>
1242d7a5752cSMel Gorman 
1244d7a5752cSMel Gorman /*
1245d7a5752cSMel Gorman  * Return an index indicating how much of the available free memory is
1246d7a5752cSMel Gorman  * unusable for an allocation of the requested size.
1247d7a5752cSMel Gorman  */
1248d7a5752cSMel Gorman static int unusable_free_index(unsigned int order,
1249d7a5752cSMel Gorman 				struct contig_page_info *info)
1250d7a5752cSMel Gorman {
1251d7a5752cSMel Gorman 	/* No free memory at all is treated as all free memory being unusable */
1252d7a5752cSMel Gorman 	if (info->free_pages == 0)
1253d7a5752cSMel Gorman 		return 1000;
1254d7a5752cSMel Gorman 
1255d7a5752cSMel Gorman 	/*
1256d7a5752cSMel Gorman 	 * The index is conceptually between 0 and 1; it is returned
1257d7a5752cSMel Gorman 	 * scaled by 1000 so three decimal places survive integer math.
1258d7a5752cSMel Gorman 	 *
1259d7a5752cSMel Gorman 	 * 0 => no fragmentation
1260d7a5752cSMel Gorman 	 * 1 => high fragmentation
1261d7a5752cSMel Gorman 	 */
1262d7a5752cSMel Gorman 	return div_u64((info->free_pages -
1262d7a5752cSMel Gorman 			(info->free_blocks_suitable << order)) * 1000ULL,
1262d7a5752cSMel Gorman 			info->free_pages);
1264d7a5752cSMel Gorman }
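/*
 * Worked example with invented numbers: a zone with 1000 free pages, of
 * which 25 order-3 chunks (25 << 3 = 200 pages) could satisfy an order-3
 * request, yields
 *
 *   (1000 - 200) * 1000 / 1000 = 800
 *
 * i.e. an index of 0.800: 80% of the free memory is unusable at that
 * request size.
 */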
1265d7a5752cSMel Gorman 
1266d7a5752cSMel Gorman static void unusable_show_print(struct seq_file *m,
1267d7a5752cSMel Gorman 					pg_data_t *pgdat, struct zone *zone)
1268d7a5752cSMel Gorman {
1269d7a5752cSMel Gorman 	unsigned int order;
1270d7a5752cSMel Gorman 	int index;
1271d7a5752cSMel Gorman 	struct contig_page_info info;
1272d7a5752cSMel Gorman 
1273d7a5752cSMel Gorman 	seq_printf(m, "Node %d, zone %8s ",
1274d7a5752cSMel Gorman 				pgdat->node_id,
1275d7a5752cSMel Gorman 				zone->name);
1276d7a5752cSMel Gorman 	for (order = 0; order < MAX_ORDER; ++order) {
1277d7a5752cSMel Gorman 		fill_contig_page_info(zone, order, &info);
1278d7a5752cSMel Gorman 		index = unusable_free_index(order, &info);
1279d7a5752cSMel Gorman 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1280d7a5752cSMel Gorman 	}
1281d7a5752cSMel Gorman 
1282d7a5752cSMel Gorman 	seq_putc(m, '\n');
1283d7a5752cSMel Gorman }
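/*
 * Sample line from /sys/kernel/debug/extfrag/unusable_index (invented
 * values; one column per order up to MAX_ORDER):
 *
 *   Node 0, zone   Normal 0.000 0.012 0.053 0.183 0.451 0.800 0.982 ...
 *
 * debugfs must be mounted first, e.g.:
 *   mount -t debugfs none /sys/kernel/debug
 */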
1284d7a5752cSMel Gorman 
1285d7a5752cSMel Gorman /*
1286d7a5752cSMel Gorman  * Display unusable free space index
1287d7a5752cSMel Gorman  *
1288d7a5752cSMel Gorman  * The unusable free space index measures how much of the available free
1289d7a5752cSMel Gorman  * memory cannot be used to satisfy an allocation of a given size and is a
1290d7a5752cSMel Gorman  * value between 0 and 1. The higher the value, the larger the fraction
1291d7a5752cSMel Gorman  * of free memory that is unusable and, by implication, the worse the
1292d7a5752cSMel Gorman  * external fragmentation is. Multiply by 100 to express it as a percentage.
1293d7a5752cSMel Gorman  */
1294d7a5752cSMel Gorman static int unusable_show(struct seq_file *m, void *arg)
1295d7a5752cSMel Gorman {
1296d7a5752cSMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1297d7a5752cSMel Gorman 
1298d7a5752cSMel Gorman 	/* Skip nodes without memory */
1299d7a5752cSMel Gorman 	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
1300d7a5752cSMel Gorman 		return 0;
1301d7a5752cSMel Gorman 
1302d7a5752cSMel Gorman 	walk_zones_in_node(m, pgdat, unusable_show_print);
1303d7a5752cSMel Gorman 
1304d7a5752cSMel Gorman 	return 0;
1305d7a5752cSMel Gorman }
1306d7a5752cSMel Gorman 
1307d7a5752cSMel Gorman static const struct seq_operations unusable_op = {
1308d7a5752cSMel Gorman 	.start	= frag_start,
1309d7a5752cSMel Gorman 	.next	= frag_next,
1310d7a5752cSMel Gorman 	.stop	= frag_stop,
1311d7a5752cSMel Gorman 	.show	= unusable_show,
1312d7a5752cSMel Gorman };
1313d7a5752cSMel Gorman 
1314d7a5752cSMel Gorman static int unusable_open(struct inode *inode, struct file *file)
1315d7a5752cSMel Gorman {
1316d7a5752cSMel Gorman 	return seq_open(file, &unusable_op);
1317d7a5752cSMel Gorman }
1318d7a5752cSMel Gorman 
1319d7a5752cSMel Gorman static const struct file_operations unusable_file_ops = {
1320d7a5752cSMel Gorman 	.open		= unusable_open,
1321d7a5752cSMel Gorman 	.read		= seq_read,
1322d7a5752cSMel Gorman 	.llseek		= seq_lseek,
1323d7a5752cSMel Gorman 	.release	= seq_release,
1324d7a5752cSMel Gorman };
1325d7a5752cSMel Gorman 
1326f1a5ab12SMel Gorman static void extfrag_show_print(struct seq_file *m,
1327f1a5ab12SMel Gorman 					pg_data_t *pgdat, struct zone *zone)
1328f1a5ab12SMel Gorman {
1329f1a5ab12SMel Gorman 	unsigned int order;
1330f1a5ab12SMel Gorman 	int index;
1331f1a5ab12SMel Gorman 
1332f1a5ab12SMel Gorman 	/* Allocated on the stack as interrupts are disabled during the zone walk */
1333f1a5ab12SMel Gorman 	struct contig_page_info info;
1334f1a5ab12SMel Gorman 
1335f1a5ab12SMel Gorman 	seq_printf(m, "Node %d, zone %8s ",
1336f1a5ab12SMel Gorman 				pgdat->node_id,
1337f1a5ab12SMel Gorman 				zone->name);
1338f1a5ab12SMel Gorman 	for (order = 0; order < MAX_ORDER; ++order) {
1339f1a5ab12SMel Gorman 		fill_contig_page_info(zone, order, &info);
134056de7263SMel Gorman 		index = __fragmentation_index(order, &info);
1341f1a5ab12SMel Gorman 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1342f1a5ab12SMel Gorman 	}
1343f1a5ab12SMel Gorman 
1344f1a5ab12SMel Gorman 	seq_putc(m, '\n');
1345f1a5ab12SMel Gorman }
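/*
 * __fragmentation_index() returns -1000 when a suitably sized block is
 * still free, so a "-1.000" column in extfrag_index means an allocation
 * of that order would not fail at all; for failing orders, values close
 * to 1.000 blame fragmentation rather than an outright lack of memory.
 */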
1346f1a5ab12SMel Gorman 
1347f1a5ab12SMel Gorman /*
1348f1a5ab12SMel Gorman  * Display the fragmentation index for the orders at which allocation would fail
1349f1a5ab12SMel Gorman  */
1350f1a5ab12SMel Gorman static int extfrag_show(struct seq_file *m, void *arg)
1351f1a5ab12SMel Gorman {
1352f1a5ab12SMel Gorman 	pg_data_t *pgdat = (pg_data_t *)arg;
1353f1a5ab12SMel Gorman 
1354f1a5ab12SMel Gorman 	walk_zones_in_node(m, pgdat, extfrag_show_print);
1355f1a5ab12SMel Gorman 
1356f1a5ab12SMel Gorman 	return 0;
1357f1a5ab12SMel Gorman }
1358f1a5ab12SMel Gorman 
1359f1a5ab12SMel Gorman static const struct seq_operations extfrag_op = {
1360f1a5ab12SMel Gorman 	.start	= frag_start,
1361f1a5ab12SMel Gorman 	.next	= frag_next,
1362f1a5ab12SMel Gorman 	.stop	= frag_stop,
1363f1a5ab12SMel Gorman 	.show	= extfrag_show,
1364f1a5ab12SMel Gorman };
1365f1a5ab12SMel Gorman 
1366f1a5ab12SMel Gorman static int extfrag_open(struct inode *inode, struct file *file)
1367f1a5ab12SMel Gorman {
1368f1a5ab12SMel Gorman 	return seq_open(file, &extfrag_op);
1369f1a5ab12SMel Gorman }
1370f1a5ab12SMel Gorman 
1371f1a5ab12SMel Gorman static const struct file_operations extfrag_file_ops = {
1372f1a5ab12SMel Gorman 	.open		= extfrag_open,
1373f1a5ab12SMel Gorman 	.read		= seq_read,
1374f1a5ab12SMel Gorman 	.llseek		= seq_lseek,
1375f1a5ab12SMel Gorman 	.release	= seq_release,
1376f1a5ab12SMel Gorman };
1377f1a5ab12SMel Gorman 
1378d7a5752cSMel Gorman static int __init extfrag_debug_init(void)
1379d7a5752cSMel Gorman {
1380bde8bd8aSSasikantha babu 	struct dentry *extfrag_debug_root;
1381bde8bd8aSSasikantha babu 
1382d7a5752cSMel Gorman 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1383d7a5752cSMel Gorman 	if (!extfrag_debug_root)
1384d7a5752cSMel Gorman 		return -ENOMEM;
1385d7a5752cSMel Gorman 
1386d7a5752cSMel Gorman 	if (!debugfs_create_file("unusable_index", 0444,
1387d7a5752cSMel Gorman 			extfrag_debug_root, NULL, &unusable_file_ops))
1388bde8bd8aSSasikantha babu 		goto fail;
1389d7a5752cSMel Gorman 
1390f1a5ab12SMel Gorman 	if (!debugfs_create_file("extfrag_index", 0444,
1391f1a5ab12SMel Gorman 			extfrag_debug_root, NULL, &extfrag_file_ops))
1392bde8bd8aSSasikantha babu 		goto fail;
1393f1a5ab12SMel Gorman 
1394d7a5752cSMel Gorman 	return 0;
1395bde8bd8aSSasikantha babu fail:
1396bde8bd8aSSasikantha babu 	debugfs_remove_recursive(extfrag_debug_root);
1397bde8bd8aSSasikantha babu 	return -ENOMEM;
1398d7a5752cSMel Gorman }
1399d7a5752cSMel Gorman 
1400d7a5752cSMel Gorman module_init(extfrag_debug_init);
1401d7a5752cSMel Gorman #endif
1402