xref: /linux/mm/vmstat.c (revision f0f2c2b5b40b5e621a47a6a274117cce77841f1e)
1 /*
2  *  linux/mm/vmstat.c
3  *
4  *  Manages VM statistics
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  zoned VM statistics
8  *  Copyright (C) 2006 Silicon Graphics, Inc.,
9  *		Christoph Lameter <christoph@lameter.com>
10  */
11 #include <linux/fs.h>
12 #include <linux/mm.h>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/cpu.h>
17 #include <linux/vmstat.h>
18 #include <linux/sched.h>
19 #include <linux/math64.h>
20 #include <linux/writeback.h>
21 #include <linux/compaction.h>
22 
23 #ifdef CONFIG_VM_EVENT_COUNTERS
24 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
25 EXPORT_PER_CPU_SYMBOL(vm_event_states);
26 
27 static void sum_vm_events(unsigned long *ret)
28 {
29 	int cpu;
30 	int i;
31 
32 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
33 
34 	for_each_online_cpu(cpu) {
35 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
36 
37 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
38 			ret[i] += this->event[i];
39 	}
40 }
41 
42 /*
43  * Accumulate the vm event counters across all CPUs.
44  * The result is unavoidably approximate - it can change
45  * during and after execution of this function.
46  */
47 void all_vm_events(unsigned long *ret)
48 {
49 	get_online_cpus();
50 	sum_vm_events(ret);
51 	put_online_cpus();
52 }
53 EXPORT_SYMBOL_GPL(all_vm_events);
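/*
 * Illustrative use (not taken from this file): a caller wanting a snapshot
 * of the event counters allocates an NR_VM_EVENT_ITEMS array and reads the
 * entries it cares about, e.g. the page fault count:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("pgfault so far: %lu\n", events[PGFAULT]);
 *
 * As noted above, the snapshot is only approximate.
 */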
54 
55 #ifdef CONFIG_HOTPLUG
56 /*
57  * Fold the foreign cpu events into our own.
58  *
59  * This adds the events of the given cpu to the current
60  * processor's counts but keeps the global totals constant.
61  */
62 void vm_events_fold_cpu(int cpu)
63 {
64 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
65 	int i;
66 
67 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
68 		count_vm_events(i, fold_state->event[i]);
69 		fold_state->event[i] = 0;
70 	}
71 }
72 #endif /* CONFIG_HOTPLUG */
73 
74 #endif /* CONFIG_VM_EVENT_COUNTERS */
75 
76 /*
77  * Manage combined zone based / global counters
78  *
79  * vm_stat contains the global counters
80  */
81 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
82 EXPORT_SYMBOL(vm_stat);
83 
84 #ifdef CONFIG_SMP
85 
86 static int calculate_threshold(struct zone *zone)
87 {
88 	int threshold;
89 	int mem;	/* memory in 128 MB units */
90 
91 	/*
92 	 * The threshold scales with the number of processors and the amount
93 	 * of memory per zone. More memory means that we can defer updates for
94 	 * longer; more processors could lead to more contention.
95 	 * fls() is used as a cheap way of getting logarithmic scaling.
96 	 *
97 	 * Some sample thresholds:
98 	 *
99 	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
100 	 * ------------------------------------------------------------------
101 	 * 8		1		1	0.9-1 GB	4
102 	 * 16		2		2	0.9-1 GB	4
103 	 * 20 		2		2	1-2 GB		5
104 	 * 24		2		2	2-4 GB		6
105 	 * 28		2		2	4-8 GB		7
106 	 * 32		2		2	8-16 GB		8
107 	 * 4		2		2	<128M		1
108 	 * 30		4		3	2-4 GB		5
109 	 * 48		4		3	8-16 GB		8
110 	 * 32		8		4	1-2 GB		4
111 	 * 32		8		4	0.9-1GB		4
112 	 * 10		16		5	<128M		1
113 	 * 40		16		5	900M		4
114 	 * 70		64		7	2-4 GB		5
115 	 * 84		64		7	4-8 GB		6
116 	 * 108		512		9	4-8 GB		6
117 	 * 125		1024		10	8-16 GB		8
118 	 * 125		1024		10	16-32 GB	9
119 	 */
120 
121 	mem = zone->present_pages >> (27 - PAGE_SHIFT);
122 
123 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
124 
125 	/*
126 	 * Maximum threshold is 125
127 	 */
128 	threshold = min(125, threshold);
129 
130 	return threshold;
131 }
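/*
 * Worked example (assuming 4KiB pages): an 8-cpu machine with a 1GB zone has
 * mem = 262144 >> 15 = 8 units of 128MB, so
 *
 *	threshold = 2 * fls(8) * (1 + fls(8)) = 2 * 4 * 5 = 40
 *
 * which stays well below the cap of 125.
 */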
132 
133 /*
134  * Refresh the thresholds for each zone.
135  */
136 static void refresh_zone_stat_thresholds(void)
137 {
138 	struct zone *zone;
139 	int cpu;
140 	int threshold;
141 
142 	for_each_populated_zone(zone) {
143 		unsigned long max_drift, tolerate_drift;
144 
145 		threshold = calculate_threshold(zone);
146 
147 		for_each_online_cpu(cpu)
148 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
149 							= threshold;
150 
151 		/*
152 		 * Only set percpu_drift_mark if there is a danger that
153 		 * NR_FREE_PAGES reports that the low watermark is fine when in
154 		 * fact the min watermark could be breached by an allocation.
155 		 */
156 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
157 		max_drift = num_online_cpus() * threshold;
158 		if (max_drift > tolerate_drift)
159 			zone->percpu_drift_mark = high_wmark_pages(zone) +
160 					max_drift;
161 	}
162 }
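/*
 * Rough sketch of why percpu_drift_mark matters: with 16 online cpus and a
 * stat_threshold of 40, the global NR_FREE_PAGES may lag reality by up to
 * 16 * 40 = 640 pages. If the gap between the low and min watermarks is
 * smaller than that, a watermark check could pass on stale data, so callers
 * near the watermarks fall back to a more precise sum (zone_nr_free_pages()).
 */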
163 
164 /*
165  * For use when we know that interrupts are disabled.
166  */
167 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
168 				int delta)
169 {
170 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
171 	s8 __percpu *p = pcp->vm_stat_diff + item;
172 	long x;
173 	long t;
174 
175 	x = delta + __this_cpu_read(*p);
176 
177 	t = __this_cpu_read(pcp->stat_threshold);
178 
179 	if (unlikely(x > t || x < -t)) {
180 		zone_page_state_add(x, zone, item);
181 		x = 0;
182 	}
183 	__this_cpu_write(*p, x);
184 }
185 EXPORT_SYMBOL(__mod_zone_page_state);
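/*
 * Typical use (illustrative, not from this file): the page allocator already
 * holds zone->lock with interrupts disabled when it takes pages off the free
 * lists, so it can call the cheap variant directly:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
 */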
186 
187 /*
188  * Optimized increment and decrement functions.
189  *
190  * These are only for a single page and therefore can take a struct page *
191  * argument instead of struct zone *. This allows the inclusion of the code
192  * generated for page_zone(page) into the optimized functions.
193  *
194  * No overflow check is necessary and therefore the differential can be
195  * incremented or decremented in place which may allow the compilers to
196  * generate better code.
197  * The increment or decrement is known and therefore one boundary check can
198  * be omitted.
199  *
200  * NOTE: These functions are very performance sensitive. Change only
201  * with care.
202  *
203  * Some processors have inc/dec instructions that are atomic with respect to
204  * an interrupt. However, the code must first determine the differential
205  * location in a zone based on the processor number and then inc/dec the
206  * counter. There is no guarantee without disabling preemption that the
207  * processor will not change in between, so that atomicity cannot be
208  * exploited in a useful way here.
209  */
210 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
211 {
212 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
213 	s8 __percpu *p = pcp->vm_stat_diff + item;
214 	s8 v, t;
215 
216 	v = __this_cpu_inc_return(*p);
217 	t = __this_cpu_read(pcp->stat_threshold);
218 	if (unlikely(v > t)) {
219 		s8 overstep = t >> 1;
220 
221 		zone_page_state_add(v + overstep, zone, item);
222 		__this_cpu_write(*p, -overstep);
223 	}
224 }
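/*
 * Worked example of the overstep: with stat_threshold t = 32 the 33rd
 * consecutive increment makes v = 33 > t. We then fold v + t/2 = 49 into the
 * zone counter and leave the per-cpu differential at -16, so roughly another
 * one and a half thresholds worth of increments can happen before the next
 * fold.
 */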
225 
226 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
227 {
228 	__inc_zone_state(page_zone(page), item);
229 }
230 EXPORT_SYMBOL(__inc_zone_page_state);
231 
232 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
233 {
234 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
235 	s8 __percpu *p = pcp->vm_stat_diff + item;
236 	s8 v, t;
237 
238 	v = __this_cpu_dec_return(*p);
239 	t = __this_cpu_read(pcp->stat_threshold);
240 	if (unlikely(v < -t)) {
241 		s8 overstep = t >> 1;
242 
243 		zone_page_state_add(v - overstep, zone, item);
244 		__this_cpu_write(*p, overstep);
245 	}
246 }
247 
248 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
249 {
250 	__dec_zone_state(page_zone(page), item);
251 }
252 EXPORT_SYMBOL(__dec_zone_page_state);
253 
254 #ifdef CONFIG_CMPXCHG_LOCAL
255 /*
256  * If we have cmpxchg_local support then we do not need to incur the overhead
257  * of local_irq_save/restore because we can use this_cpu_cmpxchg instead.
258  *
259  * mod_state() modifies the zone counter state through atomic per cpu
260  * operations.
261  *
262  * Overstep mode specifies how overstep should be handled:
263  *     0       No overstepping
264  *     1       Overstepping half of threshold
265  *     -1      Overstepping minus half of threshold
266  */
267 static inline void mod_state(struct zone *zone,
268        enum zone_stat_item item, int delta, int overstep_mode)
269 {
270 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
271 	s8 __percpu *p = pcp->vm_stat_diff + item;
272 	long o, n, t, z;
273 
274 	do {
275 		z = 0;  /* overflow to zone counters */
276 
277 		/*
278 		 * The fetching of the stat_threshold is racy. We may apply
279 		 * a counter threshold to the wrong cpu if we get
280 		 * rescheduled while executing here. However, the following
281 		 * will apply the threshold again and therefore bring the
282 		 * counter under the threshold.
283 		 */
284 		t = this_cpu_read(pcp->stat_threshold);
285 
286 		o = this_cpu_read(*p);
287 		n = delta + o;
288 
289 		if (n > t || n < -t) {
290 			int os = overstep_mode * (t >> 1);
291 
292 			/* Overflow must be added to zone counters */
293 			z = n + os;
294 			n = -os;
295 		}
296 	} while (this_cpu_cmpxchg(*p, o, n) != o);
297 
298 	if (z)
299 		zone_page_state_add(z, zone, item);
300 }
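/*
 * Illustrative race handled by the loop above: we read o = 5 and compute
 * n = 6, but before the cmpxchg an interrupt on this cpu bumps the
 * differential to 6. this_cpu_cmpxchg(*p, 5, 6) then fails because the
 * current value is no longer 5, so the loop re-reads o = 6 and retries with
 * n = 7. No update is lost even though interrupts stay enabled.
 */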
301 
302 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
303 					int delta)
304 {
305 	mod_state(zone, item, delta, 0);
306 }
307 EXPORT_SYMBOL(mod_zone_page_state);
308 
309 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
310 {
311 	mod_state(zone, item, 1, 1);
312 }
313 
314 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
315 {
316 	mod_state(page_zone(page), item, 1, 1);
317 }
318 EXPORT_SYMBOL(inc_zone_page_state);
319 
320 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
321 {
322 	mod_state(page_zone(page), item, -1, -1);
323 }
324 EXPORT_SYMBOL(dec_zone_page_state);
325 #else
326 /*
327  * Use interrupt disable to serialize counter updates
328  */
329 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
330 					int delta)
331 {
332 	unsigned long flags;
333 
334 	local_irq_save(flags);
335 	__mod_zone_page_state(zone, item, delta);
336 	local_irq_restore(flags);
337 }
338 EXPORT_SYMBOL(mod_zone_page_state);
339 
340 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
341 {
342 	unsigned long flags;
343 
344 	local_irq_save(flags);
345 	__inc_zone_state(zone, item);
346 	local_irq_restore(flags);
347 }
348 
349 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
350 {
351 	unsigned long flags;
352 	struct zone *zone;
353 
354 	zone = page_zone(page);
355 	local_irq_save(flags);
356 	__inc_zone_state(zone, item);
357 	local_irq_restore(flags);
358 }
359 EXPORT_SYMBOL(inc_zone_page_state);
360 
361 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
362 {
363 	unsigned long flags;
364 
365 	local_irq_save(flags);
366 	__dec_zone_page_state(page, item);
367 	local_irq_restore(flags);
368 }
369 EXPORT_SYMBOL(dec_zone_page_state);
370 #endif
371 
372 /*
373  * Update the zone counters for one cpu.
374  *
375  * The cpu specified must be either the current cpu or a processor that
376  * is not online. If it is the current cpu then the execution thread must
377  * be pinned to the current cpu.
378  *
379  * Note that refresh_cpu_vm_stats strives to only access
380  * node local memory. The per cpu pagesets on remote zones are placed
381  * in the memory local to the processor using that pageset. So the
382  * loop over all zones will access a series of cachelines local to
383  * the processor.
384  *
385  * The call to zone_page_state_add updates the cachelines with the
386  * statistics in the remote zone struct as well as the global cachelines
387  * with the global counters. These updates can cause remote node cache line
388  * bouncing and should therefore only be done when necessary.
389  */
390 void refresh_cpu_vm_stats(int cpu)
391 {
392 	struct zone *zone;
393 	int i;
394 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
395 
396 	for_each_populated_zone(zone) {
397 		struct per_cpu_pageset *p;
398 
399 		p = per_cpu_ptr(zone->pageset, cpu);
400 
401 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
402 			if (p->vm_stat_diff[i]) {
403 				unsigned long flags;
404 				int v;
405 
406 				local_irq_save(flags);
407 				v = p->vm_stat_diff[i];
408 				p->vm_stat_diff[i] = 0;
409 				local_irq_restore(flags);
410 				atomic_long_add(v, &zone->vm_stat[i]);
411 				global_diff[i] += v;
412 #ifdef CONFIG_NUMA
413 				/* 3 seconds idle till flush */
414 				p->expire = 3;
415 #endif
416 			}
417 		cond_resched();
418 #ifdef CONFIG_NUMA
419 		/*
420 		 * Deal with draining the remote pageset of this
421 		 * processor
422 		 *
423 		 * Check if there are pages remaining in this pageset;
424 		 * if not then there is nothing to expire.
425 		 */
426 		if (!p->expire || !p->pcp.count)
427 			continue;
428 
429 		/*
430 		 * We never drain zones local to this processor.
431 		 */
432 		if (zone_to_nid(zone) == numa_node_id()) {
433 			p->expire = 0;
434 			continue;
435 		}
436 
437 		p->expire--;
438 		if (p->expire)
439 			continue;
440 
441 		if (p->pcp.count)
442 			drain_zone_pages(zone, &p->pcp);
443 #endif
444 	}
445 
446 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
447 		if (global_diff[i])
448 			atomic_long_add(global_diff[i], &vm_stat[i]);
449 }
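/*
 * Example of the NUMA expire logic above (default settings assumed): a
 * remote zone's pageset still holds pages in pcp.count, so every pass that
 * folds no vm_stat_diff for it decrements p->expire. vmstat_update() runs
 * about once per sysctl_stat_interval (HZ, i.e. one second), so after three
 * such idle passes the cached pages are returned via drain_zone_pages().
 */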
450 
451 #endif
452 
453 #ifdef CONFIG_NUMA
454 /*
455  * preferred_zone = the preferred zone from the zonelist passed to the allocator
456  * z 	    = the zone from which the allocation occurred.
457  *
458  * Must be called with interrupts disabled.
459  */
460 void zone_statistics(struct zone *preferred_zone, struct zone *z)
461 {
462 	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
463 		__inc_zone_state(z, NUMA_HIT);
464 	} else {
465 		__inc_zone_state(z, NUMA_MISS);
466 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
467 	}
468 	if (z->node == numa_node_id())
469 		__inc_zone_state(z, NUMA_LOCAL);
470 	else
471 		__inc_zone_state(z, NUMA_OTHER);
472 }
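/*
 * Example (illustrative): a task on node 0 allocates memory, the preferred
 * zone is on node 0 but the page is taken from a node 1 zone. The node 1
 * zone gets NUMA_MISS, the preferred node 0 zone gets NUMA_FOREIGN, and
 * since the allocating cpu is not on node 1 the satisfying zone also gets
 * NUMA_OTHER.
 */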
473 #endif
474 
475 #ifdef CONFIG_COMPACTION
476 
477 struct contig_page_info {
478 	unsigned long free_pages;
479 	unsigned long free_blocks_total;
480 	unsigned long free_blocks_suitable;
481 };
482 
483 /*
484  * Calculate the number of free pages in a zone, how many contiguous
485  * pages are free and how many are large enough to satisfy an allocation of
486  * the target size. Note that this function makes no attempt to estimate
487  * how many suitable free blocks there *might* be if MOVABLE pages were
488  * migrated. Calculating that is possible, but expensive and can be
489  * figured out from userspace.
490  */
491 static void fill_contig_page_info(struct zone *zone,
492 				unsigned int suitable_order,
493 				struct contig_page_info *info)
494 {
495 	unsigned int order;
496 
497 	info->free_pages = 0;
498 	info->free_blocks_total = 0;
499 	info->free_blocks_suitable = 0;
500 
501 	for (order = 0; order < MAX_ORDER; order++) {
502 		unsigned long blocks;
503 
504 		/* Count number of free blocks */
505 		blocks = zone->free_area[order].nr_free;
506 		info->free_blocks_total += blocks;
507 
508 		/* Count free base pages */
509 		info->free_pages += blocks << order;
510 
511 		/* Count the suitable free blocks */
512 		if (order >= suitable_order)
513 			info->free_blocks_suitable += blocks <<
514 						(order - suitable_order);
515 	}
516 }
517 
518 /*
519  * A fragmentation index only makes sense if an allocation of a requested
520  * size would fail. If that is true, the fragmentation index indicates
521  * whether external fragmentation or a lack of memory was the problem.
522  * The value can be used to determine if page reclaim or compaction
523  * should be used.
524  */
525 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
526 {
527 	unsigned long requested = 1UL << order;
528 
529 	if (!info->free_blocks_total)
530 		return 0;
531 
532 	/* Fragmentation index only makes sense when a request would fail */
533 	if (info->free_blocks_suitable)
534 		return -1000;
535 
536 	/*
537 	 * Index is between 0 and 1 so return within 3 decimal places
538 	 *
539 	 * 0 => allocation would fail due to lack of memory
540 	 * 1 => allocation would fail due to fragmentation
541 	 */
542 	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
543 }
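/*
 * Worked example: an order-3 request (requested = 8) would fail in a zone
 * with free_pages = 1000 spread over free_blocks_total = 500 blocks, none of
 * them suitable. Then
 *
 *	index = 1000 - (1000 + 1000 * 1000 / 8) / 500 = 1000 - 252 = 748
 *
 * i.e. 0.748, leaning towards "fragmentation rather than lack of memory" and
 * hence towards compaction rather than reclaim.
 */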
544 
545 /* Same as __fragmentation_index() but allocates contig_page_info on the stack */
546 int fragmentation_index(struct zone *zone, unsigned int order)
547 {
548 	struct contig_page_info info;
549 
550 	fill_contig_page_info(zone, order, &info);
551 	return __fragmentation_index(order, &info);
552 }
553 #endif
554 
555 #if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
556 #include <linux/proc_fs.h>
557 #include <linux/seq_file.h>
558 
559 static char * const migratetype_names[MIGRATE_TYPES] = {
560 	"Unmovable",
561 	"Reclaimable",
562 	"Movable",
563 	"Reserve",
564 	"Isolate",
565 };
566 
567 static void *frag_start(struct seq_file *m, loff_t *pos)
568 {
569 	pg_data_t *pgdat;
570 	loff_t node = *pos;
571 	for (pgdat = first_online_pgdat();
572 	     pgdat && node;
573 	     pgdat = next_online_pgdat(pgdat))
574 		--node;
575 
576 	return pgdat;
577 }
578 
579 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
580 {
581 	pg_data_t *pgdat = (pg_data_t *)arg;
582 
583 	(*pos)++;
584 	return next_online_pgdat(pgdat);
585 }
586 
587 static void frag_stop(struct seq_file *m, void *arg)
588 {
589 }
590 
591 /* Walk all the zones in a node and print using a callback */
592 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
593 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
594 {
595 	struct zone *zone;
596 	struct zone *node_zones = pgdat->node_zones;
597 	unsigned long flags;
598 
599 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
600 		if (!populated_zone(zone))
601 			continue;
602 
603 		spin_lock_irqsave(&zone->lock, flags);
604 		print(m, pgdat, zone);
605 		spin_unlock_irqrestore(&zone->lock, flags);
606 	}
607 }
608 #endif
609 
610 #ifdef CONFIG_PROC_FS
611 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
612 						struct zone *zone)
613 {
614 	int order;
615 
616 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
617 	for (order = 0; order < MAX_ORDER; ++order)
618 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
619 	seq_putc(m, '\n');
620 }
621 
622 /*
623  * This walks the free areas for each zone.
624  */
625 static int frag_show(struct seq_file *m, void *arg)
626 {
627 	pg_data_t *pgdat = (pg_data_t *)arg;
628 	walk_zones_in_node(m, pgdat, frag_show_print);
629 	return 0;
630 }
631 
632 static void pagetypeinfo_showfree_print(struct seq_file *m,
633 					pg_data_t *pgdat, struct zone *zone)
634 {
635 	int order, mtype;
636 
637 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
638 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
639 					pgdat->node_id,
640 					zone->name,
641 					migratetype_names[mtype]);
642 		for (order = 0; order < MAX_ORDER; ++order) {
643 			unsigned long freecount = 0;
644 			struct free_area *area;
645 			struct list_head *curr;
646 
647 			area = &(zone->free_area[order]);
648 
649 			list_for_each(curr, &area->free_list[mtype])
650 				freecount++;
651 			seq_printf(m, "%6lu ", freecount);
652 		}
653 		seq_putc(m, '\n');
654 	}
655 }
656 
657 /* Print out the number of free pages at each order for each migratetype */
658 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
659 {
660 	int order;
661 	pg_data_t *pgdat = (pg_data_t *)arg;
662 
663 	/* Print header */
664 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
665 	for (order = 0; order < MAX_ORDER; ++order)
666 		seq_printf(m, "%6d ", order);
667 	seq_putc(m, '\n');
668 
669 	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
670 
671 	return 0;
672 }
673 
674 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
675 					pg_data_t *pgdat, struct zone *zone)
676 {
677 	int mtype;
678 	unsigned long pfn;
679 	unsigned long start_pfn = zone->zone_start_pfn;
680 	unsigned long end_pfn = start_pfn + zone->spanned_pages;
681 	unsigned long count[MIGRATE_TYPES] = { 0, };
682 
683 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
684 		struct page *page;
685 
686 		if (!pfn_valid(pfn))
687 			continue;
688 
689 		page = pfn_to_page(pfn);
690 
691 		/* Watch for unexpected holes punched in the memmap */
692 		if (!memmap_valid_within(pfn, page, zone))
693 			continue;
694 
695 		mtype = get_pageblock_migratetype(page);
696 
697 		if (mtype < MIGRATE_TYPES)
698 			count[mtype]++;
699 	}
700 
701 	/* Print counts */
702 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
703 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
704 		seq_printf(m, "%12lu ", count[mtype]);
705 	seq_putc(m, '\n');
706 }
707 
708 /* Print out the number of pageblocks for each migratetype */
709 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
710 {
711 	int mtype;
712 	pg_data_t *pgdat = (pg_data_t *)arg;
713 
714 	seq_printf(m, "\n%-23s", "Number of blocks type ");
715 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
716 		seq_printf(m, "%12s ", migratetype_names[mtype]);
717 	seq_putc(m, '\n');
718 	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
719 
720 	return 0;
721 }
722 
723 /*
724  * This prints out statistics in relation to grouping pages by mobility.
725  * It is expensive to collect so do not constantly read the file.
726  */
727 static int pagetypeinfo_show(struct seq_file *m, void *arg)
728 {
729 	pg_data_t *pgdat = (pg_data_t *)arg;
730 
731 	/* check memoryless node */
732 	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
733 		return 0;
734 
735 	seq_printf(m, "Page block order: %d\n", pageblock_order);
736 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
737 	seq_putc(m, '\n');
738 	pagetypeinfo_showfree(m, pgdat);
739 	pagetypeinfo_showblockcount(m, pgdat);
740 
741 	return 0;
742 }
743 
744 static const struct seq_operations fragmentation_op = {
745 	.start	= frag_start,
746 	.next	= frag_next,
747 	.stop	= frag_stop,
748 	.show	= frag_show,
749 };
750 
751 static int fragmentation_open(struct inode *inode, struct file *file)
752 {
753 	return seq_open(file, &fragmentation_op);
754 }
755 
756 static const struct file_operations fragmentation_file_operations = {
757 	.open		= fragmentation_open,
758 	.read		= seq_read,
759 	.llseek		= seq_lseek,
760 	.release	= seq_release,
761 };
762 
763 static const struct seq_operations pagetypeinfo_op = {
764 	.start	= frag_start,
765 	.next	= frag_next,
766 	.stop	= frag_stop,
767 	.show	= pagetypeinfo_show,
768 };
769 
770 static int pagetypeinfo_open(struct inode *inode, struct file *file)
771 {
772 	return seq_open(file, &pagetypeinfo_op);
773 }
774 
775 static const struct file_operations pagetypeinfo_file_ops = {
776 	.open		= pagetypeinfo_open,
777 	.read		= seq_read,
778 	.llseek		= seq_lseek,
779 	.release	= seq_release,
780 };
781 
782 #ifdef CONFIG_ZONE_DMA
783 #define TEXT_FOR_DMA(xx) xx "_dma",
784 #else
785 #define TEXT_FOR_DMA(xx)
786 #endif
787 
788 #ifdef CONFIG_ZONE_DMA32
789 #define TEXT_FOR_DMA32(xx) xx "_dma32",
790 #else
791 #define TEXT_FOR_DMA32(xx)
792 #endif
793 
794 #ifdef CONFIG_HIGHMEM
795 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
796 #else
797 #define TEXT_FOR_HIGHMEM(xx)
798 #endif
799 
800 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
801 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
802 
803 static const char * const vmstat_text[] = {
804 	/* Zoned VM counters */
805 	"nr_free_pages",
806 	"nr_inactive_anon",
807 	"nr_active_anon",
808 	"nr_inactive_file",
809 	"nr_active_file",
810 	"nr_unevictable",
811 	"nr_mlock",
812 	"nr_anon_pages",
813 	"nr_mapped",
814 	"nr_file_pages",
815 	"nr_dirty",
816 	"nr_writeback",
817 	"nr_slab_reclaimable",
818 	"nr_slab_unreclaimable",
819 	"nr_page_table_pages",
820 	"nr_kernel_stack",
821 	"nr_unstable",
822 	"nr_bounce",
823 	"nr_vmscan_write",
824 	"nr_writeback_temp",
825 	"nr_isolated_anon",
826 	"nr_isolated_file",
827 	"nr_shmem",
828 	"nr_dirtied",
829 	"nr_written",
830 
831 #ifdef CONFIG_NUMA
832 	"numa_hit",
833 	"numa_miss",
834 	"numa_foreign",
835 	"numa_interleave",
836 	"numa_local",
837 	"numa_other",
838 #endif
839 	"nr_dirty_threshold",
840 	"nr_dirty_background_threshold",
841 
842 #ifdef CONFIG_VM_EVENT_COUNTERS
843 	"pgpgin",
844 	"pgpgout",
845 	"pswpin",
846 	"pswpout",
847 
848 	TEXTS_FOR_ZONES("pgalloc")
849 
850 	"pgfree",
851 	"pgactivate",
852 	"pgdeactivate",
853 
854 	"pgfault",
855 	"pgmajfault",
856 
857 	TEXTS_FOR_ZONES("pgrefill")
858 	TEXTS_FOR_ZONES("pgsteal")
859 	TEXTS_FOR_ZONES("pgscan_kswapd")
860 	TEXTS_FOR_ZONES("pgscan_direct")
861 
862 #ifdef CONFIG_NUMA
863 	"zone_reclaim_failed",
864 #endif
865 	"pginodesteal",
866 	"slabs_scanned",
867 	"kswapd_steal",
868 	"kswapd_inodesteal",
869 	"kswapd_low_wmark_hit_quickly",
870 	"kswapd_high_wmark_hit_quickly",
871 	"kswapd_skip_congestion_wait",
872 	"pageoutrun",
873 	"allocstall",
874 
875 	"pgrotated",
876 
877 #ifdef CONFIG_COMPACTION
878 	"compact_blocks_moved",
879 	"compact_pages_moved",
880 	"compact_pagemigrate_failed",
881 	"compact_stall",
882 	"compact_fail",
883 	"compact_success",
884 #endif
885 
886 #ifdef CONFIG_HUGETLB_PAGE
887 	"htlb_buddy_alloc_success",
888 	"htlb_buddy_alloc_fail",
889 #endif
890 	"unevictable_pgs_culled",
891 	"unevictable_pgs_scanned",
892 	"unevictable_pgs_rescued",
893 	"unevictable_pgs_mlocked",
894 	"unevictable_pgs_munlocked",
895 	"unevictable_pgs_cleared",
896 	"unevictable_pgs_stranded",
897 	"unevictable_pgs_mlockfreed",
898 #endif
899 };
900 
901 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
902 							struct zone *zone)
903 {
904 	int i;
905 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
906 	seq_printf(m,
907 		   "\n  pages free     %lu"
908 		   "\n        min      %lu"
909 		   "\n        low      %lu"
910 		   "\n        high     %lu"
911 		   "\n        scanned  %lu"
912 		   "\n        spanned  %lu"
913 		   "\n        present  %lu",
914 		   zone_nr_free_pages(zone),
915 		   min_wmark_pages(zone),
916 		   low_wmark_pages(zone),
917 		   high_wmark_pages(zone),
918 		   zone->pages_scanned,
919 		   zone->spanned_pages,
920 		   zone->present_pages);
921 
922 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
923 		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
924 				zone_page_state(zone, i));
925 
926 	seq_printf(m,
927 		   "\n        protection: (%lu",
928 		   zone->lowmem_reserve[0]);
929 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
930 		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
931 	seq_printf(m,
932 		   ")"
933 		   "\n  pagesets");
934 	for_each_online_cpu(i) {
935 		struct per_cpu_pageset *pageset;
936 
937 		pageset = per_cpu_ptr(zone->pageset, i);
938 		seq_printf(m,
939 			   "\n    cpu: %i"
940 			   "\n              count: %i"
941 			   "\n              high:  %i"
942 			   "\n              batch: %i",
943 			   i,
944 			   pageset->pcp.count,
945 			   pageset->pcp.high,
946 			   pageset->pcp.batch);
947 #ifdef CONFIG_SMP
948 		seq_printf(m, "\n  vm stats threshold: %d",
949 				pageset->stat_threshold);
950 #endif
951 	}
952 	seq_printf(m,
953 		   "\n  all_unreclaimable: %u"
954 		   "\n  start_pfn:         %lu"
955 		   "\n  inactive_ratio:    %u",
956 		   zone->all_unreclaimable,
957 		   zone->zone_start_pfn,
958 		   zone->inactive_ratio);
959 	seq_putc(m, '\n');
960 }
961 
962 /*
963  * Output information about zones in @pgdat.
964  */
965 static int zoneinfo_show(struct seq_file *m, void *arg)
966 {
967 	pg_data_t *pgdat = (pg_data_t *)arg;
968 	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
969 	return 0;
970 }
971 
972 static const struct seq_operations zoneinfo_op = {
973 	.start	= frag_start, /* iterate over all nodes. The same as in
974 			       * fragmentation. */
975 	.next	= frag_next,
976 	.stop	= frag_stop,
977 	.show	= zoneinfo_show,
978 };
979 
980 static int zoneinfo_open(struct inode *inode, struct file *file)
981 {
982 	return seq_open(file, &zoneinfo_op);
983 }
984 
985 static const struct file_operations proc_zoneinfo_file_operations = {
986 	.open		= zoneinfo_open,
987 	.read		= seq_read,
988 	.llseek		= seq_lseek,
989 	.release	= seq_release,
990 };
991 
992 enum writeback_stat_item {
993 	NR_DIRTY_THRESHOLD,
994 	NR_DIRTY_BG_THRESHOLD,
995 	NR_VM_WRITEBACK_STAT_ITEMS,
996 };
997 
998 static void *vmstat_start(struct seq_file *m, loff_t *pos)
999 {
1000 	unsigned long *v;
1001 	int i, stat_items_size;
1002 
1003 	if (*pos >= ARRAY_SIZE(vmstat_text))
1004 		return NULL;
1005 	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1006 			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1007 
1008 #ifdef CONFIG_VM_EVENT_COUNTERS
1009 	stat_items_size += sizeof(struct vm_event_state);
1010 #endif
1011 
1012 	v = kmalloc(stat_items_size, GFP_KERNEL);
1013 	m->private = v;
1014 	if (!v)
1015 		return ERR_PTR(-ENOMEM);
1016 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1017 		v[i] = global_page_state(i);
1018 	v += NR_VM_ZONE_STAT_ITEMS;
1019 
1020 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1021 			    v + NR_DIRTY_THRESHOLD);
1022 	v += NR_VM_WRITEBACK_STAT_ITEMS;
1023 
1024 #ifdef CONFIG_VM_EVENT_COUNTERS
1025 	all_vm_events(v);
1026 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1027 	v[PGPGOUT] /= 2;
1028 #endif
1029 	return (unsigned long *)m->private + *pos;
1030 }
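/*
 * Layout of the buffer built above, matching the order of vmstat_text[]:
 *
 *	[0 .. NR_VM_ZONE_STAT_ITEMS-1]		zone counters
 *	[next NR_VM_WRITEBACK_STAT_ITEMS]	dirty thresholds
 *	[next NR_VM_EVENT_ITEMS]		event counters (if configured)
 */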
1031 
1032 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1033 {
1034 	(*pos)++;
1035 	if (*pos >= ARRAY_SIZE(vmstat_text))
1036 		return NULL;
1037 	return (unsigned long *)m->private + *pos;
1038 }
1039 
1040 static int vmstat_show(struct seq_file *m, void *arg)
1041 {
1042 	unsigned long *l = arg;
1043 	unsigned long off = l - (unsigned long *)m->private;
1044 
1045 	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1046 	return 0;
1047 }
1048 
1049 static void vmstat_stop(struct seq_file *m, void *arg)
1050 {
1051 	kfree(m->private);
1052 	m->private = NULL;
1053 }
1054 
1055 static const struct seq_operations vmstat_op = {
1056 	.start	= vmstat_start,
1057 	.next	= vmstat_next,
1058 	.stop	= vmstat_stop,
1059 	.show	= vmstat_show,
1060 };
1061 
1062 static int vmstat_open(struct inode *inode, struct file *file)
1063 {
1064 	return seq_open(file, &vmstat_op);
1065 }
1066 
1067 static const struct file_operations proc_vmstat_file_operations = {
1068 	.open		= vmstat_open,
1069 	.read		= seq_read,
1070 	.llseek		= seq_lseek,
1071 	.release	= seq_release,
1072 };
1073 #endif /* CONFIG_PROC_FS */
1074 
1075 #ifdef CONFIG_SMP
1076 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1077 int sysctl_stat_interval __read_mostly = HZ;
1078 
1079 static void vmstat_update(struct work_struct *w)
1080 {
1081 	refresh_cpu_vm_stats(smp_processor_id());
1082 	schedule_delayed_work(&__get_cpu_var(vmstat_work),
1083 		round_jiffies_relative(sysctl_stat_interval));
1084 }
1085 
1086 static void __cpuinit start_cpu_timer(int cpu)
1087 {
1088 	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1089 
1090 	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
1091 	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1092 }
1093 
1094 /*
1095  * Use the cpu notifier to ensure that the thresholds are recalculated
1096  * when necessary.
1097  */
1098 static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
1099 		unsigned long action,
1100 		void *hcpu)
1101 {
1102 	long cpu = (long)hcpu;
1103 
1104 	switch (action) {
1105 	case CPU_ONLINE:
1106 	case CPU_ONLINE_FROZEN:
1107 		refresh_zone_stat_thresholds();
1108 		start_cpu_timer(cpu);
1109 		node_set_state(cpu_to_node(cpu), N_CPU);
1110 		break;
1111 	case CPU_DOWN_PREPARE:
1112 	case CPU_DOWN_PREPARE_FROZEN:
1113 		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1114 		per_cpu(vmstat_work, cpu).work.func = NULL;
1115 		break;
1116 	case CPU_DOWN_FAILED:
1117 	case CPU_DOWN_FAILED_FROZEN:
1118 		start_cpu_timer(cpu);
1119 		break;
1120 	case CPU_DEAD:
1121 	case CPU_DEAD_FROZEN:
1122 		refresh_zone_stat_thresholds();
1123 		break;
1124 	default:
1125 		break;
1126 	}
1127 	return NOTIFY_OK;
1128 }
1129 
1130 static struct notifier_block __cpuinitdata vmstat_notifier =
1131 	{ &vmstat_cpuup_callback, NULL, 0 };
1132 #endif
1133 
1134 static int __init setup_vmstat(void)
1135 {
1136 #ifdef CONFIG_SMP
1137 	int cpu;
1138 
1139 	refresh_zone_stat_thresholds();
1140 	register_cpu_notifier(&vmstat_notifier);
1141 
1142 	for_each_online_cpu(cpu)
1143 		start_cpu_timer(cpu);
1144 #endif
1145 #ifdef CONFIG_PROC_FS
1146 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1147 	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1148 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1149 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1150 #endif
1151 	return 0;
1152 }
1153 module_init(setup_vmstat)
1154 
1155 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1156 #include <linux/debugfs.h>
1157 
1158 static struct dentry *extfrag_debug_root;
1159 
1160 /*
1161  * Return an index indicating how much of the available free memory is
1162  * unusable for an allocation of the requested size.
1163  */
1164 static int unusable_free_index(unsigned int order,
1165 				struct contig_page_info *info)
1166 {
1167 	/* No free memory is interpreted as all free memory is unusable */
1168 	if (info->free_pages == 0)
1169 		return 1000;
1170 
1171 	/*
1172 	 * Index should be a value between 0 and 1. Return a value to 3
1173 	 * decimal places.
1174 	 *
1175 	 * 0 => no fragmentation
1176 	 * 1 => high fragmentation
1177 	 */
1178 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1179 
1180 }
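/*
 * Worked example: with free_pages = 1000, order = 4 (16 pages per block) and
 * free_blocks_suitable = 10, the usable portion is 10 << 4 = 160 pages, so
 *
 *	index = (1000 - 160) * 1000 / 1000 = 840
 *
 * printed as 0.840: 84% of the free memory cannot serve an order-4 request.
 */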
1181 
1182 static void unusable_show_print(struct seq_file *m,
1183 					pg_data_t *pgdat, struct zone *zone)
1184 {
1185 	unsigned int order;
1186 	int index;
1187 	struct contig_page_info info;
1188 
1189 	seq_printf(m, "Node %d, zone %8s ",
1190 				pgdat->node_id,
1191 				zone->name);
1192 	for (order = 0; order < MAX_ORDER; ++order) {
1193 		fill_contig_page_info(zone, order, &info);
1194 		index = unusable_free_index(order, &info);
1195 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1196 	}
1197 
1198 	seq_putc(m, '\n');
1199 }
1200 
1201 /*
1202  * Display unusable free space index
1203  *
1204  * The unusable free space index measures how much of the available free
1205  * memory cannot be used to satisfy an allocation of a given size and is a
1206  * value between 0 and 1. The higher the value, the more of free memory is
1207  * unusable and by implication, the worse the external fragmentation is. This
1208  * can be expressed as a percentage by multiplying by 100.
1209  */
1210 static int unusable_show(struct seq_file *m, void *arg)
1211 {
1212 	pg_data_t *pgdat = (pg_data_t *)arg;
1213 
1214 	/* check memoryless node */
1215 	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
1216 		return 0;
1217 
1218 	walk_zones_in_node(m, pgdat, unusable_show_print);
1219 
1220 	return 0;
1221 }
1222 
1223 static const struct seq_operations unusable_op = {
1224 	.start	= frag_start,
1225 	.next	= frag_next,
1226 	.stop	= frag_stop,
1227 	.show	= unusable_show,
1228 };
1229 
1230 static int unusable_open(struct inode *inode, struct file *file)
1231 {
1232 	return seq_open(file, &unusable_op);
1233 }
1234 
1235 static const struct file_operations unusable_file_ops = {
1236 	.open		= unusable_open,
1237 	.read		= seq_read,
1238 	.llseek		= seq_lseek,
1239 	.release	= seq_release,
1240 };
1241 
1242 static void extfrag_show_print(struct seq_file *m,
1243 					pg_data_t *pgdat, struct zone *zone)
1244 {
1245 	unsigned int order;
1246 	int index;
1247 
1248 	/* Alloc on stack as interrupts are disabled for zone walk */
1249 	struct contig_page_info info;
1250 
1251 	seq_printf(m, "Node %d, zone %8s ",
1252 				pgdat->node_id,
1253 				zone->name);
1254 	for (order = 0; order < MAX_ORDER; ++order) {
1255 		fill_contig_page_info(zone, order, &info);
1256 		index = __fragmentation_index(order, &info);
1257 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1258 	}
1259 
1260 	seq_putc(m, '\n');
1261 }
1262 
1263 /*
1264  * Display fragmentation index for orders that allocations would fail for
1265  */
1266 static int extfrag_show(struct seq_file *m, void *arg)
1267 {
1268 	pg_data_t *pgdat = (pg_data_t *)arg;
1269 
1270 	walk_zones_in_node(m, pgdat, extfrag_show_print);
1271 
1272 	return 0;
1273 }
1274 
1275 static const struct seq_operations extfrag_op = {
1276 	.start	= frag_start,
1277 	.next	= frag_next,
1278 	.stop	= frag_stop,
1279 	.show	= extfrag_show,
1280 };
1281 
1282 static int extfrag_open(struct inode *inode, struct file *file)
1283 {
1284 	return seq_open(file, &extfrag_op);
1285 }
1286 
1287 static const struct file_operations extfrag_file_ops = {
1288 	.open		= extfrag_open,
1289 	.read		= seq_read,
1290 	.llseek		= seq_lseek,
1291 	.release	= seq_release,
1292 };
1293 
1294 static int __init extfrag_debug_init(void)
1295 {
1296 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1297 	if (!extfrag_debug_root)
1298 		return -ENOMEM;
1299 
1300 	if (!debugfs_create_file("unusable_index", 0444,
1301 			extfrag_debug_root, NULL, &unusable_file_ops))
1302 		return -ENOMEM;
1303 
1304 	if (!debugfs_create_file("extfrag_index", 0444,
1305 			extfrag_debug_root, NULL, &extfrag_file_ops))
1306 		return -ENOMEM;
1307 
1308 	return 0;
1309 }
1310 
1311 module_init(extfrag_debug_init);
1312 #endif
1313