xref: /linux/mm/page_alloc.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/config.h>
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 #include <linux/interrupt.h>
22 #include <linux/pagemap.h>
23 #include <linux/bootmem.h>
24 #include <linux/compiler.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/suspend.h>
28 #include <linux/pagevec.h>
29 #include <linux/blkdev.h>
30 #include <linux/slab.h>
31 #include <linux/notifier.h>
32 #include <linux/topology.h>
33 #include <linux/sysctl.h>
34 #include <linux/cpu.h>
35 #include <linux/cpuset.h>
36 #include <linux/memory_hotplug.h>
37 #include <linux/nodemask.h>
38 #include <linux/vmalloc.h>
39 #include <linux/mempolicy.h>
40 
41 #include <asm/tlbflush.h>
42 #include <asm/div64.h>
43 #include "internal.h"
44 
45 /*
46  * MCD - HACK: Find somewhere to initialize this EARLY, or make this
47  * initializer cleaner
48  */
49 nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
50 EXPORT_SYMBOL(node_online_map);
51 nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
52 EXPORT_SYMBOL(node_possible_map);
53 unsigned long totalram_pages __read_mostly;
54 unsigned long totalhigh_pages __read_mostly;
55 unsigned long totalreserve_pages __read_mostly;
56 long nr_swap_pages;
57 int percpu_pagelist_fraction;
58 
59 static void __free_pages_ok(struct page *page, unsigned int order);
60 
61 /*
62  * results with 256, 32 in the lowmem_reserve sysctl:
63  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
64  *	1G machine -> (16M dma, 784M normal, 224M high)
65  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
66  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
67  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
68  *
69  * TBD: should special case ZONE_DMA32 machines here - in those we normally
70  * don't need any ZONE_NORMAL reservation
71  */
72 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
73 
74 EXPORT_SYMBOL(totalram_pages);
75 
76 /*
77  * Used by page_zone() to look up the address of the struct zone whose
78  * id is encoded in the upper bits of page->flags
79  */
80 struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
81 EXPORT_SYMBOL(zone_table);
82 
83 static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
84 int min_free_kbytes = 1024;
85 
86 unsigned long __initdata nr_kernel_pages;
87 unsigned long __initdata nr_all_pages;
88 
89 #ifdef CONFIG_DEBUG_VM
90 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
91 {
92 	int ret = 0;
93 	unsigned seq;
94 	unsigned long pfn = page_to_pfn(page);
95 
96 	do {
97 		seq = zone_span_seqbegin(zone);
98 		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
99 			ret = 1;
100 		else if (pfn < zone->zone_start_pfn)
101 			ret = 1;
102 	} while (zone_span_seqretry(zone, seq));
103 
104 	return ret;
105 }
106 
107 static int page_is_consistent(struct zone *zone, struct page *page)
108 {
109 #ifdef CONFIG_HOLES_IN_ZONE
110 	if (!pfn_valid(page_to_pfn(page)))
111 		return 0;
112 #endif
113 	if (zone != page_zone(page))
114 		return 0;
115 
116 	return 1;
117 }
118 /*
119  * Temporary debugging check for pages not lying within a given zone.
120  */
121 static int bad_range(struct zone *zone, struct page *page)
122 {
123 	if (page_outside_zone_boundaries(zone, page))
124 		return 1;
125 	if (!page_is_consistent(zone, page))
126 		return 1;
127 
128 	return 0;
129 }
130 
131 #else
132 static inline int bad_range(struct zone *zone, struct page *page)
133 {
134 	return 0;
135 }
136 #endif
137 
138 static void bad_page(struct page *page)
139 {
140 	printk(KERN_EMERG "Bad page state in process '%s'\n"
141 		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
142 		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
143 		KERN_EMERG "Backtrace:\n",
144 		current->comm, page, (int)(2*sizeof(unsigned long)),
145 		(unsigned long)page->flags, page->mapping,
146 		page_mapcount(page), page_count(page));
147 	dump_stack();
148 	page->flags &= ~(1 << PG_lru	|
149 			1 << PG_private |
150 			1 << PG_locked	|
151 			1 << PG_active	|
152 			1 << PG_dirty	|
153 			1 << PG_reclaim |
154 			1 << PG_slab    |
155 			1 << PG_swapcache |
156 			1 << PG_writeback |
157 			1 << PG_buddy );
158 	set_page_count(page, 0);
159 	reset_page_mapcount(page);
160 	page->mapping = NULL;
161 	add_taint(TAINT_BAD_PAGE);
162 }
163 
164 /*
165  * Higher-order pages are called "compound pages".  They are structured as follows:
166  *
167  * The first PAGE_SIZE page is called the "head page".
168  *
169  * The remaining PAGE_SIZE pages are called "tail pages".
170  *
171  * All pages have PG_compound set.  All pages have their ->private pointing at
172  * the head page (even the head page has this).
173  *
174  * The first tail page's ->lru.next holds the address of the compound page's
175  * put_page() function.  Its ->lru.prev holds the order of allocation.
176  * This usage means that zero-order pages may not be compound.
177  */
178 
179 static void free_compound_page(struct page *page)
180 {
181 	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
182 }
183 
184 static void prep_compound_page(struct page *page, unsigned long order)
185 {
186 	int i;
187 	int nr_pages = 1 << order;
188 
189 	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
190 	page[1].lru.prev = (void *)order;
191 	for (i = 0; i < nr_pages; i++) {
192 		struct page *p = page + i;
193 
194 		__SetPageCompound(p);
195 		set_page_private(p, (unsigned long)page);
196 	}
197 }
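/*
 * For an order-2 allocation, for example, the four pages page[0..3] all get
 * PG_compound and ->private pointing back at page[0]; page[1].lru.next
 * carries the free_compound_page destructor and page[1].lru.prev carries the
 * order (2), which destroy_compound_page() checks again at free time.
 */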
198 
199 static void destroy_compound_page(struct page *page, unsigned long order)
200 {
201 	int i;
202 	int nr_pages = 1 << order;
203 
204 	if (unlikely((unsigned long)page[1].lru.prev != order))
205 		bad_page(page);
206 
207 	for (i = 0; i < nr_pages; i++) {
208 		struct page *p = page + i;
209 
210 		if (unlikely(!PageCompound(p) |
211 				(page_private(p) != (unsigned long)page)))
212 			bad_page(page);
213 		__ClearPageCompound(p);
214 	}
215 }
216 
217 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
218 {
219 	int i;
220 
221 	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
222 	/*
223 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
224 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
225 	 */
226 	BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
227 	for (i = 0; i < (1 << order); i++)
228 		clear_highpage(page + i);
229 }
230 
231 /*
232  * Functions for dealing with a page's order in the buddy system.
233  * zone->lock is already acquired when we use these,
234  * so we don't need atomic page->flags operations here.
235  */
236 static inline unsigned long page_order(struct page *page)
237 {
238 	return page_private(page);
239 }
240 
241 static inline void set_page_order(struct page *page, int order)
242 {
243 	set_page_private(page, order);
244 	__SetPageBuddy(page);
245 }
246 
247 static inline void rmv_page_order(struct page *page)
248 {
249 	__ClearPageBuddy(page);
250 	set_page_private(page, 0);
251 }
252 
253 /*
254  * Locate the struct page for both the matching buddy in our
255  * pair (buddy1) and the combined order-(O+1) page they form (page).
256  *
257  * 1) Any buddy B1 will have an order O twin B2 which satisfies
258  * the following equation:
259  *     B2 = B1 ^ (1 << O)
260  * For example, if the starting buddy (buddy1) is #8, its order-1
261  * buddy is #10:
262  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
263  *
264  * 2) Any buddy B will have an order O+1 parent P which
265  * satisfies the following equation:
266  *     P = B & ~(1 << O)
267  *
268  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
269  */
270 static inline struct page *
271 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
272 {
273 	unsigned long buddy_idx = page_idx ^ (1 << order);
274 
275 	return page + (buddy_idx - page_idx);
276 }
277 
278 static inline unsigned long
279 __find_combined_index(unsigned long page_idx, unsigned int order)
280 {
281 	return (page_idx & ~(1 << order));
282 }
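/*
 * A worked example of the index math in the two helpers above: for a page at
 * page_idx 8 being considered at order 1, __page_find_buddy() computes
 * buddy_idx = 8 ^ (1 << 1) = 10, and __find_combined_index() computes
 * 8 & ~(1 << 1) = 8, i.e. the merged order-2 block starts at index 8 and
 * spans indices 8..11.
 */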
283 
284 /*
285  * This function checks whether a page is free && is the buddy.
286  * We can coalesce a page and its buddy if
287  * (a) the buddy is not in a hole &&
288  * (b) the buddy is in the buddy system &&
289  * (c) a page and its buddy have the same order.
290  *
291  * For recording whether a page is in the buddy system, we use PG_buddy.
292  * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
293  *
294  * For recording page's order, we use page_private(page).
295  */
296 static inline int page_is_buddy(struct page *page, int order)
297 {
298 #ifdef CONFIG_HOLES_IN_ZONE
299 	if (!pfn_valid(page_to_pfn(page)))
300 		return 0;
301 #endif
302 
303 	if (PageBuddy(page) && page_order(page) == order) {
304 		BUG_ON(page_count(page) != 0);
305 		return 1;
306 	}
307 	return 0;
308 }
309 
310 /*
311  * Freeing function for a buddy system allocator.
312  *
313  * The concept of a buddy system is to maintain a direct-mapped table
314  * (containing bit values) for memory blocks of various "orders".
315  * The bottom level table contains the map for the smallest allocatable
316  * units of memory (here, pages), and each level above it describes
317  * pairs of units from the levels below, hence, "buddies".
318  * At a high level, all that happens here is marking the table entry
319  * at the bottom level available, and propagating the changes upward
320  * as necessary, plus some accounting needed to play nicely with other
321  * parts of the VM system.
322  * At each level, we keep a list of pages, which are heads of contiguous
323  * free pages of length (1 << order) and marked with PG_buddy. The page's
324  * order is recorded in the page_private(page) field.
325  * So when we are allocating or freeing one, we can derive the state of the
326  * other.  That is, if we allocate a small block, and both were
327  * free, the remainder of the region must be split into blocks.
328  * If a block is freed, and its buddy is also free, then this
329  * triggers coalescing into a block of larger size.
330  *
331  * -- wli
332  */
333 
334 static inline void __free_one_page(struct page *page,
335 		struct zone *zone, unsigned int order)
336 {
337 	unsigned long page_idx;
338 	int order_size = 1 << order;
339 
340 	if (unlikely(PageCompound(page)))
341 		destroy_compound_page(page, order);
342 
343 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
344 
345 	BUG_ON(page_idx & (order_size - 1));
346 	BUG_ON(bad_range(zone, page));
347 
348 	zone->free_pages += order_size;
349 	while (order < MAX_ORDER-1) {
350 		unsigned long combined_idx;
351 		struct free_area *area;
352 		struct page *buddy;
353 
354 		buddy = __page_find_buddy(page, page_idx, order);
355 		if (!page_is_buddy(buddy, order))
356 			break;		/* Move the buddy up one level. */
357 
358 		list_del(&buddy->lru);
359 		area = zone->free_area + order;
360 		area->nr_free--;
361 		rmv_page_order(buddy);
362 		combined_idx = __find_combined_index(page_idx, order);
363 		page = page + (combined_idx - page_idx);
364 		page_idx = combined_idx;
365 		order++;
366 	}
367 	set_page_order(page, order);
368 	list_add(&page->lru, &zone->free_area[order].free_list);
369 	zone->free_area[order].nr_free++;
370 }
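/*
 * A worked example of the coalescing loop above: freeing the order-0 page at
 * page_idx 8 while its buddy at idx 9 is free (PG_buddy set, order 0) removes
 * idx 9 from the order-0 free list and merges the pair into an order-1 block
 * at idx 8; if idx 10..11 also form a free order-1 buddy, the loop merges
 * again into an order-2 block at idx 8 before placing it on the order-2
 * free list.
 */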
371 
372 static inline int free_pages_check(struct page *page)
373 {
374 	if (unlikely(page_mapcount(page) |
375 		(page->mapping != NULL)  |
376 		(page_count(page) != 0)  |
377 		(page->flags & (
378 			1 << PG_lru	|
379 			1 << PG_private |
380 			1 << PG_locked	|
381 			1 << PG_active	|
382 			1 << PG_reclaim	|
383 			1 << PG_slab	|
384 			1 << PG_swapcache |
385 			1 << PG_writeback |
386 			1 << PG_reserved |
387 			1 << PG_buddy ))))
388 		bad_page(page);
389 	if (PageDirty(page))
390 		__ClearPageDirty(page);
391 	/*
392 	 * For now, we report if PG_reserved was found set, but do not
393 	 * clear it, and do not free the page.  But we shall soon need
394 	 * to do more, for when the ZERO_PAGE count wraps negative.
395 	 */
396 	return PageReserved(page);
397 }
398 
399 /*
400  * Frees a list of pages.
401  * Assumes all pages on list are in same zone, and of same order.
402  * count is the number of pages to free.
403  *
404  * If the zone was previously in an "all pages pinned" state then look to
405  * see if this freeing clears that state.
406  *
407  * And clear the zone's pages_scanned counter, to hold off the "all pages are
408  * pinned" detection logic.
409  */
410 static void free_pages_bulk(struct zone *zone, int count,
411 					struct list_head *list, int order)
412 {
413 	spin_lock(&zone->lock);
414 	zone->all_unreclaimable = 0;
415 	zone->pages_scanned = 0;
416 	while (count--) {
417 		struct page *page;
418 
419 		BUG_ON(list_empty(list));
420 		page = list_entry(list->prev, struct page, lru);
421 		/* have to delete it, as __free_one_page manipulates the list */
422 		list_del(&page->lru);
423 		__free_one_page(page, zone, order);
424 	}
425 	spin_unlock(&zone->lock);
426 }
427 
428 static void free_one_page(struct zone *zone, struct page *page, int order)
429 {
430 	LIST_HEAD(list);
431 	list_add(&page->lru, &list);
432 	free_pages_bulk(zone, 1, &list, order);
433 }
434 
435 static void __free_pages_ok(struct page *page, unsigned int order)
436 {
437 	unsigned long flags;
438 	int i;
439 	int reserved = 0;
440 
441 	arch_free_page(page, order);
442 	if (!PageHighMem(page))
443 		mutex_debug_check_no_locks_freed(page_address(page),
444 						 PAGE_SIZE<<order);
445 
446 	for (i = 0 ; i < (1 << order) ; ++i)
447 		reserved += free_pages_check(page + i);
448 	if (reserved)
449 		return;
450 
451 	kernel_map_pages(page, 1 << order, 0);
452 	local_irq_save(flags);
453 	__mod_page_state(pgfree, 1 << order);
454 	free_one_page(page_zone(page), page, order);
455 	local_irq_restore(flags);
456 }
457 
458 /*
459  * permit the bootmem allocator to evade page validation on high-order frees
460  */
461 void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
462 {
463 	if (order == 0) {
464 		__ClearPageReserved(page);
465 		set_page_count(page, 0);
466 		set_page_refcounted(page);
467 		__free_page(page);
468 	} else {
469 		int loop;
470 
471 		prefetchw(page);
472 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
473 			struct page *p = &page[loop];
474 
475 			if (loop + 1 < BITS_PER_LONG)
476 				prefetchw(p + 1);
477 			__ClearPageReserved(p);
478 			set_page_count(p, 0);
479 		}
480 
481 		set_page_refcounted(page);
482 		__free_pages(page, order);
483 	}
484 }
485 
486 
487 /*
488  * The order of subdivision here is critical for the IO subsystem.
489  * Please do not alter this order without good reasons and regression
490  * testing. Specifically, as large blocks of memory are subdivided,
491  * the order in which smaller blocks are delivered depends on the order
492  * they're subdivided in this function. This is the primary factor
493  * influencing the order in which pages are delivered to the IO
494  * subsystem according to empirical testing, and this is also justified
495  * by considering the behavior of a buddy system containing a single
496  * large block of memory acted on by a series of small allocations.
497  * This behavior is a critical factor in sglist merging's success.
498  *
499  * -- wli
500  */
501 static inline void expand(struct zone *zone, struct page *page,
502  	int low, int high, struct free_area *area)
503 {
504 	unsigned long size = 1 << high;
505 
506 	while (high > low) {
507 		area--;
508 		high--;
509 		size >>= 1;
510 		BUG_ON(bad_range(zone, &page[size]));
511 		list_add(&page[size].lru, &area->free_list);
512 		area->nr_free++;
513 		set_page_order(&page[size], high);
514 	}
515 }
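/*
 * For instance, satisfying an order-0 request from an order-3 block
 * (low = 0, high = 3, size = 8): the loop above returns the halves
 * page[4..7], page[2..3] and page[1] to the order-2, order-1 and order-0
 * free lists respectively, leaving page[0] for the caller (__rmqueue)
 * to hand out.
 */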
516 
517 /*
518  * This page is about to be returned from the page allocator
519  */
520 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
521 {
522 	if (unlikely(page_mapcount(page) |
523 		(page->mapping != NULL)  |
524 		(page_count(page) != 0)  |
525 		(page->flags & (
526 			1 << PG_lru	|
527 			1 << PG_private	|
528 			1 << PG_locked	|
529 			1 << PG_active	|
530 			1 << PG_dirty	|
531 			1 << PG_reclaim	|
532 			1 << PG_slab    |
533 			1 << PG_swapcache |
534 			1 << PG_writeback |
535 			1 << PG_reserved |
536 			1 << PG_buddy ))))
537 		bad_page(page);
538 
539 	/*
540 	 * For now, we report if PG_reserved was found set, but do not
541 	 * clear it, and do not allocate the page: as a safety net.
542 	 */
543 	if (PageReserved(page))
544 		return 1;
545 
546 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
547 			1 << PG_referenced | 1 << PG_arch_1 |
548 			1 << PG_checked | 1 << PG_mappedtodisk);
549 	set_page_private(page, 0);
550 	set_page_refcounted(page);
551 	kernel_map_pages(page, 1 << order, 1);
552 
553 	if (gfp_flags & __GFP_ZERO)
554 		prep_zero_page(page, order, gfp_flags);
555 
556 	if (order && (gfp_flags & __GFP_COMP))
557 		prep_compound_page(page, order);
558 
559 	return 0;
560 }
561 
562 /*
563  * Do the hard work of removing an element from the buddy allocator.
564  * Call me with the zone->lock already held.
565  */
566 static struct page *__rmqueue(struct zone *zone, unsigned int order)
567 {
568 	struct free_area * area;
569 	unsigned int current_order;
570 	struct page *page;
571 
572 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
573 		area = zone->free_area + current_order;
574 		if (list_empty(&area->free_list))
575 			continue;
576 
577 		page = list_entry(area->free_list.next, struct page, lru);
578 		list_del(&page->lru);
579 		rmv_page_order(page);
580 		area->nr_free--;
581 		zone->free_pages -= 1UL << order;
582 		expand(zone, page, order, current_order, area);
583 		return page;
584 	}
585 
586 	return NULL;
587 }
588 
589 /*
590  * Obtain a specified number of elements from the buddy allocator, all under
591  * a single hold of the lock, for efficiency.  Add them to the supplied list.
592  * Returns the number of new pages which were placed at *list.
593  */
594 static int rmqueue_bulk(struct zone *zone, unsigned int order,
595 			unsigned long count, struct list_head *list)
596 {
597 	int i;
598 
599 	spin_lock(&zone->lock);
600 	for (i = 0; i < count; ++i) {
601 		struct page *page = __rmqueue(zone, order);
602 		if (unlikely(page == NULL))
603 			break;
604 		list_add_tail(&page->lru, list);
605 	}
606 	spin_unlock(&zone->lock);
607 	return i;
608 }
609 
610 #ifdef CONFIG_NUMA
611 /*
612  * Called from the slab reaper to drain pagesets on a particular node that
613  * belong to the currently executing processor.
614  * Note that this function must be called with the thread pinned to
615  * a single processor.
616  */
617 void drain_node_pages(int nodeid)
618 {
619 	int i, z;
620 	unsigned long flags;
621 
622 	for (z = 0; z < MAX_NR_ZONES; z++) {
623 		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
624 		struct per_cpu_pageset *pset;
625 
626 		pset = zone_pcp(zone, smp_processor_id());
627 		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
628 			struct per_cpu_pages *pcp;
629 
630 			pcp = &pset->pcp[i];
631 			if (pcp->count) {
632 				local_irq_save(flags);
633 				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
634 				pcp->count = 0;
635 				local_irq_restore(flags);
636 			}
637 		}
638 	}
639 }
640 #endif
641 
642 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
643 static void __drain_pages(unsigned int cpu)
644 {
645 	unsigned long flags;
646 	struct zone *zone;
647 	int i;
648 
649 	for_each_zone(zone) {
650 		struct per_cpu_pageset *pset;
651 
652 		pset = zone_pcp(zone, cpu);
653 		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
654 			struct per_cpu_pages *pcp;
655 
656 			pcp = &pset->pcp[i];
657 			local_irq_save(flags);
658 			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
659 			pcp->count = 0;
660 			local_irq_restore(flags);
661 		}
662 	}
663 }
664 #endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
665 
666 #ifdef CONFIG_PM
667 
668 void mark_free_pages(struct zone *zone)
669 {
670 	unsigned long zone_pfn, flags;
671 	int order;
672 	struct list_head *curr;
673 
674 	if (!zone->spanned_pages)
675 		return;
676 
677 	spin_lock_irqsave(&zone->lock, flags);
678 	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
679 		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
680 
681 	for (order = MAX_ORDER - 1; order >= 0; --order)
682 		list_for_each(curr, &zone->free_area[order].free_list) {
683 			unsigned long start_pfn, i;
684 
685 			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
686 
687 			for (i=0; i < (1<<order); i++)
688 				SetPageNosaveFree(pfn_to_page(start_pfn+i));
689 	}
690 	spin_unlock_irqrestore(&zone->lock, flags);
691 }
692 
693 /*
694  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
695  */
696 void drain_local_pages(void)
697 {
698 	unsigned long flags;
699 
700 	local_irq_save(flags);
701 	__drain_pages(smp_processor_id());
702 	local_irq_restore(flags);
703 }
704 #endif /* CONFIG_PM */
705 
706 static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
707 {
708 #ifdef CONFIG_NUMA
709 	pg_data_t *pg = z->zone_pgdat;
710 	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
711 	struct per_cpu_pageset *p;
712 
713 	p = zone_pcp(z, cpu);
714 	if (pg == orig) {
715 		p->numa_hit++;
716 	} else {
717 		p->numa_miss++;
718 		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
719 	}
720 	if (pg == NODE_DATA(numa_node_id()))
721 		p->local_node++;
722 	else
723 		p->other_node++;
724 #endif
725 }
726 
727 /*
728  * Free a 0-order page
729  */
730 static void fastcall free_hot_cold_page(struct page *page, int cold)
731 {
732 	struct zone *zone = page_zone(page);
733 	struct per_cpu_pages *pcp;
734 	unsigned long flags;
735 
736 	arch_free_page(page, 0);
737 
738 	if (PageAnon(page))
739 		page->mapping = NULL;
740 	if (free_pages_check(page))
741 		return;
742 
743 	kernel_map_pages(page, 1, 0);
744 
745 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
746 	local_irq_save(flags);
747 	__inc_page_state(pgfree);
748 	list_add(&page->lru, &pcp->list);
749 	pcp->count++;
750 	if (pcp->count >= pcp->high) {
751 		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
752 		pcp->count -= pcp->batch;
753 	}
754 	local_irq_restore(flags);
755 	put_cpu();
756 }
757 
758 void fastcall free_hot_page(struct page *page)
759 {
760 	free_hot_cold_page(page, 0);
761 }
762 
763 void fastcall free_cold_page(struct page *page)
764 {
765 	free_hot_cold_page(page, 1);
766 }
767 
768 /*
769  * split_page takes a non-compound higher-order page, and splits it into
770  * n (1<<order) sub-pages: page[0..n-1]
771  * Each sub-page must be freed individually.
772  *
773  * Note: this is probably too low level an operation for use in drivers.
774  * Please consult with lkml before using this in your driver.
775  */
776 void split_page(struct page *page, unsigned int order)
777 {
778 	int i;
779 
780 	BUG_ON(PageCompound(page));
781 	BUG_ON(!page_count(page));
782 	for (i = 1; i < (1 << order); i++)
783 		set_page_refcounted(page + i);
784 }
785 
786 /*
787  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
788  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
789  * or two.
790  */
791 static struct page *buffered_rmqueue(struct zonelist *zonelist,
792 			struct zone *zone, int order, gfp_t gfp_flags)
793 {
794 	unsigned long flags;
795 	struct page *page;
796 	int cold = !!(gfp_flags & __GFP_COLD);
797 	int cpu;
798 
799 again:
800 	cpu  = get_cpu();
801 	if (likely(order == 0)) {
802 		struct per_cpu_pages *pcp;
803 
804 		pcp = &zone_pcp(zone, cpu)->pcp[cold];
805 		local_irq_save(flags);
806 		if (!pcp->count) {
807 			pcp->count += rmqueue_bulk(zone, 0,
808 						pcp->batch, &pcp->list);
809 			if (unlikely(!pcp->count))
810 				goto failed;
811 		}
812 		page = list_entry(pcp->list.next, struct page, lru);
813 		list_del(&page->lru);
814 		pcp->count--;
815 	} else {
816 		spin_lock_irqsave(&zone->lock, flags);
817 		page = __rmqueue(zone, order);
818 		spin_unlock(&zone->lock);
819 		if (!page)
820 			goto failed;
821 	}
822 
823 	__mod_page_state_zone(zone, pgalloc, 1 << order);
824 	zone_statistics(zonelist, zone, cpu);
825 	local_irq_restore(flags);
826 	put_cpu();
827 
828 	BUG_ON(bad_range(zone, page));
829 	if (prep_new_page(page, order, gfp_flags))
830 		goto again;
831 	return page;
832 
833 failed:
834 	local_irq_restore(flags);
835 	put_cpu();
836 	return NULL;
837 }
838 
839 #define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
840 #define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
841 #define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
842 #define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
843 #define ALLOC_HARDER		0x10 /* try to alloc harder */
844 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
845 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
846 
847 /*
848  * Return 1 if free pages are above 'mark'. This takes into account the order
849  * of the allocation.
850  */
851 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
852 		      int classzone_idx, int alloc_flags)
853 {
854 	/* free_pages may go negative - that's OK */
855 	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
856 	int o;
857 
858 	if (alloc_flags & ALLOC_HIGH)
859 		min -= min / 2;
860 	if (alloc_flags & ALLOC_HARDER)
861 		min -= min / 4;
862 
863 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
864 		return 0;
865 	for (o = 0; o < order; o++) {
866 		/* At the next order, this order's pages become unavailable */
867 		free_pages -= z->free_area[o].nr_free << o;
868 
869 		/* Require fewer higher order pages to be free */
870 		min >>= 1;
871 
872 		if (free_pages <= min)
873 			return 0;
874 	}
875 	return 1;
876 }
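/*
 * A worked example of the check above: for an order-2 request with mark = 128
 * and neither ALLOC_HIGH nor ALLOC_HARDER, free_pages starts at
 * z->free_pages - 3.  The zone must first hold more than
 * 128 + lowmem_reserve[classzone_idx] free pages in total; then, after
 * subtracting all order-0 free pages, more than 64 must remain, and after
 * also subtracting the pages held in order-1 blocks, more than 32 pages in
 * blocks of order 2 or higher must still be free.
 */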
877 
878 /*
879  * get_page_from_freelist goes through the zonelist trying to allocate
880  * a page.
881  */
882 static struct page *
883 get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
884 		struct zonelist *zonelist, int alloc_flags)
885 {
886 	struct zone **z = zonelist->zones;
887 	struct page *page = NULL;
888 	int classzone_idx = zone_idx(*z);
889 
890 	/*
891 	 * Go through the zonelist once, looking for a zone with enough free pages.
892 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
893 	 */
894 	do {
895 		if ((alloc_flags & ALLOC_CPUSET) &&
896 				!cpuset_zone_allowed(*z, gfp_mask))
897 			continue;
898 
899 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
900 			unsigned long mark;
901 			if (alloc_flags & ALLOC_WMARK_MIN)
902 				mark = (*z)->pages_min;
903 			else if (alloc_flags & ALLOC_WMARK_LOW)
904 				mark = (*z)->pages_low;
905 			else
906 				mark = (*z)->pages_high;
907 			if (!zone_watermark_ok(*z, order, mark,
908 				    classzone_idx, alloc_flags))
909 				if (!zone_reclaim_mode ||
910 				    !zone_reclaim(*z, gfp_mask, order))
911 					continue;
912 		}
913 
914 		page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
915 		if (page) {
916 			break;
917 		}
918 	} while (*(++z) != NULL);
919 	return page;
920 }
921 
922 /*
923  * This is the 'heart' of the zoned buddy allocator.
924  */
925 struct page * fastcall
926 __alloc_pages(gfp_t gfp_mask, unsigned int order,
927 		struct zonelist *zonelist)
928 {
929 	const gfp_t wait = gfp_mask & __GFP_WAIT;
930 	struct zone **z;
931 	struct page *page;
932 	struct reclaim_state reclaim_state;
933 	struct task_struct *p = current;
934 	int do_retry;
935 	int alloc_flags;
936 	int did_some_progress;
937 
938 	might_sleep_if(wait);
939 
940 restart:
941 	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
942 
943 	if (unlikely(*z == NULL)) {
944 		/* Should this ever happen?? */
945 		return NULL;
946 	}
947 
948 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
949 				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
950 	if (page)
951 		goto got_pg;
952 
953 	do {
954 		if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
955 			wakeup_kswapd(*z, order);
956 	} while (*(++z));
957 
958 	/*
959 	 * OK, we're below the kswapd watermark and have kicked background
960 	 * reclaim. Now things get more complex, so set up alloc_flags according
961 	 * to how we want to proceed.
962 	 *
963 	 * The caller may dip into page reserves a bit more if the caller
964 	 * cannot run direct reclaim, or if the caller has realtime scheduling
965 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
966 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
967 	 */
968 	alloc_flags = ALLOC_WMARK_MIN;
969 	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
970 		alloc_flags |= ALLOC_HARDER;
971 	if (gfp_mask & __GFP_HIGH)
972 		alloc_flags |= ALLOC_HIGH;
973 	if (wait)
974 		alloc_flags |= ALLOC_CPUSET;
975 
976 	/*
977 	 * Go through the zonelist again. Let __GFP_HIGH and allocations
978 	 * coming from realtime tasks go deeper into reserves.
979 	 *
980 	 * This is the last chance, in general, before the goto nopage.
981 	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
982 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
983 	 */
984 	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
985 	if (page)
986 		goto got_pg;
987 
988 	/* This allocation should allow future memory freeing. */
989 
990 	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
991 			&& !in_interrupt()) {
992 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
993 nofail_alloc:
994 			/* go through the zonelist yet again, ignoring mins */
995 			page = get_page_from_freelist(gfp_mask, order,
996 				zonelist, ALLOC_NO_WATERMARKS);
997 			if (page)
998 				goto got_pg;
999 			if (gfp_mask & __GFP_NOFAIL) {
1000 				blk_congestion_wait(WRITE, HZ/50);
1001 				goto nofail_alloc;
1002 			}
1003 		}
1004 		goto nopage;
1005 	}
1006 
1007 	/* Atomic allocations - we can't balance anything */
1008 	if (!wait)
1009 		goto nopage;
1010 
1011 rebalance:
1012 	cond_resched();
1013 
1014 	/* We now go into synchronous reclaim */
1015 	cpuset_memory_pressure_bump();
1016 	p->flags |= PF_MEMALLOC;
1017 	reclaim_state.reclaimed_slab = 0;
1018 	p->reclaim_state = &reclaim_state;
1019 
1020 	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
1021 
1022 	p->reclaim_state = NULL;
1023 	p->flags &= ~PF_MEMALLOC;
1024 
1025 	cond_resched();
1026 
1027 	if (likely(did_some_progress)) {
1028 		page = get_page_from_freelist(gfp_mask, order,
1029 						zonelist, alloc_flags);
1030 		if (page)
1031 			goto got_pg;
1032 	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1033 		/*
1034 		 * Go through the zonelist yet one more time, keep
1035 		 * very high watermark here, this is only to catch
1036 		 * a parallel oom killing, we must fail if we're still
1037 		 * under heavy pressure.
1038 		 */
1039 		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
1040 				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1041 		if (page)
1042 			goto got_pg;
1043 
1044 		out_of_memory(zonelist, gfp_mask, order);
1045 		goto restart;
1046 	}
1047 
1048 	/*
1049 	 * Don't let big-order allocations loop unless the caller explicitly
1050 	 * requests that.  Wait for some write requests to complete then retry.
1051 	 *
1052 	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1053 	 * <= 3, but that may not be true in other implementations.
1054 	 */
1055 	do_retry = 0;
1056 	if (!(gfp_mask & __GFP_NORETRY)) {
1057 		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
1058 			do_retry = 1;
1059 		if (gfp_mask & __GFP_NOFAIL)
1060 			do_retry = 1;
1061 	}
1062 	if (do_retry) {
1063 		blk_congestion_wait(WRITE, HZ/50);
1064 		goto rebalance;
1065 	}
1066 
1067 nopage:
1068 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1069 		printk(KERN_WARNING "%s: page allocation failure."
1070 			" order:%d, mode:0x%x\n",
1071 			p->comm, order, gfp_mask);
1072 		dump_stack();
1073 		show_mem();
1074 	}
1075 got_pg:
1076 	return page;
1077 }
1078 
1079 EXPORT_SYMBOL(__alloc_pages);
1080 
1081 /*
1082  * Common helper functions.
1083  */
1084 fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1085 {
1086 	struct page * page;
1087 	page = alloc_pages(gfp_mask, order);
1088 	if (!page)
1089 		return 0;
1090 	return (unsigned long) page_address(page);
1091 }
1092 
1093 EXPORT_SYMBOL(__get_free_pages);
1094 
1095 fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
1096 {
1097 	struct page * page;
1098 
1099 	/*
1100 	 * get_zeroed_page() returns the page's direct-mapped kernel address,
1101 	 * which cannot represent a highmem page
1102 	 */
1103 	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1104 
1105 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1106 	if (page)
1107 		return (unsigned long) page_address(page);
1108 	return 0;
1109 }
1110 
1111 EXPORT_SYMBOL(get_zeroed_page);
1112 
1113 void __pagevec_free(struct pagevec *pvec)
1114 {
1115 	int i = pagevec_count(pvec);
1116 
1117 	while (--i >= 0)
1118 		free_hot_cold_page(pvec->pages[i], pvec->cold);
1119 }
1120 
1121 fastcall void __free_pages(struct page *page, unsigned int order)
1122 {
1123 	if (put_page_testzero(page)) {
1124 		if (order == 0)
1125 			free_hot_page(page);
1126 		else
1127 			__free_pages_ok(page, order);
1128 	}
1129 }
1130 
1131 EXPORT_SYMBOL(__free_pages);
1132 
1133 fastcall void free_pages(unsigned long addr, unsigned int order)
1134 {
1135 	if (addr != 0) {
1136 		BUG_ON(!virt_addr_valid((void *)addr));
1137 		__free_pages(virt_to_page((void *)addr), order);
1138 	}
1139 }
1140 
1141 EXPORT_SYMBOL(free_pages);
1142 
1143 /*
1144  * Total amount of free (allocatable) RAM:
1145  */
1146 unsigned int nr_free_pages(void)
1147 {
1148 	unsigned int sum = 0;
1149 	struct zone *zone;
1150 
1151 	for_each_zone(zone)
1152 		sum += zone->free_pages;
1153 
1154 	return sum;
1155 }
1156 
1157 EXPORT_SYMBOL(nr_free_pages);
1158 
1159 #ifdef CONFIG_NUMA
1160 unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
1161 {
1162 	unsigned int i, sum = 0;
1163 
1164 	for (i = 0; i < MAX_NR_ZONES; i++)
1165 		sum += pgdat->node_zones[i].free_pages;
1166 
1167 	return sum;
1168 }
1169 #endif
1170 
1171 static unsigned int nr_free_zone_pages(int offset)
1172 {
1173 	/* Just pick one node, since fallback list is circular */
1174 	pg_data_t *pgdat = NODE_DATA(numa_node_id());
1175 	unsigned int sum = 0;
1176 
1177 	struct zonelist *zonelist = pgdat->node_zonelists + offset;
1178 	struct zone **zonep = zonelist->zones;
1179 	struct zone *zone;
1180 
1181 	for (zone = *zonep++; zone; zone = *zonep++) {
1182 		unsigned long size = zone->present_pages;
1183 		unsigned long high = zone->pages_high;
1184 		if (size > high)
1185 			sum += size - high;
1186 	}
1187 
1188 	return sum;
1189 }
1190 
1191 /*
1192  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1193  */
1194 unsigned int nr_free_buffer_pages(void)
1195 {
1196 	return nr_free_zone_pages(gfp_zone(GFP_USER));
1197 }
1198 
1199 /*
1200  * Amount of free RAM allocatable within all zones
1201  */
1202 unsigned int nr_free_pagecache_pages(void)
1203 {
1204 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
1205 }
1206 
1207 #ifdef CONFIG_HIGHMEM
1208 unsigned int nr_free_highpages (void)
1209 {
1210 	pg_data_t *pgdat;
1211 	unsigned int pages = 0;
1212 
1213 	for_each_online_pgdat(pgdat)
1214 		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1215 
1216 	return pages;
1217 }
1218 #endif
1219 
1220 #ifdef CONFIG_NUMA
1221 static void show_node(struct zone *zone)
1222 {
1223 	printk("Node %d ", zone->zone_pgdat->node_id);
1224 }
1225 #else
1226 #define show_node(zone)	do { } while (0)
1227 #endif
1228 
1229 /*
1230  * Accumulate the page_state information across all CPUs.
1231  * The result is unavoidably approximate - it can change
1232  * during and after execution of this function.
1233  */
1234 static DEFINE_PER_CPU(struct page_state, page_states) = {0};
1235 
1236 atomic_t nr_pagecache = ATOMIC_INIT(0);
1237 EXPORT_SYMBOL(nr_pagecache);
1238 #ifdef CONFIG_SMP
1239 DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
1240 #endif
1241 
1242 static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
1243 {
1244 	unsigned cpu;
1245 
1246 	memset(ret, 0, nr * sizeof(unsigned long));
1247 	cpus_and(*cpumask, *cpumask, cpu_online_map);
1248 
1249 	for_each_cpu_mask(cpu, *cpumask) {
1250 		unsigned long *in;
1251 		unsigned long *out;
1252 		unsigned off;
1253 		unsigned next_cpu;
1254 
1255 		in = (unsigned long *)&per_cpu(page_states, cpu);
1256 
1257 		next_cpu = next_cpu(cpu, *cpumask);
1258 		if (likely(next_cpu < NR_CPUS))
1259 			prefetch(&per_cpu(page_states, next_cpu));
1260 
1261 		out = (unsigned long *)ret;
1262 		for (off = 0; off < nr; off++)
1263 			*out++ += *in++;
1264 	}
1265 }
1266 
1267 void get_page_state_node(struct page_state *ret, int node)
1268 {
1269 	int nr;
1270 	cpumask_t mask = node_to_cpumask(node);
1271 
1272 	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
1273 	nr /= sizeof(unsigned long);
1274 
1275 	__get_page_state(ret, nr+1, &mask);
1276 }
1277 
1278 void get_page_state(struct page_state *ret)
1279 {
1280 	int nr;
1281 	cpumask_t mask = CPU_MASK_ALL;
1282 
1283 	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
1284 	nr /= sizeof(unsigned long);
1285 
1286 	__get_page_state(ret, nr + 1, &mask);
1287 }
1288 
1289 void get_full_page_state(struct page_state *ret)
1290 {
1291 	cpumask_t mask = CPU_MASK_ALL;
1292 
1293 	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
1294 }
1295 
1296 unsigned long read_page_state_offset(unsigned long offset)
1297 {
1298 	unsigned long ret = 0;
1299 	int cpu;
1300 
1301 	for_each_online_cpu(cpu) {
1302 		unsigned long in;
1303 
1304 		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
1305 		ret += *((unsigned long *)in);
1306 	}
1307 	return ret;
1308 }
1309 
1310 void __mod_page_state_offset(unsigned long offset, unsigned long delta)
1311 {
1312 	void *ptr;
1313 
1314 	ptr = &__get_cpu_var(page_states);
1315 	*(unsigned long *)(ptr + offset) += delta;
1316 }
1317 EXPORT_SYMBOL(__mod_page_state_offset);
1318 
1319 void mod_page_state_offset(unsigned long offset, unsigned long delta)
1320 {
1321 	unsigned long flags;
1322 	void *ptr;
1323 
1324 	local_irq_save(flags);
1325 	ptr = &__get_cpu_var(page_states);
1326 	*(unsigned long *)(ptr + offset) += delta;
1327 	local_irq_restore(flags);
1328 }
1329 EXPORT_SYMBOL(mod_page_state_offset);
1330 
1331 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
1332 			unsigned long *free, struct pglist_data *pgdat)
1333 {
1334 	struct zone *zones = pgdat->node_zones;
1335 	int i;
1336 
1337 	*active = 0;
1338 	*inactive = 0;
1339 	*free = 0;
1340 	for (i = 0; i < MAX_NR_ZONES; i++) {
1341 		*active += zones[i].nr_active;
1342 		*inactive += zones[i].nr_inactive;
1343 		*free += zones[i].free_pages;
1344 	}
1345 }
1346 
1347 void get_zone_counts(unsigned long *active,
1348 		unsigned long *inactive, unsigned long *free)
1349 {
1350 	struct pglist_data *pgdat;
1351 
1352 	*active = 0;
1353 	*inactive = 0;
1354 	*free = 0;
1355 	for_each_online_pgdat(pgdat) {
1356 		unsigned long l, m, n;
1357 		__get_zone_counts(&l, &m, &n, pgdat);
1358 		*active += l;
1359 		*inactive += m;
1360 		*free += n;
1361 	}
1362 }
1363 
1364 void si_meminfo(struct sysinfo *val)
1365 {
1366 	val->totalram = totalram_pages;
1367 	val->sharedram = 0;
1368 	val->freeram = nr_free_pages();
1369 	val->bufferram = nr_blockdev_pages();
1370 #ifdef CONFIG_HIGHMEM
1371 	val->totalhigh = totalhigh_pages;
1372 	val->freehigh = nr_free_highpages();
1373 #else
1374 	val->totalhigh = 0;
1375 	val->freehigh = 0;
1376 #endif
1377 	val->mem_unit = PAGE_SIZE;
1378 }
1379 
1380 EXPORT_SYMBOL(si_meminfo);
1381 
1382 #ifdef CONFIG_NUMA
1383 void si_meminfo_node(struct sysinfo *val, int nid)
1384 {
1385 	pg_data_t *pgdat = NODE_DATA(nid);
1386 
1387 	val->totalram = pgdat->node_present_pages;
1388 	val->freeram = nr_free_pages_pgdat(pgdat);
1389 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1390 	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1391 	val->mem_unit = PAGE_SIZE;
1392 }
1393 #endif
1394 
1395 #define K(x) ((x) << (PAGE_SHIFT-10))
1396 
1397 /*
1398  * Show free area list (used inside shift_scroll-lock stuff)
1399  * We also calculate the percentage fragmentation. We do this by counting the
1400  * memory on each free list with the exception of the first item on the list.
1401  */
1402 void show_free_areas(void)
1403 {
1404 	struct page_state ps;
1405 	int cpu, temperature;
1406 	unsigned long active;
1407 	unsigned long inactive;
1408 	unsigned long free;
1409 	struct zone *zone;
1410 
1411 	for_each_zone(zone) {
1412 		show_node(zone);
1413 		printk("%s per-cpu:", zone->name);
1414 
1415 		if (!populated_zone(zone)) {
1416 			printk(" empty\n");
1417 			continue;
1418 		} else
1419 			printk("\n");
1420 
1421 		for_each_online_cpu(cpu) {
1422 			struct per_cpu_pageset *pageset;
1423 
1424 			pageset = zone_pcp(zone, cpu);
1425 
1426 			for (temperature = 0; temperature < 2; temperature++)
1427 				printk("cpu %d %s: high %d, batch %d used:%d\n",
1428 					cpu,
1429 					temperature ? "cold" : "hot",
1430 					pageset->pcp[temperature].high,
1431 					pageset->pcp[temperature].batch,
1432 					pageset->pcp[temperature].count);
1433 		}
1434 	}
1435 
1436 	get_page_state(&ps);
1437 	get_zone_counts(&active, &inactive, &free);
1438 
1439 	printk("Free pages: %11ukB (%ukB HighMem)\n",
1440 		K(nr_free_pages()),
1441 		K(nr_free_highpages()));
1442 
1443 	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
1444 		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
1445 		active,
1446 		inactive,
1447 		ps.nr_dirty,
1448 		ps.nr_writeback,
1449 		ps.nr_unstable,
1450 		nr_free_pages(),
1451 		ps.nr_slab,
1452 		ps.nr_mapped,
1453 		ps.nr_page_table_pages);
1454 
1455 	for_each_zone(zone) {
1456 		int i;
1457 
1458 		show_node(zone);
1459 		printk("%s"
1460 			" free:%lukB"
1461 			" min:%lukB"
1462 			" low:%lukB"
1463 			" high:%lukB"
1464 			" active:%lukB"
1465 			" inactive:%lukB"
1466 			" present:%lukB"
1467 			" pages_scanned:%lu"
1468 			" all_unreclaimable? %s"
1469 			"\n",
1470 			zone->name,
1471 			K(zone->free_pages),
1472 			K(zone->pages_min),
1473 			K(zone->pages_low),
1474 			K(zone->pages_high),
1475 			K(zone->nr_active),
1476 			K(zone->nr_inactive),
1477 			K(zone->present_pages),
1478 			zone->pages_scanned,
1479 			(zone->all_unreclaimable ? "yes" : "no")
1480 			);
1481 		printk("lowmem_reserve[]:");
1482 		for (i = 0; i < MAX_NR_ZONES; i++)
1483 			printk(" %lu", zone->lowmem_reserve[i]);
1484 		printk("\n");
1485 	}
1486 
1487 	for_each_zone(zone) {
1488  		unsigned long nr, flags, order, total = 0;
1489 
1490 		show_node(zone);
1491 		printk("%s: ", zone->name);
1492 		if (!populated_zone(zone)) {
1493 			printk("empty\n");
1494 			continue;
1495 		}
1496 
1497 		spin_lock_irqsave(&zone->lock, flags);
1498 		for (order = 0; order < MAX_ORDER; order++) {
1499 			nr = zone->free_area[order].nr_free;
1500 			total += nr << order;
1501 			printk("%lu*%lukB ", nr, K(1UL) << order);
1502 		}
1503 		spin_unlock_irqrestore(&zone->lock, flags);
1504 		printk("= %lukB\n", K(total));
1505 	}
1506 
1507 	show_swap_cache_info();
1508 }
1509 
1510 /*
1511  * Builds allocation fallback zone lists.
1512  *
1513  * Add all populated zones of a node to the zonelist.
1514  */
1515 static int __init build_zonelists_node(pg_data_t *pgdat,
1516 			struct zonelist *zonelist, int nr_zones, int zone_type)
1517 {
1518 	struct zone *zone;
1519 
1520 	BUG_ON(zone_type > ZONE_HIGHMEM);
1521 
1522 	do {
1523 		zone = pgdat->node_zones + zone_type;
1524 		if (populated_zone(zone)) {
1525 #ifndef CONFIG_HIGHMEM
1526 			BUG_ON(zone_type > ZONE_NORMAL);
1527 #endif
1528 			zonelist->zones[nr_zones++] = zone;
1529 			check_highest_zone(zone_type);
1530 		}
1531 		zone_type--;
1532 
1533 	} while (zone_type >= 0);
1534 	return nr_zones;
1535 }
1536 
1537 static inline int highest_zone(int zone_bits)
1538 {
1539 	int res = ZONE_NORMAL;
1540 	if (zone_bits & (__force int)__GFP_HIGHMEM)
1541 		res = ZONE_HIGHMEM;
1542 	if (zone_bits & (__force int)__GFP_DMA32)
1543 		res = ZONE_DMA32;
1544 	if (zone_bits & (__force int)__GFP_DMA)
1545 		res = ZONE_DMA;
1546 	return res;
1547 }
1548 
1549 #ifdef CONFIG_NUMA
1550 #define MAX_NODE_LOAD (num_online_nodes())
1551 static int __initdata node_load[MAX_NUMNODES];
1552 /**
1553  * find_next_best_node - find the next node that should appear in a given node's fallback list
1554  * @node: node whose fallback list we're appending
1555  * @used_node_mask: nodemask_t of already used nodes
1556  *
1557  * We use a number of factors to determine which is the next node that should
1558  * appear on a given node's fallback list.  The node should not have appeared
1559  * already in @node's fallback list, and it should be the next closest node
1560  * according to the distance array (which contains arbitrary distance values
1561  * from each node to each node in the system), and should also prefer nodes
1562  * with no CPUs, since presumably they'll have very little allocation pressure
1563  * on them otherwise.
1564  * It returns -1 if no node is found.
1565  */
1566 static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
1567 {
1568 	int n, val;
1569 	int min_val = INT_MAX;
1570 	int best_node = -1;
1571 
1572 	/* Use the local node if we haven't already */
1573 	if (!node_isset(node, *used_node_mask)) {
1574 		node_set(node, *used_node_mask);
1575 		return node;
1576 	}
1577 
1578 	for_each_online_node(n) {
1579 		cpumask_t tmp;
1580 
1581 		/* Don't want a node to appear more than once */
1582 		if (node_isset(n, *used_node_mask))
1583 			continue;
1584 
1585 		/* Use the distance array to find the distance */
1586 		val = node_distance(node, n);
1587 
1588 		/* Penalize nodes under us ("prefer the next node") */
1589 		val += (n < node);
1590 
1591 		/* Give preference to headless and unused nodes */
1592 		tmp = node_to_cpumask(n);
1593 		if (!cpus_empty(tmp))
1594 			val += PENALTY_FOR_NODE_WITH_CPUS;
1595 
1596 		/* Slight preference for less loaded node */
1597 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
1598 		val += node_load[n];
1599 
1600 		if (val < min_val) {
1601 			min_val = val;
1602 			best_node = n;
1603 		}
1604 	}
1605 
1606 	if (best_node >= 0)
1607 		node_set(best_node, *used_node_mask);
1608 
1609 	return best_node;
1610 }
1611 
1612 static void __init build_zonelists(pg_data_t *pgdat)
1613 {
1614 	int i, j, k, node, local_node;
1615 	int prev_node, load;
1616 	struct zonelist *zonelist;
1617 	nodemask_t used_mask;
1618 
1619 	/* initialize zonelists */
1620 	for (i = 0; i < GFP_ZONETYPES; i++) {
1621 		zonelist = pgdat->node_zonelists + i;
1622 		zonelist->zones[0] = NULL;
1623 	}
1624 
1625 	/* NUMA-aware ordering of nodes */
1626 	local_node = pgdat->node_id;
1627 	load = num_online_nodes();
1628 	prev_node = local_node;
1629 	nodes_clear(used_mask);
1630 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
1631 		int distance = node_distance(local_node, node);
1632 
1633 		/*
1634 		 * If another node is sufficiently far away then it is better
1635 		 * to reclaim pages in a zone before going off node.
1636 		 */
1637 		if (distance > RECLAIM_DISTANCE)
1638 			zone_reclaim_mode = 1;
1639 
1640 		/*
1641 		 * We don't want to pressure a particular node.
1642 		 * So we add a penalty to the first node in the same
1643 		 * distance group to make it round-robin.
1644 		 */
1645 
1646 		if (distance != node_distance(local_node, prev_node))
1647 			node_load[node] += load;
1648 		prev_node = node;
1649 		load--;
1650 		for (i = 0; i < GFP_ZONETYPES; i++) {
1651 			zonelist = pgdat->node_zonelists + i;
1652 			for (j = 0; zonelist->zones[j] != NULL; j++);
1653 
1654 			k = highest_zone(i);
1655 
1656 	 		j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1657 			zonelist->zones[j] = NULL;
1658 		}
1659 	}
1660 }
1661 
1662 #else	/* CONFIG_NUMA */
1663 
1664 static void __init build_zonelists(pg_data_t *pgdat)
1665 {
1666 	int i, j, k, node, local_node;
1667 
1668 	local_node = pgdat->node_id;
1669 	for (i = 0; i < GFP_ZONETYPES; i++) {
1670 		struct zonelist *zonelist;
1671 
1672 		zonelist = pgdat->node_zonelists + i;
1673 
1674 		j = 0;
1675 		k = highest_zone(i);
1676  		j = build_zonelists_node(pgdat, zonelist, j, k);
1677  		/*
1678  		 * Now we build the zonelist so that it contains the zones
1679  		 * of all the other nodes.
1680  		 * We don't want to pressure a particular node, so when
1681  		 * building the zones for node N, we make sure that the
1682  		 * zones coming right after the local ones are those from
1683  		 * node N+1 (modulo N)
1684  		 */
1685 		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
1686 			if (!node_online(node))
1687 				continue;
1688 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1689 		}
1690 		for (node = 0; node < local_node; node++) {
1691 			if (!node_online(node))
1692 				continue;
1693 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1694 		}
1695 
1696 		zonelist->zones[j] = NULL;
1697 	}
1698 }
1699 
1700 #endif	/* CONFIG_NUMA */
1701 
1702 void __init build_all_zonelists(void)
1703 {
1704 	int i;
1705 
1706 	for_each_online_node(i)
1707 		build_zonelists(NODE_DATA(i));
1708 	printk("Built %i zonelists\n", num_online_nodes());
1709 	cpuset_init_current_mems_allowed();
1710 }
1711 
1712 /*
1713  * Helper functions to size the waitqueue hash table.
1714  * Essentially these want to choose hash table sizes sufficiently
1715  * large so that collisions trying to wait on pages are rare.
1716  * But in fact, the number of active page waitqueues on typical
1717  * systems is ridiculously low, less than 200. So this is even
1718  * conservative, even though it seems large.
1719  *
1720  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
1721  * waitqueues, i.e. the size of the waitq table given the number of pages.
1722  */
1723 #define PAGES_PER_WAITQUEUE	256
1724 
1725 static inline unsigned long wait_table_size(unsigned long pages)
1726 {
1727 	unsigned long size = 1;
1728 
1729 	pages /= PAGES_PER_WAITQUEUE;
1730 
1731 	while (size < pages)
1732 		size <<= 1;
1733 
1734 	/*
1735 	 * Once we have dozens or even hundreds of threads sleeping
1736 	 * on IO we've got bigger problems than wait queue collision.
1737 	 * Limit the size of the wait table to a reasonable size.
1738 	 */
1739 	size = min(size, 4096UL);
1740 
1741 	return max(size, 4UL);
1742 }
1743 
1744 /*
1745  * This is an integer logarithm so that shifts can be used later
1746  * to extract the more random high bits from the multiplicative
1747  * hash function before the remainder is taken.
1748  */
1749 static inline unsigned long wait_table_bits(unsigned long size)
1750 {
1751 	return ffz(~size);
1752 }
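/*
 * For example, a zone of 2^18 pages gives 2^18 / PAGES_PER_WAITQUEUE = 1024
 * as the target, so wait_table_size() returns 1024 (already a power of two
 * and within the 4..4096 clamp), and wait_table_bits(1024) = ffz(~1024) = 10,
 * the number of hash bits used later.
 */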
1753 
1754 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1755 
1756 static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
1757 		unsigned long *zones_size, unsigned long *zholes_size)
1758 {
1759 	unsigned long realtotalpages, totalpages = 0;
1760 	int i;
1761 
1762 	for (i = 0; i < MAX_NR_ZONES; i++)
1763 		totalpages += zones_size[i];
1764 	pgdat->node_spanned_pages = totalpages;
1765 
1766 	realtotalpages = totalpages;
1767 	if (zholes_size)
1768 		for (i = 0; i < MAX_NR_ZONES; i++)
1769 			realtotalpages -= zholes_size[i];
1770 	pgdat->node_present_pages = realtotalpages;
1771 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1772 }
1773 
1774 
1775 /*
1776  * Initially all pages are reserved - free ones are freed
1777  * up by free_all_bootmem() once the early boot process is
1778  * done. Non-atomic initialization, single-pass.
1779  */
1780 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1781 		unsigned long start_pfn)
1782 {
1783 	struct page *page;
1784 	unsigned long end_pfn = start_pfn + size;
1785 	unsigned long pfn;
1786 
1787 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1788 		if (!early_pfn_valid(pfn))
1789 			continue;
1790 		page = pfn_to_page(pfn);
1791 		set_page_links(page, zone, nid, pfn);
1792 		init_page_count(page);
1793 		reset_page_mapcount(page);
1794 		SetPageReserved(page);
1795 		INIT_LIST_HEAD(&page->lru);
1796 #ifdef WANT_PAGE_VIRTUAL
1797 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1798 		if (!is_highmem_idx(zone))
1799 			set_page_address(page, __va(pfn << PAGE_SHIFT));
1800 #endif
1801 	}
1802 }
1803 
1804 void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
1805 				unsigned long size)
1806 {
1807 	int order;
1808 	for (order = 0; order < MAX_ORDER ; order++) {
1809 		INIT_LIST_HEAD(&zone->free_area[order].free_list);
1810 		zone->free_area[order].nr_free = 0;
1811 	}
1812 }
1813 
1814 #define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
1815 void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
1816 		unsigned long size)
1817 {
1818 	unsigned long snum = pfn_to_section_nr(pfn);
1819 	unsigned long end = pfn_to_section_nr(pfn + size);
1820 
1821 	if (FLAGS_HAS_NODE)
1822 		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
1823 	else
1824 		for (; snum <= end; snum++)
1825 			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
1826 }
1827 
1828 #ifndef __HAVE_ARCH_MEMMAP_INIT
1829 #define memmap_init(size, nid, zone, start_pfn) \
1830 	memmap_init_zone((size), (nid), (zone), (start_pfn))
1831 #endif
1832 
1833 static int __cpuinit zone_batchsize(struct zone *zone)
1834 {
1835 	int batch;
1836 
1837 	/*
1838 	 * The per-cpu-pages pools are set to around 1000th of the
1839 	 * size of the zone.  But no more than 1/2 of a meg.
1840 	 *
1841 	 * OK, so we don't know how big the cache is.  So guess.
1842 	 */
1843 	batch = zone->present_pages / 1024;
1844 	if (batch * PAGE_SIZE > 512 * 1024)
1845 		batch = (512 * 1024) / PAGE_SIZE;
1846 	batch /= 4;		/* We effectively *= 4 below */
1847 	if (batch < 1)
1848 		batch = 1;
1849 
1850 	/*
1851 	 * Clamp the batch to a 2^n - 1 value. Having a power
1852 	 * of 2 value was found to be more likely to have
1853 	 * suboptimal cache aliasing properties in some cases.
1854 	 *
1855 	 * For example if 2 tasks are alternately allocating
1856 	 * batches of pages, one task can end up with a lot
1857 	 * of pages of one half of the possible page colors
1858 	 * and the other with pages of the other colors.
1859 	 */
1860 	batch = (1 << (fls(batch + batch/2)-1)) - 1;
1861 
1862 	return batch;
1863 }
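/*
 * A worked example of the sizing above, assuming 4K pages: a zone with
 * 262144 present pages (1GB) gives batch = 256, which exceeds the 512K cap
 * and is reduced to 128, then divided by 4 to 32; the final
 * power-of-two-minus-one clamp computes (1 << (fls(48) - 1)) - 1 = 31.
 */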
1864 
1865 inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
1866 {
1867 	struct per_cpu_pages *pcp;
1868 
1869 	memset(p, 0, sizeof(*p));
1870 
1871 	pcp = &p->pcp[0];		/* hot */
1872 	pcp->count = 0;
1873 	pcp->high = 6 * batch;
1874 	pcp->batch = max(1UL, 1 * batch);
1875 	INIT_LIST_HEAD(&pcp->list);
1876 
1877 	pcp = &p->pcp[1];		/* cold*/
1878 	pcp->count = 0;
1879 	pcp->high = 2 * batch;
1880 	pcp->batch = max(1UL, batch/2);
1881 	INIT_LIST_HEAD(&pcp->list);
1882 }
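/*
 * For a batch of 31, for example, the hot list above ends up with high = 186
 * and batch = 31, and the cold list with high = 62 and batch = 15, so hot
 * pages drain back to the buddy allocator in larger, less frequent bursts
 * than cold ones.
 */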
1883 
1884 /*
1885  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
1886  * to the value high for the pageset p.
1887  */
1888 
1889 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
1890 				unsigned long high)
1891 {
1892 	struct per_cpu_pages *pcp;
1893 
1894 	pcp = &p->pcp[0]; /* hot list */
1895 	pcp->high = high;
1896 	pcp->batch = max(1UL, high/4);
1897 	if ((high/4) > (PAGE_SHIFT * 8))
1898 		pcp->batch = PAGE_SHIFT * 8;
1899 }
1900 
1901 
1902 #ifdef CONFIG_NUMA
1903 /*
1904  * Boot pageset table. One per cpu which is going to be used for all
1905  * zones and all nodes. The parameters will be set in such a way
1906  * that an item put on a list will immediately be handed over to
1907  * the buddy list. This is safe since pageset manipulation is done
1908  * with interrupts disabled.
1909  *
1910  * Some NUMA counter updates may also be caught by the boot pagesets.
1911  *
1912  * The boot_pagesets must be kept even after bootup is complete for
1913  * unused processors and/or zones. They do play a role for bootstrapping
1914  * hotplugged processors.
1915  *
1916  * zoneinfo_show() and maybe other functions do
1917  * not check if the processor is online before following the pageset pointer.
1918  * Other parts of the kernel may not check if the zone is available.
1919  */
1920 static struct per_cpu_pageset boot_pageset[NR_CPUS];
1921 
1922 /*
1923  * Dynamically allocate memory for the
1924  * per cpu pageset array in struct zone.
1925  */
1926 static int __cpuinit process_zones(int cpu)
1927 {
1928 	struct zone *zone, *dzone;
1929 
1930 	for_each_zone(zone) {
1931 
1932 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
1933 					 GFP_KERNEL, cpu_to_node(cpu));
1934 		if (!zone_pcp(zone, cpu))
1935 			goto bad;
1936 
1937 		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
1938 
1939 		if (percpu_pagelist_fraction)
1940 			setup_pagelist_highmark(zone_pcp(zone, cpu),
1941 			 	(zone->present_pages / percpu_pagelist_fraction));
1942 	}
1943 
1944 	return 0;
1945 bad:
1946 	for_each_zone(dzone) {
1947 		if (dzone == zone)
1948 			break;
1949 		kfree(zone_pcp(dzone, cpu));
1950 		zone_pcp(dzone, cpu) = NULL;
1951 	}
1952 	return -ENOMEM;
1953 }
1954 
1955 static inline void free_zone_pagesets(int cpu)
1956 {
1957 	struct zone *zone;
1958 
1959 	for_each_zone(zone) {
1960 		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
1961 
1962 		zone_pcp(zone, cpu) = NULL;
1963 		kfree(pset);
1964 	}
1965 }
1966 
1967 static int pageset_cpuup_callback(struct notifier_block *nfb,
1968 		unsigned long action,
1969 		void *hcpu)
1970 {
1971 	int cpu = (long)hcpu;
1972 	int ret = NOTIFY_OK;
1973 
1974 	switch (action) {
1975 		case CPU_UP_PREPARE:
1976 			if (process_zones(cpu))
1977 				ret = NOTIFY_BAD;
1978 			break;
1979 		case CPU_UP_CANCELED:
1980 		case CPU_DEAD:
1981 			free_zone_pagesets(cpu);
1982 			break;
1983 		default:
1984 			break;
1985 	}
1986 	return ret;
1987 }
1988 
1989 static struct notifier_block pageset_notifier =
1990 	{ &pageset_cpuup_callback, NULL, 0 };
1991 
1992 void __init setup_per_cpu_pageset(void)
1993 {
1994 	int err;
1995 
1996 	/* Initialize per_cpu_pageset for the boot cpu.
1997 	 * A cpuup callback will do this for every other
1998 	 * cpu as it comes online.
1999 	 */
2000 	err = process_zones(smp_processor_id());
2001 	BUG_ON(err);
2002 	register_cpu_notifier(&pageset_notifier);
2003 }
2004 
2005 #endif
2006 
2007 static __meminit
2008 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2009 {
2010 	int i;
2011 	struct pglist_data *pgdat = zone->zone_pgdat;
2012 
2013 	/*
2014 	 * The per-page waitqueue mechanism uses hashed waitqueues
2015 	 * per zone.
2016 	 */
2017 	zone->wait_table_size = wait_table_size(zone_size_pages);
2018 	zone->wait_table_bits =	wait_table_bits(zone->wait_table_size);
2019 	zone->wait_table = (wait_queue_head_t *)
2020 		alloc_bootmem_node(pgdat, zone->wait_table_size
2021 					* sizeof(wait_queue_head_t));
2022 
2023 	for (i = 0; i < zone->wait_table_size; ++i)
2024 		init_waitqueue_head(zone->wait_table + i);
2025 }
2026 
2027 static __meminit void zone_pcp_init(struct zone *zone)
2028 {
2029 	int cpu;
2030 	unsigned long batch = zone_batchsize(zone);
2031 
2032 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
2033 #ifdef CONFIG_NUMA
2034 		/* Early boot. Slab allocator not functional yet */
2035 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
2036 		setup_pageset(&boot_pageset[cpu], 0);
2037 #else
2038 		setup_pageset(zone_pcp(zone, cpu), batch);
2039 #endif
2040 	}
2041 	if (zone->present_pages)
2042 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
2043 			zone->name, zone->present_pages, batch);
2044 }
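/*
 * The printk above produces boot lines such as (illustrative only):
 *
 *   Normal zone: 225280 pages, LIFO batch:31
 */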
2045 
2046 static __meminit void init_currently_empty_zone(struct zone *zone,
2047 		unsigned long zone_start_pfn, unsigned long size)
2048 {
2049 	struct pglist_data *pgdat = zone->zone_pgdat;
2050 
2051 	zone_wait_table_init(zone, size);
2052 	pgdat->nr_zones = zone_idx(zone) + 1;
2053 
2054 	zone->zone_start_pfn = zone_start_pfn;
2055 
2056 	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
2057 
2058 	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
2059 }
2060 
2061 /*
2062  * Set up the zone data structures:
2063  *   - mark all pages reserved
2064  *   - mark all memory queues empty
2065  *   - clear the memory bitmaps
2066  */
2067 static void __init free_area_init_core(struct pglist_data *pgdat,
2068 		unsigned long *zones_size, unsigned long *zholes_size)
2069 {
2070 	unsigned long j;
2071 	int nid = pgdat->node_id;
2072 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
2073 
2074 	pgdat_resize_init(pgdat);
2075 	pgdat->nr_zones = 0;
2076 	init_waitqueue_head(&pgdat->kswapd_wait);
2077 	pgdat->kswapd_max_order = 0;
2078 
2079 	for (j = 0; j < MAX_NR_ZONES; j++) {
2080 		struct zone *zone = pgdat->node_zones + j;
2081 		unsigned long size, realsize;
2082 
2083 		realsize = size = zones_size[j];
2084 		if (zholes_size)
2085 			realsize -= zholes_size[j];
2086 
2087 		if (j < ZONE_HIGHMEM)
2088 			nr_kernel_pages += realsize;
2089 		nr_all_pages += realsize;
2090 
2091 		zone->spanned_pages = size;
2092 		zone->present_pages = realsize;
2093 		zone->name = zone_names[j];
2094 		spin_lock_init(&zone->lock);
2095 		spin_lock_init(&zone->lru_lock);
2096 		zone_seqlock_init(zone);
2097 		zone->zone_pgdat = pgdat;
2098 		zone->free_pages = 0;
2099 
2100 		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
2101 
2102 		zone_pcp_init(zone);
2103 		INIT_LIST_HEAD(&zone->active_list);
2104 		INIT_LIST_HEAD(&zone->inactive_list);
2105 		zone->nr_scan_active = 0;
2106 		zone->nr_scan_inactive = 0;
2107 		zone->nr_active = 0;
2108 		zone->nr_inactive = 0;
2109 		atomic_set(&zone->reclaim_in_progress, 0);
2110 		if (!size)
2111 			continue;
2112 
2113 		zonetable_add(zone, nid, j, zone_start_pfn, size);
2114 		init_currently_empty_zone(zone, zone_start_pfn, size);
2115 		zone_start_pfn += size;
2116 	}
2117 }
2118 
2119 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
2120 {
2121 	/* Skip empty nodes */
2122 	if (!pgdat->node_spanned_pages)
2123 		return;
2124 
2125 #ifdef CONFIG_FLAT_NODE_MEM_MAP
2126 	/* ia64 gets its own node_mem_map, before this, without bootmem */
2127 	if (!pgdat->node_mem_map) {
2128 		unsigned long size, start, end;
2129 		struct page *map;
2130 
2131 		/*
2132 		 * The zone's endpoints aren't required to be MAX_ORDER
2133 		 * aligned, but the node_mem_map endpoints must be, in
2134 		 * order for the buddy allocator to function correctly.
2135 		 */
2136 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
2137 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
2138 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
2139 		size =  (end - start) * sizeof(struct page);
2140 		map = alloc_remap(pgdat->node_id, size);
2141 		if (!map)
2142 			map = alloc_bootmem_node(pgdat, size);
2143 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
2144 	}
2145 #ifdef CONFIG_FLATMEM
2146 	/*
2147 	 * With no DISCONTIG, the global mem_map is just node 0's node_mem_map.
2148 	 */
2149 	if (pgdat == NODE_DATA(0))
2150 		mem_map = NODE_DATA(0)->node_mem_map;
2151 #endif
2152 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
2153 }
2154 
2155 void __init free_area_init_node(int nid, struct pglist_data *pgdat,
2156 		unsigned long *zones_size, unsigned long node_start_pfn,
2157 		unsigned long *zholes_size)
2158 {
2159 	pgdat->node_id = nid;
2160 	pgdat->node_start_pfn = node_start_pfn;
2161 	calculate_zone_totalpages(pgdat, zones_size, zholes_size);
2162 
2163 	alloc_node_mem_map(pgdat);
2164 
2165 	free_area_init_core(pgdat, zones_size, zholes_size);
2166 }
2167 
2168 #ifndef CONFIG_NEED_MULTIPLE_NODES
2169 static bootmem_data_t contig_bootmem_data;
2170 struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
2171 
2172 EXPORT_SYMBOL(contig_page_data);
2173 #endif
2174 
2175 void __init free_area_init(unsigned long *zones_size)
2176 {
2177 	free_area_init_node(0, NODE_DATA(0), zones_size,
2178 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
2179 }
2180 
2181 #ifdef CONFIG_PROC_FS
2182 
2183 #include <linux/seq_file.h>
2184 
2185 static void *frag_start(struct seq_file *m, loff_t *pos)
2186 {
2187 	pg_data_t *pgdat;
2188 	loff_t node = *pos;
2189 	for (pgdat = first_online_pgdat();
2190 	     pgdat && node;
2191 	     pgdat = next_online_pgdat(pgdat))
2192 		--node;
2193 
2194 	return pgdat;
2195 }
2196 
2197 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
2198 {
2199 	pg_data_t *pgdat = (pg_data_t *)arg;
2200 
2201 	(*pos)++;
2202 	return next_online_pgdat(pgdat);
2203 }
2204 
2205 static void frag_stop(struct seq_file *m, void *arg)
2206 {
2207 }
2208 
2209 /*
2210  * This walks the free areas for each zone.
2211  */
2212 static int frag_show(struct seq_file *m, void *arg)
2213 {
2214 	pg_data_t *pgdat = (pg_data_t *)arg;
2215 	struct zone *zone;
2216 	struct zone *node_zones = pgdat->node_zones;
2217 	unsigned long flags;
2218 	int order;
2219 
2220 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2221 		if (!populated_zone(zone))
2222 			continue;
2223 
2224 		spin_lock_irqsave(&zone->lock, flags);
2225 		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
2226 		for (order = 0; order < MAX_ORDER; ++order)
2227 			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
2228 		spin_unlock_irqrestore(&zone->lock, flags);
2229 		seq_putc(m, '\n');
2230 	}
2231 	return 0;
2232 }
2233 
2234 struct seq_operations fragmentation_op = {
2235 	.start	= frag_start,
2236 	.next	= frag_next,
2237 	.stop	= frag_stop,
2238 	.show	= frag_show,
2239 };
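/*
 * frag_show() backs /proc/buddyinfo; an illustrative (made-up) line of
 * output for a node with MAX_ORDER == 11 looks like:
 *
 *   Node 0, zone   Normal   1046    527    128     36     17      5      2      1      1      0      0
 */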
2240 
2241 /*
2242  * Output information about zones in @pgdat.
2243  */
2244 static int zoneinfo_show(struct seq_file *m, void *arg)
2245 {
2246 	pg_data_t *pgdat = arg;
2247 	struct zone *zone;
2248 	struct zone *node_zones = pgdat->node_zones;
2249 	unsigned long flags;
2250 
2251 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
2252 		int i;
2253 
2254 		if (!populated_zone(zone))
2255 			continue;
2256 
2257 		spin_lock_irqsave(&zone->lock, flags);
2258 		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
2259 		seq_printf(m,
2260 			   "\n  pages free     %lu"
2261 			   "\n        min      %lu"
2262 			   "\n        low      %lu"
2263 			   "\n        high     %lu"
2264 			   "\n        active   %lu"
2265 			   "\n        inactive %lu"
2266 			   "\n        scanned  %lu (a: %lu i: %lu)"
2267 			   "\n        spanned  %lu"
2268 			   "\n        present  %lu",
2269 			   zone->free_pages,
2270 			   zone->pages_min,
2271 			   zone->pages_low,
2272 			   zone->pages_high,
2273 			   zone->nr_active,
2274 			   zone->nr_inactive,
2275 			   zone->pages_scanned,
2276 			   zone->nr_scan_active, zone->nr_scan_inactive,
2277 			   zone->spanned_pages,
2278 			   zone->present_pages);
2279 		seq_printf(m,
2280 			   "\n        protection: (%lu",
2281 			   zone->lowmem_reserve[0]);
2282 		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
2283 			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
2284 		seq_printf(m,
2285 			   ")"
2286 			   "\n  pagesets");
2287 		for_each_online_cpu(i) {
2288 			struct per_cpu_pageset *pageset;
2289 			int j;
2290 
2291 			pageset = zone_pcp(zone, i);
2292 			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2293 				if (pageset->pcp[j].count)
2294 					break;
2295 			}
2296 			if (j == ARRAY_SIZE(pageset->pcp))
2297 				continue;
2298 			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2299 				seq_printf(m,
2300 					   "\n    cpu: %i pcp: %i"
2301 					   "\n              count: %i"
2302 					   "\n              high:  %i"
2303 					   "\n              batch: %i",
2304 					   i, j,
2305 					   pageset->pcp[j].count,
2306 					   pageset->pcp[j].high,
2307 					   pageset->pcp[j].batch);
2308 			}
2309 #ifdef CONFIG_NUMA
2310 			seq_printf(m,
2311 				   "\n            numa_hit:       %lu"
2312 				   "\n            numa_miss:      %lu"
2313 				   "\n            numa_foreign:   %lu"
2314 				   "\n            interleave_hit: %lu"
2315 				   "\n            local_node:     %lu"
2316 				   "\n            other_node:     %lu",
2317 				   pageset->numa_hit,
2318 				   pageset->numa_miss,
2319 				   pageset->numa_foreign,
2320 				   pageset->interleave_hit,
2321 				   pageset->local_node,
2322 				   pageset->other_node);
2323 #endif
2324 		}
2325 		seq_printf(m,
2326 			   "\n  all_unreclaimable: %u"
2327 			   "\n  prev_priority:     %i"
2328 			   "\n  temp_priority:     %i"
2329 			   "\n  start_pfn:         %lu",
2330 			   zone->all_unreclaimable,
2331 			   zone->prev_priority,
2332 			   zone->temp_priority,
2333 			   zone->zone_start_pfn);
2334 		spin_unlock_irqrestore(&zone->lock, flags);
2335 		seq_putc(m, '\n');
2336 	}
2337 	return 0;
2338 }
2339 
2340 struct seq_operations zoneinfo_op = {
2341 	.start	= frag_start, /* iterate over all online nodes, the same
2342 			       * as for the fragmentation output. */
2343 	.next	= frag_next,
2344 	.stop	= frag_stop,
2345 	.show	= zoneinfo_show,
2346 };
2347 
2348 static char *vmstat_text[] = {
2349 	"nr_dirty",
2350 	"nr_writeback",
2351 	"nr_unstable",
2352 	"nr_page_table_pages",
2353 	"nr_mapped",
2354 	"nr_slab",
2355 
2356 	"pgpgin",
2357 	"pgpgout",
2358 	"pswpin",
2359 	"pswpout",
2360 
2361 	"pgalloc_high",
2362 	"pgalloc_normal",
2363 	"pgalloc_dma32",
2364 	"pgalloc_dma",
2365 
2366 	"pgfree",
2367 	"pgactivate",
2368 	"pgdeactivate",
2369 
2370 	"pgfault",
2371 	"pgmajfault",
2372 
2373 	"pgrefill_high",
2374 	"pgrefill_normal",
2375 	"pgrefill_dma32",
2376 	"pgrefill_dma",
2377 
2378 	"pgsteal_high",
2379 	"pgsteal_normal",
2380 	"pgsteal_dma32",
2381 	"pgsteal_dma",
2382 
2383 	"pgscan_kswapd_high",
2384 	"pgscan_kswapd_normal",
2385 	"pgscan_kswapd_dma32",
2386 	"pgscan_kswapd_dma",
2387 
2388 	"pgscan_direct_high",
2389 	"pgscan_direct_normal",
2390 	"pgscan_direct_dma32",
2391 	"pgscan_direct_dma",
2392 
2393 	"pginodesteal",
2394 	"slabs_scanned",
2395 	"kswapd_steal",
2396 	"kswapd_inodesteal",
2397 	"pageoutrun",
2398 	"allocstall",
2399 
2400 	"pgrotated",
2401 	"nr_bounce",
2402 };
2403 
2404 static void *vmstat_start(struct seq_file *m, loff_t *pos)
2405 {
2406 	struct page_state *ps;
2407 
2408 	if (*pos >= ARRAY_SIZE(vmstat_text))
2409 		return NULL;
2410 
2411 	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
2412 	m->private = ps;
2413 	if (!ps)
2414 		return ERR_PTR(-ENOMEM);
2415 	get_full_page_state(ps);
2416 	ps->pgpgin /= 2;		/* sectors -> kbytes */
2417 	ps->pgpgout /= 2;
2418 	return (unsigned long *)ps + *pos;
2419 }
2420 
2421 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
2422 {
2423 	(*pos)++;
2424 	if (*pos >= ARRAY_SIZE(vmstat_text))
2425 		return NULL;
2426 	return (unsigned long *)m->private + *pos;
2427 }
2428 
2429 static int vmstat_show(struct seq_file *m, void *arg)
2430 {
2431 	unsigned long *l = arg;
2432 	unsigned long off = l - (unsigned long *)m->private;
2433 
2434 	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
2435 	return 0;
2436 }
2437 
2438 static void vmstat_stop(struct seq_file *m, void *arg)
2439 {
2440 	kfree(m->private);
2441 	m->private = NULL;
2442 }
2443 
2444 struct seq_operations vmstat_op = {
2445 	.start	= vmstat_start,
2446 	.next	= vmstat_next,
2447 	.stop	= vmstat_stop,
2448 	.show	= vmstat_show,
2449 };
2450 
2451 #endif /* CONFIG_PROC_FS */
2452 
2453 #ifdef CONFIG_HOTPLUG_CPU
2454 static int page_alloc_cpu_notify(struct notifier_block *self,
2455 				 unsigned long action, void *hcpu)
2456 {
2457 	int cpu = (unsigned long)hcpu;
2458 	long *count;
2459 	unsigned long *src, *dest;
2460 
2461 	if (action == CPU_DEAD) {
2462 		int i;
2463 
2464 		/* Drain local pagecache count. */
2465 		count = &per_cpu(nr_pagecache_local, cpu);
2466 		atomic_add(*count, &nr_pagecache);
2467 		*count = 0;
2468 		local_irq_disable();
2469 		__drain_pages(cpu);
2470 
2471 		/* Add dead cpu's page_states to our own. */
2472 		dest = (unsigned long *)&__get_cpu_var(page_states);
2473 		src = (unsigned long *)&per_cpu(page_states, cpu);
2474 
2475 		for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
2476 				i++) {
2477 			dest[i] += src[i];
2478 			src[i] = 0;
2479 		}
2480 
2481 		local_irq_enable();
2482 	}
2483 	return NOTIFY_OK;
2484 }
2485 #endif /* CONFIG_HOTPLUG_CPU */
2486 
2487 void __init page_alloc_init(void)
2488 {
2489 	hotcpu_notifier(page_alloc_cpu_notify, 0);
2490 }
2491 
2492 /*
2493  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
2494  *	or min_free_kbytes changes.
2495  */
2496 static void calculate_totalreserve_pages(void)
2497 {
2498 	struct pglist_data *pgdat;
2499 	unsigned long reserve_pages = 0;
2500 	int i, j;
2501 
2502 	for_each_online_pgdat(pgdat) {
2503 		for (i = 0; i < MAX_NR_ZONES; i++) {
2504 			struct zone *zone = pgdat->node_zones + i;
2505 			unsigned long max = 0;
2506 
2507 			/* Find valid and maximum lowmem_reserve in the zone */
2508 			for (j = i; j < MAX_NR_ZONES; j++) {
2509 				if (zone->lowmem_reserve[j] > max)
2510 					max = zone->lowmem_reserve[j];
2511 			}
2512 
2513 			/* we treat pages_high as reserved pages. */
2514 			max += zone->pages_high;
2515 
2516 			if (max > zone->present_pages)
2517 				max = zone->present_pages;
2518 			reserve_pages += max;
2519 		}
2520 	}
2521 	totalreserve_pages = reserve_pages;
2522 }
2523 
2524 /*
2525  * setup_per_zone_lowmem_reserve - called whenever
2526  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
2527  *	has a correct pages reserved value, so an adequate number of
2528  *	pages are left in the zone after a successful __alloc_pages().
2529  */
2530 static void setup_per_zone_lowmem_reserve(void)
2531 {
2532 	struct pglist_data *pgdat;
2533 	int j, idx;
2534 
2535 	for_each_online_pgdat(pgdat) {
2536 		for (j = 0; j < MAX_NR_ZONES; j++) {
2537 			struct zone *zone = pgdat->node_zones + j;
2538 			unsigned long present_pages = zone->present_pages;
2539 
2540 			zone->lowmem_reserve[j] = 0;
2541 
2542 			for (idx = j-1; idx >= 0; idx--) {
2543 				struct zone *lower_zone;
2544 
2545 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
2546 					sysctl_lowmem_reserve_ratio[idx] = 1;
2547 
2548 				lower_zone = pgdat->node_zones + idx;
2549 				lower_zone->lowmem_reserve[j] = present_pages /
2550 					sysctl_lowmem_reserve_ratio[idx];
2551 				present_pages += lower_zone->present_pages;
2552 			}
2553 		}
2554 	}
2555 
2556 	/* update totalreserve_pages */
2557 	calculate_totalreserve_pages();
2558 }
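/*
 * Sketch of the effect (hypothetical sizes, 4 KiB pages): with a HighMem
 * zone of 131072 pages and sysctl_lowmem_reserve_ratio[ZONE_NORMAL] == 32,
 * the Normal zone ends up with lowmem_reserve[ZONE_HIGHMEM] ==
 * 131072 / 32 == 4096 pages, i.e. 16 MiB of the Normal zone that
 * allocations able to use HighMem must leave free.
 */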
2559 
2560 /*
2561  * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
2562  *	that the pages_{min,low,high} values for each zone are set correctly
2563  *	with respect to min_free_kbytes.
2564  */
2565 void setup_per_zone_pages_min(void)
2566 {
2567 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
2568 	unsigned long lowmem_pages = 0;
2569 	struct zone *zone;
2570 	unsigned long flags;
2571 
2572 	/* Calculate total number of !ZONE_HIGHMEM pages */
2573 	for_each_zone(zone) {
2574 		if (!is_highmem(zone))
2575 			lowmem_pages += zone->present_pages;
2576 	}
2577 
2578 	for_each_zone(zone) {
2579 		u64 tmp;
2580 
2581 		spin_lock_irqsave(&zone->lru_lock, flags);
2582 		tmp = (u64)pages_min * zone->present_pages;
2583 		do_div(tmp, lowmem_pages);
2584 		if (is_highmem(zone)) {
2585 			/*
2586 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
2587 			 * need highmem pages, so cap pages_min to a small
2588 			 * value here.
2589 			 *
2590 			 * The (pages_high-pages_low) and (pages_low-pages_min)
2591 			 * deltas control asynchronous page reclaim, and so should
2592 			 * not be capped for highmem.
2593 			 */
2594 			int min_pages;
2595 
2596 			min_pages = zone->present_pages / 1024;
2597 			if (min_pages < SWAP_CLUSTER_MAX)
2598 				min_pages = SWAP_CLUSTER_MAX;
2599 			if (min_pages > 128)
2600 				min_pages = 128;
2601 			zone->pages_min = min_pages;
2602 		} else {
2603 			/*
2604 			 * If it's a lowmem zone, reserve a number of pages
2605 			 * proportionate to the zone's size.
2606 			 */
2607 			zone->pages_min = tmp;
2608 		}
2609 
2610 		zone->pages_low   = zone->pages_min + (tmp >> 2);
2611 		zone->pages_high  = zone->pages_min + (tmp >> 1);
2612 		spin_unlock_irqrestore(&zone->lru_lock, flags);
2613 	}
2614 
2615 	/* update totalreserve_pages */
2616 	calculate_totalreserve_pages();
2617 }
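/*
 * Worked example (hypothetical values, 4 KiB pages): with
 * min_free_kbytes == 4096 there are 1024 pages to distribute.  A lowmem
 * zone holding half of all lowmem gets tmp == 512, so pages_min == 512,
 * pages_low == 512 + 128 == 640 and pages_high == 512 + 256 == 768.
 */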
2618 
2619 /*
2620  * Initialise min_free_kbytes.
2621  *
2622  * For small machines we want it small (128k min).  For large machines
2623  * we want it large (64MB max).  But it is not linear, because network
2624  * bandwidth does not increase linearly with machine size.  We use
2625  *
2626  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
2627  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
2628  *
2629  * which yields
2630  *
2631  * 16MB:	512k
2632  * 32MB:	724k
2633  * 64MB:	1024k
2634  * 128MB:	1448k
2635  * 256MB:	2048k
2636  * 512MB:	2896k
2637  * 1024MB:	4096k
2638  * 2048MB:	5792k
2639  * 4096MB:	8192k
2640  * 8192MB:	11584k
2641  * 16384MB:	16384k
2642  */
2643 static int __init init_per_zone_pages_min(void)
2644 {
2645 	unsigned long lowmem_kbytes;
2646 
2647 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
2648 
2649 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
2650 	if (min_free_kbytes < 128)
2651 		min_free_kbytes = 128;
2652 	if (min_free_kbytes > 65536)
2653 		min_free_kbytes = 65536;
2654 	setup_per_zone_pages_min();
2655 	setup_per_zone_lowmem_reserve();
2656 	return 0;
2657 }
2658 module_init(init_per_zone_pages_min)
2659 
2660 /*
2661  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
2662  *	that we can call setup_per_zone_pages_min() whenever min_free_kbytes
2663  *	changes.
2664  */
2665 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
2666 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2667 {
2668 	proc_dointvec(table, write, file, buffer, length, ppos);
2669 	setup_per_zone_pages_min();
2670 	return 0;
2671 }
2672 
2673 /*
2674  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
2675  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
2676  *	whenever sysctl_lowmem_reserve_ratio changes.
2677  *
2678  * The reserve ratio has no relation to the pages_min watermarks; the
2679  * lowmem reserve ratio is only meaningful in relation to the boot-time
2680  * zone sizes.
2681  */
2682 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
2683 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2684 {
2685 	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2686 	setup_per_zone_lowmem_reserve();
2687 	return 0;
2688 }
2689 
2690 /*
2691  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
2692  * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
2693  * pagelist may hold before pages are flushed back to the buddy allocator.
2694  */
2695 
2696 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
2697 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2698 {
2699 	struct zone *zone;
2700 	unsigned int cpu;
2701 	int ret;
2702 
2703 	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2704 	if (!write || (ret == -EINVAL))
2705 		return ret;
2706 	for_each_zone(zone) {
2707 		for_each_online_cpu(cpu) {
2708 			unsigned long  high;
2709 			high = zone->present_pages / percpu_pagelist_fraction;
2710 			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
2711 		}
2712 	}
2713 	return 0;
2714 }
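/*
 * For example (illustrative), writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction caps each hot per-cpu list at
 * 1/8th of its zone's pages; see setup_pagelist_highmark() for how the
 * batch value is derived from that high mark.
 */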
2715 
2716 __initdata int hashdist = HASHDIST_DEFAULT;
2717 
2718 #ifdef CONFIG_NUMA
2719 static int __init set_hashdist(char *str)
2720 {
2721 	if (!str)
2722 		return 0;
2723 	hashdist = simple_strtoul(str, &str, 0);
2724 	return 1;
2725 }
2726 __setup("hashdist=", set_hashdist);
2727 #endif
2728 
2729 /*
2730  * allocate a large system hash table from bootmem
2731  * - it is assumed that the hash table must contain an exact power-of-2
2732  *   quantity of entries
2733  * - limit is the number of hash buckets, not the total allocation size
2734  */
2735 void *__init alloc_large_system_hash(const char *tablename,
2736 				     unsigned long bucketsize,
2737 				     unsigned long numentries,
2738 				     int scale,
2739 				     int flags,
2740 				     unsigned int *_hash_shift,
2741 				     unsigned int *_hash_mask,
2742 				     unsigned long limit)
2743 {
2744 	unsigned long long max = limit;
2745 	unsigned long log2qty, size;
2746 	void *table = NULL;
2747 
2748 	/* allow the kernel cmdline to have a say */
2749 	if (!numentries) {
2750 		/* round applicable memory size up to nearest megabyte */
2751 		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
2752 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
2753 		numentries >>= 20 - PAGE_SHIFT;
2754 		numentries <<= 20 - PAGE_SHIFT;
2755 
2756 		/* limit to 1 bucket per 2^scale bytes of low memory */
2757 		if (scale > PAGE_SHIFT)
2758 			numentries >>= (scale - PAGE_SHIFT);
2759 		else
2760 			numentries <<= (PAGE_SHIFT - scale);
2761 	}
2762 	numentries = roundup_pow_of_two(numentries);
2763 
2764 	/* limit allocation size to 1/16 total memory by default */
2765 	if (max == 0) {
2766 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2767 		do_div(max, bucketsize);
2768 	}
2769 
2770 	if (numentries > max)
2771 		numentries = max;
2772 
2773 	log2qty = long_log2(numentries);
2774 
2775 	do {
2776 		size = bucketsize << log2qty;
2777 		if (flags & HASH_EARLY)
2778 			table = alloc_bootmem(size);
2779 		else if (hashdist)
2780 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
2781 		else {
2782 			unsigned long order;
2783 			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
2784 				;
2785 			table = (void*) __get_free_pages(GFP_ATOMIC, order);
2786 		}
2787 	} while (!table && size > PAGE_SIZE && --log2qty);
2788 
2789 	if (!table)
2790 		panic("Failed to allocate %s hash table\n", tablename);
2791 
2792 	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
2793 	       tablename,
2794 	       (1U << log2qty),
2795 	       long_log2(size) - PAGE_SHIFT,
2796 	       size);
2797 
2798 	if (_hash_shift)
2799 		*_hash_shift = log2qty;
2800 	if (_hash_mask)
2801 		*_hash_mask = (1 << log2qty) - 1;
2802 
2803 	return table;
2804 }
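/*
 * Rough sizing sketch (hypothetical numbers, 4 KiB pages): a caller
 * passing bucketsize == 8, scale == 14, numentries == 0 and limit == 0
 * on a machine with nr_kernel_pages == 262144 (1 GiB of lowmem) gets
 * 262144 >> (14 - 12) == 65536 entries, well under the 1/16-of-memory
 * limit, so log2qty == 16 and the table occupies 8 << 16 == 512 KiB.
 */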
2805 
2806 #ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
2807 /*
2808  * pfn <-> page translation. out-of-line version.
2809  * (see asm-generic/memory_model.h)
2810  */
2811 #if defined(CONFIG_FLATMEM)
2812 struct page *pfn_to_page(unsigned long pfn)
2813 {
2814 	return mem_map + (pfn - ARCH_PFN_OFFSET);
2815 }
2816 unsigned long page_to_pfn(struct page *page)
2817 {
2818 	return (page - mem_map) + ARCH_PFN_OFFSET;
2819 }
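/*
 * In the flat case the translation is plain pointer arithmetic; for
 * instance, assuming ARCH_PFN_OFFSET == 0, pfn_to_page(5) is simply
 * &mem_map[5] and page_to_pfn(&mem_map[5]) returns 5.
 */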
2820 #elif defined(CONFIG_DISCONTIGMEM)
2821 struct page *pfn_to_page(unsigned long pfn)
2822 {
2823 	int nid = arch_pfn_to_nid(pfn);
2824 	return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid);
2825 }
2826 unsigned long page_to_pfn(struct page *page)
2827 {
2828 	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
2829 	return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
2830 }
2831 #elif defined(CONFIG_SPARSEMEM)
2832 struct page *pfn_to_page(unsigned long pfn)
2833 {
2834 	return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
2835 }
2836 
2837 unsigned long page_to_pfn(struct page *page)
2838 {
2839 	long section_id = page_to_section(page);
2840 	return page - __section_mem_map_addr(__nr_to_section(section_id));
2841 }
2842 #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
2843 EXPORT_SYMBOL(pfn_to_page);
2844 EXPORT_SYMBOL(page_to_pfn);
2845 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
2846