xref: /linux/mm/page_alloc.c (revision 9ffc93f203c18a70623f21950f1dd473c9ec48cd)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list, the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/compiler.h>
26 #include <linux/kernel.h>
27 #include <linux/kmemcheck.h>
28 #include <linux/module.h>
29 #include <linux/suspend.h>
30 #include <linux/pagevec.h>
31 #include <linux/blkdev.h>
32 #include <linux/slab.h>
33 #include <linux/ratelimit.h>
34 #include <linux/oom.h>
35 #include <linux/notifier.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/stop_machine.h>
46 #include <linux/sort.h>
47 #include <linux/pfn.h>
48 #include <linux/backing-dev.h>
49 #include <linux/fault-inject.h>
50 #include <linux/page-isolation.h>
51 #include <linux/page_cgroup.h>
52 #include <linux/debugobjects.h>
53 #include <linux/kmemleak.h>
54 #include <linux/memory.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <linux/ftrace_event.h>
58 #include <linux/memcontrol.h>
59 #include <linux/prefetch.h>
60 #include <linux/page-debug-flags.h>
61 
62 #include <asm/tlbflush.h>
63 #include <asm/div64.h>
64 #include "internal.h"
65 
66 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
67 DEFINE_PER_CPU(int, numa_node);
68 EXPORT_PER_CPU_SYMBOL(numa_node);
69 #endif
70 
71 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
72 /*
73  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
74  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
75  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
76  * defined in <linux/topology.h>.
77  */
78 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
79 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
80 #endif
81 
82 /*
83  * Array of node states.
84  */
85 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
86 	[N_POSSIBLE] = NODE_MASK_ALL,
87 	[N_ONLINE] = { { [0] = 1UL } },
88 #ifndef CONFIG_NUMA
89 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
90 #ifdef CONFIG_HIGHMEM
91 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
92 #endif
93 	[N_CPU] = { { [0] = 1UL } },
94 #endif	/* NUMA */
95 };
96 EXPORT_SYMBOL(node_states);
97 
98 unsigned long totalram_pages __read_mostly;
99 unsigned long totalreserve_pages __read_mostly;
100 /*
101  * When calculating the number of globally allowed dirty pages, there
102  * is a certain number of per-zone reserves that should not be
103  * considered dirtyable memory.  This is the sum of those reserves
104  * over all existing zones that contribute dirtyable memory.
105  */
106 unsigned long dirty_balance_reserve __read_mostly;
107 
108 int percpu_pagelist_fraction;
109 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
110 
111 #ifdef CONFIG_PM_SLEEP
112 /*
113  * The following functions are used by the suspend/hibernate code to temporarily
114  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
115  * while devices are suspended.  To avoid races with the suspend/hibernate code,
116  * they should always be called with pm_mutex held (gfp_allowed_mask also should
117  * only be modified with pm_mutex held, unless the suspend/hibernate code is
118  * guaranteed not to run in parallel with that modification).
119  */
120 
121 static gfp_t saved_gfp_mask;
122 
123 void pm_restore_gfp_mask(void)
124 {
125 	WARN_ON(!mutex_is_locked(&pm_mutex));
126 	if (saved_gfp_mask) {
127 		gfp_allowed_mask = saved_gfp_mask;
128 		saved_gfp_mask = 0;
129 	}
130 }
131 
132 void pm_restrict_gfp_mask(void)
133 {
134 	WARN_ON(!mutex_is_locked(&pm_mutex));
135 	WARN_ON(saved_gfp_mask);
136 	saved_gfp_mask = gfp_allowed_mask;
137 	gfp_allowed_mask &= ~GFP_IOFS;
138 }
139 
140 bool pm_suspended_storage(void)
141 {
142 	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
143 		return false;
144 	return true;
145 }
146 #endif /* CONFIG_PM_SLEEP */
147 
148 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
149 int pageblock_order __read_mostly;
150 #endif
151 
152 static void __free_pages_ok(struct page *page, unsigned int order);
153 
154 /*
155  * results with 256, 32 in the lowmem_reserve sysctl:
156  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
157  *	1G machine -> (16M dma, 784M normal, 224M high)
158  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
159  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
160  *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
161  *
162  * TBD: should special case ZONE_DMA32 machines here - in those we normally
163  * don't need any ZONE_NORMAL reservation
164  */
165 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
166 #ifdef CONFIG_ZONE_DMA
167 	 256,
168 #endif
169 #ifdef CONFIG_ZONE_DMA32
170 	 256,
171 #endif
172 #ifdef CONFIG_HIGHMEM
173 	 32,
174 #endif
175 	 32,
176 };
177 
178 EXPORT_SYMBOL(totalram_pages);
179 
180 static char * const zone_names[MAX_NR_ZONES] = {
181 #ifdef CONFIG_ZONE_DMA
182 	 "DMA",
183 #endif
184 #ifdef CONFIG_ZONE_DMA32
185 	 "DMA32",
186 #endif
187 	 "Normal",
188 #ifdef CONFIG_HIGHMEM
189 	 "HighMem",
190 #endif
191 	 "Movable",
192 };
193 
194 int min_free_kbytes = 1024;
195 
196 static unsigned long __meminitdata nr_kernel_pages;
197 static unsigned long __meminitdata nr_all_pages;
198 static unsigned long __meminitdata dma_reserve;
199 
200 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
201 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
202 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
203 static unsigned long __initdata required_kernelcore;
204 static unsigned long __initdata required_movablecore;
205 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
206 
207 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
208 int movable_zone;
209 EXPORT_SYMBOL(movable_zone);
210 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
211 
212 #if MAX_NUMNODES > 1
213 int nr_node_ids __read_mostly = MAX_NUMNODES;
214 int nr_online_nodes __read_mostly = 1;
215 EXPORT_SYMBOL(nr_node_ids);
216 EXPORT_SYMBOL(nr_online_nodes);
217 #endif
218 
219 int page_group_by_mobility_disabled __read_mostly;
220 
221 static void set_pageblock_migratetype(struct page *page, int migratetype)
222 {
223 
224 	if (unlikely(page_group_by_mobility_disabled))
225 		migratetype = MIGRATE_UNMOVABLE;
226 
227 	set_pageblock_flags_group(page, (unsigned long)migratetype,
228 					PB_migrate, PB_migrate_end);
229 }
230 
231 bool oom_killer_disabled __read_mostly;
232 
233 #ifdef CONFIG_DEBUG_VM
234 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
235 {
236 	int ret = 0;
237 	unsigned seq;
238 	unsigned long pfn = page_to_pfn(page);
239 
240 	do {
241 		seq = zone_span_seqbegin(zone);
242 		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
243 			ret = 1;
244 		else if (pfn < zone->zone_start_pfn)
245 			ret = 1;
246 	} while (zone_span_seqretry(zone, seq));
247 
248 	return ret;
249 }
250 
251 static int page_is_consistent(struct zone *zone, struct page *page)
252 {
253 	if (!pfn_valid_within(page_to_pfn(page)))
254 		return 0;
255 	if (zone != page_zone(page))
256 		return 0;
257 
258 	return 1;
259 }
260 /*
261  * Temporary debugging check for pages not lying within a given zone.
262  */
263 static int bad_range(struct zone *zone, struct page *page)
264 {
265 	if (page_outside_zone_boundaries(zone, page))
266 		return 1;
267 	if (!page_is_consistent(zone, page))
268 		return 1;
269 
270 	return 0;
271 }
272 #else
273 static inline int bad_range(struct zone *zone, struct page *page)
274 {
275 	return 0;
276 }
277 #endif
278 
279 static void bad_page(struct page *page)
280 {
281 	static unsigned long resume;
282 	static unsigned long nr_shown;
283 	static unsigned long nr_unshown;
284 
285 	/* Don't complain about poisoned pages */
286 	if (PageHWPoison(page)) {
287 		reset_page_mapcount(page); /* remove PageBuddy */
288 		return;
289 	}
290 
291 	/*
292 	 * Allow a burst of 60 reports, then keep quiet for that minute;
293 	 * or allow a steady drip of one report per second.
294 	 */
295 	if (nr_shown == 60) {
296 		if (time_before(jiffies, resume)) {
297 			nr_unshown++;
298 			goto out;
299 		}
300 		if (nr_unshown) {
301 			printk(KERN_ALERT
302 			      "BUG: Bad page state: %lu messages suppressed\n",
303 				nr_unshown);
304 			nr_unshown = 0;
305 		}
306 		nr_shown = 0;
307 	}
308 	if (nr_shown++ == 0)
309 		resume = jiffies + 60 * HZ;
310 
311 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
312 		current->comm, page_to_pfn(page));
313 	dump_page(page);
314 
315 	print_modules();
316 	dump_stack();
317 out:
318 	/* Leave bad fields for debug, except PageBuddy could make trouble */
319 	reset_page_mapcount(page); /* remove PageBuddy */
320 	add_taint(TAINT_BAD_PAGE);
321 }
322 
323 /*
324  * Higher-order pages are called "compound pages".  They are structured thusly:
325  *
326  * The first PAGE_SIZE page is called the "head page".
327  *
328  * The remaining PAGE_SIZE pages are called "tail pages".
329  *
330  * All pages have PG_compound set.  All tail pages have their ->first_page
331  * pointing at the head page.
332  *
333  * The first tail page's ->lru.next holds the address of the compound page's
334  * put_page() function.  Its ->lru.prev holds the order of allocation.
335  * This usage means that zero-order pages may not be compound.
336  */
337 
338 static void free_compound_page(struct page *page)
339 {
340 	__free_pages_ok(page, compound_order(page));
341 }
342 
343 void prep_compound_page(struct page *page, unsigned long order)
344 {
345 	int i;
346 	int nr_pages = 1 << order;
347 
348 	set_compound_page_dtor(page, free_compound_page);
349 	set_compound_order(page, order);
350 	__SetPageHead(page);
351 	for (i = 1; i < nr_pages; i++) {
352 		struct page *p = page + i;
353 		__SetPageTail(p);
354 		set_page_count(p, 0);
355 		p->first_page = page;
356 	}
357 }
358 
359 /* update __split_huge_page_refcount if you change this function */
360 static int destroy_compound_page(struct page *page, unsigned long order)
361 {
362 	int i;
363 	int nr_pages = 1 << order;
364 	int bad = 0;
365 
366 	if (unlikely(compound_order(page) != order) ||
367 	    unlikely(!PageHead(page))) {
368 		bad_page(page);
369 		bad++;
370 	}
371 
372 	__ClearPageHead(page);
373 
374 	for (i = 1; i < nr_pages; i++) {
375 		struct page *p = page + i;
376 
377 		if (unlikely(!PageTail(p) || (p->first_page != page))) {
378 			bad_page(page);
379 			bad++;
380 		}
381 		__ClearPageTail(p);
382 	}
383 
384 	return bad;
385 }
386 
387 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
388 {
389 	int i;
390 
391 	/*
392 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
393 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
394 	 */
395 	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
396 	for (i = 0; i < (1 << order); i++)
397 		clear_highpage(page + i);
398 }
399 
400 #ifdef CONFIG_DEBUG_PAGEALLOC
401 unsigned int _debug_guardpage_minorder;
402 
403 static int __init debug_guardpage_minorder_setup(char *buf)
404 {
405 	unsigned long res;
406 
407 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
408 		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
409 		return 0;
410 	}
411 	_debug_guardpage_minorder = res;
412 	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
413 	return 0;
414 }
415 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
416 
417 static inline void set_page_guard_flag(struct page *page)
418 {
419 	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
420 }
421 
422 static inline void clear_page_guard_flag(struct page *page)
423 {
424 	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
425 }
426 #else
427 static inline void set_page_guard_flag(struct page *page) { }
428 static inline void clear_page_guard_flag(struct page *page) { }
429 #endif
430 
431 static inline void set_page_order(struct page *page, int order)
432 {
433 	set_page_private(page, order);
434 	__SetPageBuddy(page);
435 }
436 
437 static inline void rmv_page_order(struct page *page)
438 {
439 	__ClearPageBuddy(page);
440 	set_page_private(page, 0);
441 }
442 
443 /*
444  * Locate the struct page for both the matching buddy in our
445  * pair (buddy1) and the combined O(n+1) page they form (page).
446  *
447  * 1) Any buddy B1 will have an order O twin B2 which satisfies
448  * the following equation:
449  *     B2 = B1 ^ (1 << O)
450  * For example, if the starting buddy (buddy2) is #8 its order
451  * 1 buddy is #10:
452  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
453  *
454  * 2) Any buddy B will have an order O+1 parent P which
455  * satisfies the following equation:
456  *     P = B & ~(1 << O)
457  *
458  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
459  */
460 static inline unsigned long
461 __find_buddy_index(unsigned long page_idx, unsigned int order)
462 {
463 	return page_idx ^ (1 << order);
464 }
465 
466 /*
467  * This function checks whether a page is free && is the buddy
468  * we can do coalesce a page and its buddy if
469  * (a) the buddy is not in a hole &&
470  * (b) the buddy is in the buddy system &&
471  * (c) a page and its buddy have the same order &&
472  * (d) a page and its buddy are in the same zone.
473  *
474  * For recording whether a page is in the buddy system, we set ->_mapcount -2.
475  * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
476  *
477  * For recording page's order, we use page_private(page).
478  */
479 static inline int page_is_buddy(struct page *page, struct page *buddy,
480 								int order)
481 {
482 	if (!pfn_valid_within(page_to_pfn(buddy)))
483 		return 0;
484 
485 	if (page_zone_id(page) != page_zone_id(buddy))
486 		return 0;
487 
488 	if (page_is_guard(buddy) && page_order(buddy) == order) {
489 		VM_BUG_ON(page_count(buddy) != 0);
490 		return 1;
491 	}
492 
493 	if (PageBuddy(buddy) && page_order(buddy) == order) {
494 		VM_BUG_ON(page_count(buddy) != 0);
495 		return 1;
496 	}
497 	return 0;
498 }
499 
500 /*
501  * Freeing function for a buddy system allocator.
502  *
503  * The concept of a buddy system is to maintain direct-mapped table
504  * (containing bit values) for memory blocks of various "orders".
505  * The bottom level table contains the map for the smallest allocatable
506  * units of memory (here, pages), and each level above it describes
507  * pairs of units from the levels below, hence, "buddies".
508  * At a high level, all that happens here is marking the table entry
509  * at the bottom level available, and propagating the changes upward
510  * as necessary, plus some accounting needed to play nicely with other
511  * parts of the VM system.
512  * At each level, we keep a list of pages, which are heads of continuous
513  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
514  * order is recorded in page_private(page) field.
515  * So when we are allocating or freeing one, we can derive the state of the
516  * other.  That is, if we allocate a small block, and both were
517  * free, the remainder of the region must be split into blocks.
518  * If a block is freed, and its buddy is also free, then this
519  * triggers coalescing into a block of larger size.
520  *
521  * -- wli
522  */
523 
524 static inline void __free_one_page(struct page *page,
525 		struct zone *zone, unsigned int order,
526 		int migratetype)
527 {
528 	unsigned long page_idx;
529 	unsigned long combined_idx;
530 	unsigned long uninitialized_var(buddy_idx);
531 	struct page *buddy;
532 
533 	if (unlikely(PageCompound(page)))
534 		if (unlikely(destroy_compound_page(page, order)))
535 			return;
536 
537 	VM_BUG_ON(migratetype == -1);
538 
539 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
540 
541 	VM_BUG_ON(page_idx & ((1 << order) - 1));
542 	VM_BUG_ON(bad_range(zone, page));
543 
544 	while (order < MAX_ORDER-1) {
545 		buddy_idx = __find_buddy_index(page_idx, order);
546 		buddy = page + (buddy_idx - page_idx);
547 		if (!page_is_buddy(page, buddy, order))
548 			break;
549 		/*
550 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
551 		 * merge with it and move up one order.
552 		 */
553 		if (page_is_guard(buddy)) {
554 			clear_page_guard_flag(buddy);
555 			set_page_private(page, 0);
556 			__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
557 		} else {
558 			list_del(&buddy->lru);
559 			zone->free_area[order].nr_free--;
560 			rmv_page_order(buddy);
561 		}
562 		combined_idx = buddy_idx & page_idx;
563 		page = page + (combined_idx - page_idx);
564 		page_idx = combined_idx;
565 		order++;
566 	}
567 	set_page_order(page, order);
568 
569 	/*
570 	 * If this is not the largest possible page, check if the buddy
571 	 * of the next-highest order is free. If it is, it's possible
572 	 * that pages are being freed that will coalesce soon. In case,
573 	 * that is happening, add the free page to the tail of the list
574 	 * so it's less likely to be used soon and more likely to be merged
575 	 * as a higher order page
576 	 */
577 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
578 		struct page *higher_page, *higher_buddy;
579 		combined_idx = buddy_idx & page_idx;
580 		higher_page = page + (combined_idx - page_idx);
581 		buddy_idx = __find_buddy_index(combined_idx, order + 1);
582 		higher_buddy = page + (buddy_idx - combined_idx);
583 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
584 			list_add_tail(&page->lru,
585 				&zone->free_area[order].free_list[migratetype]);
586 			goto out;
587 		}
588 	}
589 
590 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
591 out:
592 	zone->free_area[order].nr_free++;
593 }
594 
595 /*
596  * free_page_mlock() -- clean up attempts to free and mlocked() page.
597  * Page should not be on lru, so no need to fix that up.
598  * free_pages_check() will verify...
599  */
600 static inline void free_page_mlock(struct page *page)
601 {
602 	__dec_zone_page_state(page, NR_MLOCK);
603 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
604 }
605 
606 static inline int free_pages_check(struct page *page)
607 {
608 	if (unlikely(page_mapcount(page) |
609 		(page->mapping != NULL)  |
610 		(atomic_read(&page->_count) != 0) |
611 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
612 		(mem_cgroup_bad_page_check(page)))) {
613 		bad_page(page);
614 		return 1;
615 	}
616 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
617 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
618 	return 0;
619 }
620 
621 /*
622  * Frees a number of pages from the PCP lists
623  * Assumes all pages on list are in same zone, and of same order.
624  * count is the number of pages to free.
625  *
626  * If the zone was previously in an "all pages pinned" state then look to
627  * see if this freeing clears that state.
628  *
629  * And clear the zone's pages_scanned counter, to hold off the "all pages are
630  * pinned" detection logic.
631  */
632 static void free_pcppages_bulk(struct zone *zone, int count,
633 					struct per_cpu_pages *pcp)
634 {
635 	int migratetype = 0;
636 	int batch_free = 0;
637 	int to_free = count;
638 
639 	spin_lock(&zone->lock);
640 	zone->all_unreclaimable = 0;
641 	zone->pages_scanned = 0;
642 
643 	while (to_free) {
644 		struct page *page;
645 		struct list_head *list;
646 
647 		/*
648 		 * Remove pages from lists in a round-robin fashion. A
649 		 * batch_free count is maintained that is incremented when an
650 		 * empty list is encountered.  This is so more pages are freed
651 		 * off fuller lists instead of spinning excessively around empty
652 		 * lists
653 		 */
654 		do {
655 			batch_free++;
656 			if (++migratetype == MIGRATE_PCPTYPES)
657 				migratetype = 0;
658 			list = &pcp->lists[migratetype];
659 		} while (list_empty(list));
660 
661 		/* This is the only non-empty list. Free them all. */
662 		if (batch_free == MIGRATE_PCPTYPES)
663 			batch_free = to_free;
664 
665 		do {
666 			page = list_entry(list->prev, struct page, lru);
667 			/* must delete as __free_one_page list manipulates */
668 			list_del(&page->lru);
669 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
670 			__free_one_page(page, zone, 0, page_private(page));
671 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
672 		} while (--to_free && --batch_free && !list_empty(list));
673 	}
674 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
675 	spin_unlock(&zone->lock);
676 }
677 
678 static void free_one_page(struct zone *zone, struct page *page, int order,
679 				int migratetype)
680 {
681 	spin_lock(&zone->lock);
682 	zone->all_unreclaimable = 0;
683 	zone->pages_scanned = 0;
684 
685 	__free_one_page(page, zone, order, migratetype);
686 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
687 	spin_unlock(&zone->lock);
688 }
689 
690 static bool free_pages_prepare(struct page *page, unsigned int order)
691 {
692 	int i;
693 	int bad = 0;
694 
695 	trace_mm_page_free(page, order);
696 	kmemcheck_free_shadow(page, order);
697 
698 	if (PageAnon(page))
699 		page->mapping = NULL;
700 	for (i = 0; i < (1 << order); i++)
701 		bad += free_pages_check(page + i);
702 	if (bad)
703 		return false;
704 
705 	if (!PageHighMem(page)) {
706 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
707 		debug_check_no_obj_freed(page_address(page),
708 					   PAGE_SIZE << order);
709 	}
710 	arch_free_page(page, order);
711 	kernel_map_pages(page, 1 << order, 0);
712 
713 	return true;
714 }
715 
716 static void __free_pages_ok(struct page *page, unsigned int order)
717 {
718 	unsigned long flags;
719 	int wasMlocked = __TestClearPageMlocked(page);
720 
721 	if (!free_pages_prepare(page, order))
722 		return;
723 
724 	local_irq_save(flags);
725 	if (unlikely(wasMlocked))
726 		free_page_mlock(page);
727 	__count_vm_events(PGFREE, 1 << order);
728 	free_one_page(page_zone(page), page, order,
729 					get_pageblock_migratetype(page));
730 	local_irq_restore(flags);
731 }
732 
733 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
734 {
735 	unsigned int nr_pages = 1 << order;
736 	unsigned int loop;
737 
738 	prefetchw(page);
739 	for (loop = 0; loop < nr_pages; loop++) {
740 		struct page *p = &page[loop];
741 
742 		if (loop + 1 < nr_pages)
743 			prefetchw(p + 1);
744 		__ClearPageReserved(p);
745 		set_page_count(p, 0);
746 	}
747 
748 	set_page_refcounted(page);
749 	__free_pages(page, order);
750 }
751 
752 
753 /*
754  * The order of subdivision here is critical for the IO subsystem.
755  * Please do not alter this order without good reasons and regression
756  * testing. Specifically, as large blocks of memory are subdivided,
757  * the order in which smaller blocks are delivered depends on the order
758  * they're subdivided in this function. This is the primary factor
759  * influencing the order in which pages are delivered to the IO
760  * subsystem according to empirical testing, and this is also justified
761  * by considering the behavior of a buddy system containing a single
762  * large block of memory acted on by a series of small allocations.
763  * This behavior is a critical factor in sglist merging's success.
764  *
765  * -- wli
766  */
767 static inline void expand(struct zone *zone, struct page *page,
768 	int low, int high, struct free_area *area,
769 	int migratetype)
770 {
771 	unsigned long size = 1 << high;
772 
773 	while (high > low) {
774 		area--;
775 		high--;
776 		size >>= 1;
777 		VM_BUG_ON(bad_range(zone, &page[size]));
778 
779 #ifdef CONFIG_DEBUG_PAGEALLOC
780 		if (high < debug_guardpage_minorder()) {
781 			/*
782 			 * Mark as guard pages (or page), that will allow to
783 			 * merge back to allocator when buddy will be freed.
784 			 * Corresponding page table entries will not be touched,
785 			 * pages will stay not present in virtual address space
786 			 */
787 			INIT_LIST_HEAD(&page[size].lru);
788 			set_page_guard_flag(&page[size]);
789 			set_page_private(&page[size], high);
790 			/* Guard pages are not available for any usage */
791 			__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
792 			continue;
793 		}
794 #endif
795 		list_add(&page[size].lru, &area->free_list[migratetype]);
796 		area->nr_free++;
797 		set_page_order(&page[size], high);
798 	}
799 }
800 
801 /*
802  * This page is about to be returned from the page allocator
803  */
804 static inline int check_new_page(struct page *page)
805 {
806 	if (unlikely(page_mapcount(page) |
807 		(page->mapping != NULL)  |
808 		(atomic_read(&page->_count) != 0)  |
809 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
810 		(mem_cgroup_bad_page_check(page)))) {
811 		bad_page(page);
812 		return 1;
813 	}
814 	return 0;
815 }
816 
817 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
818 {
819 	int i;
820 
821 	for (i = 0; i < (1 << order); i++) {
822 		struct page *p = page + i;
823 		if (unlikely(check_new_page(p)))
824 			return 1;
825 	}
826 
827 	set_page_private(page, 0);
828 	set_page_refcounted(page);
829 
830 	arch_alloc_page(page, order);
831 	kernel_map_pages(page, 1 << order, 1);
832 
833 	if (gfp_flags & __GFP_ZERO)
834 		prep_zero_page(page, order, gfp_flags);
835 
836 	if (order && (gfp_flags & __GFP_COMP))
837 		prep_compound_page(page, order);
838 
839 	return 0;
840 }
841 
842 /*
843  * Go through the free lists for the given migratetype and remove
844  * the smallest available page from the freelists
845  */
846 static inline
847 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
848 						int migratetype)
849 {
850 	unsigned int current_order;
851 	struct free_area * area;
852 	struct page *page;
853 
854 	/* Find a page of the appropriate size in the preferred list */
855 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
856 		area = &(zone->free_area[current_order]);
857 		if (list_empty(&area->free_list[migratetype]))
858 			continue;
859 
860 		page = list_entry(area->free_list[migratetype].next,
861 							struct page, lru);
862 		list_del(&page->lru);
863 		rmv_page_order(page);
864 		area->nr_free--;
865 		expand(zone, page, order, current_order, area, migratetype);
866 		return page;
867 	}
868 
869 	return NULL;
870 }
871 
872 
873 /*
874  * This array describes the order lists are fallen back to when
875  * the free lists for the desirable migrate type are depleted
876  */
877 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
878 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
879 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
880 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
881 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
882 };
883 
884 /*
885  * Move the free pages in a range to the free lists of the requested type.
886  * Note that start_page and end_pages are not aligned on a pageblock
887  * boundary. If alignment is required, use move_freepages_block()
888  */
889 static int move_freepages(struct zone *zone,
890 			  struct page *start_page, struct page *end_page,
891 			  int migratetype)
892 {
893 	struct page *page;
894 	unsigned long order;
895 	int pages_moved = 0;
896 
897 #ifndef CONFIG_HOLES_IN_ZONE
898 	/*
899 	 * page_zone is not safe to call in this context when
900 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
901 	 * anyway as we check zone boundaries in move_freepages_block().
902 	 * Remove at a later date when no bug reports exist related to
903 	 * grouping pages by mobility
904 	 */
905 	BUG_ON(page_zone(start_page) != page_zone(end_page));
906 #endif
907 
908 	for (page = start_page; page <= end_page;) {
909 		/* Make sure we are not inadvertently changing nodes */
910 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
911 
912 		if (!pfn_valid_within(page_to_pfn(page))) {
913 			page++;
914 			continue;
915 		}
916 
917 		if (!PageBuddy(page)) {
918 			page++;
919 			continue;
920 		}
921 
922 		order = page_order(page);
923 		list_move(&page->lru,
924 			  &zone->free_area[order].free_list[migratetype]);
925 		page += 1 << order;
926 		pages_moved += 1 << order;
927 	}
928 
929 	return pages_moved;
930 }
931 
932 static int move_freepages_block(struct zone *zone, struct page *page,
933 				int migratetype)
934 {
935 	unsigned long start_pfn, end_pfn;
936 	struct page *start_page, *end_page;
937 
938 	start_pfn = page_to_pfn(page);
939 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
940 	start_page = pfn_to_page(start_pfn);
941 	end_page = start_page + pageblock_nr_pages - 1;
942 	end_pfn = start_pfn + pageblock_nr_pages - 1;
943 
944 	/* Do not cross zone boundaries */
945 	if (start_pfn < zone->zone_start_pfn)
946 		start_page = page;
947 	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
948 		return 0;
949 
950 	return move_freepages(zone, start_page, end_page, migratetype);
951 }
952 
953 static void change_pageblock_range(struct page *pageblock_page,
954 					int start_order, int migratetype)
955 {
956 	int nr_pageblocks = 1 << (start_order - pageblock_order);
957 
958 	while (nr_pageblocks--) {
959 		set_pageblock_migratetype(pageblock_page, migratetype);
960 		pageblock_page += pageblock_nr_pages;
961 	}
962 }
963 
964 /* Remove an element from the buddy allocator from the fallback list */
965 static inline struct page *
966 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
967 {
968 	struct free_area * area;
969 	int current_order;
970 	struct page *page;
971 	int migratetype, i;
972 
973 	/* Find the largest possible block of pages in the other list */
974 	for (current_order = MAX_ORDER-1; current_order >= order;
975 						--current_order) {
976 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
977 			migratetype = fallbacks[start_migratetype][i];
978 
979 			/* MIGRATE_RESERVE handled later if necessary */
980 			if (migratetype == MIGRATE_RESERVE)
981 				continue;
982 
983 			area = &(zone->free_area[current_order]);
984 			if (list_empty(&area->free_list[migratetype]))
985 				continue;
986 
987 			page = list_entry(area->free_list[migratetype].next,
988 					struct page, lru);
989 			area->nr_free--;
990 
991 			/*
992 			 * If breaking a large block of pages, move all free
993 			 * pages to the preferred allocation list. If falling
994 			 * back for a reclaimable kernel allocation, be more
995 			 * aggressive about taking ownership of free pages
996 			 */
997 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
998 					start_migratetype == MIGRATE_RECLAIMABLE ||
999 					page_group_by_mobility_disabled) {
1000 				unsigned long pages;
1001 				pages = move_freepages_block(zone, page,
1002 								start_migratetype);
1003 
1004 				/* Claim the whole block if over half of it is free */
1005 				if (pages >= (1 << (pageblock_order-1)) ||
1006 						page_group_by_mobility_disabled)
1007 					set_pageblock_migratetype(page,
1008 								start_migratetype);
1009 
1010 				migratetype = start_migratetype;
1011 			}
1012 
1013 			/* Remove the page from the freelists */
1014 			list_del(&page->lru);
1015 			rmv_page_order(page);
1016 
1017 			/* Take ownership for orders >= pageblock_order */
1018 			if (current_order >= pageblock_order)
1019 				change_pageblock_range(page, current_order,
1020 							start_migratetype);
1021 
1022 			expand(zone, page, order, current_order, area, migratetype);
1023 
1024 			trace_mm_page_alloc_extfrag(page, order, current_order,
1025 				start_migratetype, migratetype);
1026 
1027 			return page;
1028 		}
1029 	}
1030 
1031 	return NULL;
1032 }
1033 
1034 /*
1035  * Do the hard work of removing an element from the buddy allocator.
1036  * Call me with the zone->lock already held.
1037  */
1038 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1039 						int migratetype)
1040 {
1041 	struct page *page;
1042 
1043 retry_reserve:
1044 	page = __rmqueue_smallest(zone, order, migratetype);
1045 
1046 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1047 		page = __rmqueue_fallback(zone, order, migratetype);
1048 
1049 		/*
1050 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1051 		 * is used because __rmqueue_smallest is an inline function
1052 		 * and we want just one call site
1053 		 */
1054 		if (!page) {
1055 			migratetype = MIGRATE_RESERVE;
1056 			goto retry_reserve;
1057 		}
1058 	}
1059 
1060 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
1061 	return page;
1062 }
1063 
1064 /*
1065  * Obtain a specified number of elements from the buddy allocator, all under
1066  * a single hold of the lock, for efficiency.  Add them to the supplied list.
1067  * Returns the number of new pages which were placed at *list.
1068  */
1069 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1070 			unsigned long count, struct list_head *list,
1071 			int migratetype, int cold)
1072 {
1073 	int i;
1074 
1075 	spin_lock(&zone->lock);
1076 	for (i = 0; i < count; ++i) {
1077 		struct page *page = __rmqueue(zone, order, migratetype);
1078 		if (unlikely(page == NULL))
1079 			break;
1080 
1081 		/*
1082 		 * Split buddy pages returned by expand() are received here
1083 		 * in physical page order. The page is added to the callers and
1084 		 * list and the list head then moves forward. From the callers
1085 		 * perspective, the linked list is ordered by page number in
1086 		 * some conditions. This is useful for IO devices that can
1087 		 * merge IO requests if the physical pages are ordered
1088 		 * properly.
1089 		 */
1090 		if (likely(cold == 0))
1091 			list_add(&page->lru, list);
1092 		else
1093 			list_add_tail(&page->lru, list);
1094 		set_page_private(page, migratetype);
1095 		list = &page->lru;
1096 	}
1097 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1098 	spin_unlock(&zone->lock);
1099 	return i;
1100 }
1101 
1102 #ifdef CONFIG_NUMA
1103 /*
1104  * Called from the vmstat counter updater to drain pagesets of this
1105  * currently executing processor on remote nodes after they have
1106  * expired.
1107  *
1108  * Note that this function must be called with the thread pinned to
1109  * a single processor.
1110  */
1111 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1112 {
1113 	unsigned long flags;
1114 	int to_drain;
1115 
1116 	local_irq_save(flags);
1117 	if (pcp->count >= pcp->batch)
1118 		to_drain = pcp->batch;
1119 	else
1120 		to_drain = pcp->count;
1121 	free_pcppages_bulk(zone, to_drain, pcp);
1122 	pcp->count -= to_drain;
1123 	local_irq_restore(flags);
1124 }
1125 #endif
1126 
1127 /*
1128  * Drain pages of the indicated processor.
1129  *
1130  * The processor must either be the current processor and the
1131  * thread pinned to the current processor or a processor that
1132  * is not online.
1133  */
1134 static void drain_pages(unsigned int cpu)
1135 {
1136 	unsigned long flags;
1137 	struct zone *zone;
1138 
1139 	for_each_populated_zone(zone) {
1140 		struct per_cpu_pageset *pset;
1141 		struct per_cpu_pages *pcp;
1142 
1143 		local_irq_save(flags);
1144 		pset = per_cpu_ptr(zone->pageset, cpu);
1145 
1146 		pcp = &pset->pcp;
1147 		if (pcp->count) {
1148 			free_pcppages_bulk(zone, pcp->count, pcp);
1149 			pcp->count = 0;
1150 		}
1151 		local_irq_restore(flags);
1152 	}
1153 }
1154 
1155 /*
1156  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1157  */
1158 void drain_local_pages(void *arg)
1159 {
1160 	drain_pages(smp_processor_id());
1161 }
1162 
1163 /*
1164  * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1165  */
1166 void drain_all_pages(void)
1167 {
1168 	on_each_cpu(drain_local_pages, NULL, 1);
1169 }
1170 
1171 #ifdef CONFIG_HIBERNATION
1172 
1173 void mark_free_pages(struct zone *zone)
1174 {
1175 	unsigned long pfn, max_zone_pfn;
1176 	unsigned long flags;
1177 	int order, t;
1178 	struct list_head *curr;
1179 
1180 	if (!zone->spanned_pages)
1181 		return;
1182 
1183 	spin_lock_irqsave(&zone->lock, flags);
1184 
1185 	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1186 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1187 		if (pfn_valid(pfn)) {
1188 			struct page *page = pfn_to_page(pfn);
1189 
1190 			if (!swsusp_page_is_forbidden(page))
1191 				swsusp_unset_page_free(page);
1192 		}
1193 
1194 	for_each_migratetype_order(order, t) {
1195 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1196 			unsigned long i;
1197 
1198 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1199 			for (i = 0; i < (1UL << order); i++)
1200 				swsusp_set_page_free(pfn_to_page(pfn + i));
1201 		}
1202 	}
1203 	spin_unlock_irqrestore(&zone->lock, flags);
1204 }
1205 #endif /* CONFIG_PM */
1206 
1207 /*
1208  * Free a 0-order page
1209  * cold == 1 ? free a cold page : free a hot page
1210  */
1211 void free_hot_cold_page(struct page *page, int cold)
1212 {
1213 	struct zone *zone = page_zone(page);
1214 	struct per_cpu_pages *pcp;
1215 	unsigned long flags;
1216 	int migratetype;
1217 	int wasMlocked = __TestClearPageMlocked(page);
1218 
1219 	if (!free_pages_prepare(page, 0))
1220 		return;
1221 
1222 	migratetype = get_pageblock_migratetype(page);
1223 	set_page_private(page, migratetype);
1224 	local_irq_save(flags);
1225 	if (unlikely(wasMlocked))
1226 		free_page_mlock(page);
1227 	__count_vm_event(PGFREE);
1228 
1229 	/*
1230 	 * We only track unmovable, reclaimable and movable on pcp lists.
1231 	 * Free ISOLATE pages back to the allocator because they are being
1232 	 * offlined but treat RESERVE as movable pages so we can get those
1233 	 * areas back if necessary. Otherwise, we may have to free
1234 	 * excessively into the page allocator
1235 	 */
1236 	if (migratetype >= MIGRATE_PCPTYPES) {
1237 		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1238 			free_one_page(zone, page, 0, migratetype);
1239 			goto out;
1240 		}
1241 		migratetype = MIGRATE_MOVABLE;
1242 	}
1243 
1244 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1245 	if (cold)
1246 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1247 	else
1248 		list_add(&page->lru, &pcp->lists[migratetype]);
1249 	pcp->count++;
1250 	if (pcp->count >= pcp->high) {
1251 		free_pcppages_bulk(zone, pcp->batch, pcp);
1252 		pcp->count -= pcp->batch;
1253 	}
1254 
1255 out:
1256 	local_irq_restore(flags);
1257 }
1258 
1259 /*
1260  * Free a list of 0-order pages
1261  */
1262 void free_hot_cold_page_list(struct list_head *list, int cold)
1263 {
1264 	struct page *page, *next;
1265 
1266 	list_for_each_entry_safe(page, next, list, lru) {
1267 		trace_mm_page_free_batched(page, cold);
1268 		free_hot_cold_page(page, cold);
1269 	}
1270 }
1271 
1272 /*
1273  * split_page takes a non-compound higher-order page, and splits it into
1274  * n (1<<order) sub-pages: page[0..n]
1275  * Each sub-page must be freed individually.
1276  *
1277  * Note: this is probably too low level an operation for use in drivers.
1278  * Please consult with lkml before using this in your driver.
1279  */
1280 void split_page(struct page *page, unsigned int order)
1281 {
1282 	int i;
1283 
1284 	VM_BUG_ON(PageCompound(page));
1285 	VM_BUG_ON(!page_count(page));
1286 
1287 #ifdef CONFIG_KMEMCHECK
1288 	/*
1289 	 * Split shadow pages too, because free(page[0]) would
1290 	 * otherwise free the whole shadow.
1291 	 */
1292 	if (kmemcheck_page_is_tracked(page))
1293 		split_page(virt_to_page(page[0].shadow), order);
1294 #endif
1295 
1296 	for (i = 1; i < (1 << order); i++)
1297 		set_page_refcounted(page + i);
1298 }
1299 
1300 /*
1301  * Similar to split_page except the page is already free. As this is only
1302  * being used for migration, the migratetype of the block also changes.
1303  * As this is called with interrupts disabled, the caller is responsible
1304  * for calling arch_alloc_page() and kernel_map_page() after interrupts
1305  * are enabled.
1306  *
1307  * Note: this is probably too low level an operation for use in drivers.
1308  * Please consult with lkml before using this in your driver.
1309  */
1310 int split_free_page(struct page *page)
1311 {
1312 	unsigned int order;
1313 	unsigned long watermark;
1314 	struct zone *zone;
1315 
1316 	BUG_ON(!PageBuddy(page));
1317 
1318 	zone = page_zone(page);
1319 	order = page_order(page);
1320 
1321 	/* Obey watermarks as if the page was being allocated */
1322 	watermark = low_wmark_pages(zone) + (1 << order);
1323 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1324 		return 0;
1325 
1326 	/* Remove page from free list */
1327 	list_del(&page->lru);
1328 	zone->free_area[order].nr_free--;
1329 	rmv_page_order(page);
1330 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1331 
1332 	/* Split into individual pages */
1333 	set_page_refcounted(page);
1334 	split_page(page, order);
1335 
1336 	if (order >= pageblock_order - 1) {
1337 		struct page *endpage = page + (1 << order) - 1;
1338 		for (; page < endpage; page += pageblock_nr_pages)
1339 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1340 	}
1341 
1342 	return 1 << order;
1343 }
1344 
1345 /*
1346  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1347  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1348  * or two.
1349  */
1350 static inline
1351 struct page *buffered_rmqueue(struct zone *preferred_zone,
1352 			struct zone *zone, int order, gfp_t gfp_flags,
1353 			int migratetype)
1354 {
1355 	unsigned long flags;
1356 	struct page *page;
1357 	int cold = !!(gfp_flags & __GFP_COLD);
1358 
1359 again:
1360 	if (likely(order == 0)) {
1361 		struct per_cpu_pages *pcp;
1362 		struct list_head *list;
1363 
1364 		local_irq_save(flags);
1365 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
1366 		list = &pcp->lists[migratetype];
1367 		if (list_empty(list)) {
1368 			pcp->count += rmqueue_bulk(zone, 0,
1369 					pcp->batch, list,
1370 					migratetype, cold);
1371 			if (unlikely(list_empty(list)))
1372 				goto failed;
1373 		}
1374 
1375 		if (cold)
1376 			page = list_entry(list->prev, struct page, lru);
1377 		else
1378 			page = list_entry(list->next, struct page, lru);
1379 
1380 		list_del(&page->lru);
1381 		pcp->count--;
1382 	} else {
1383 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1384 			/*
1385 			 * __GFP_NOFAIL is not to be used in new code.
1386 			 *
1387 			 * All __GFP_NOFAIL callers should be fixed so that they
1388 			 * properly detect and handle allocation failures.
1389 			 *
1390 			 * We most definitely don't want callers attempting to
1391 			 * allocate greater than order-1 page units with
1392 			 * __GFP_NOFAIL.
1393 			 */
1394 			WARN_ON_ONCE(order > 1);
1395 		}
1396 		spin_lock_irqsave(&zone->lock, flags);
1397 		page = __rmqueue(zone, order, migratetype);
1398 		spin_unlock(&zone->lock);
1399 		if (!page)
1400 			goto failed;
1401 		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1402 	}
1403 
1404 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1405 	zone_statistics(preferred_zone, zone, gfp_flags);
1406 	local_irq_restore(flags);
1407 
1408 	VM_BUG_ON(bad_range(zone, page));
1409 	if (prep_new_page(page, order, gfp_flags))
1410 		goto again;
1411 	return page;
1412 
1413 failed:
1414 	local_irq_restore(flags);
1415 	return NULL;
1416 }
1417 
1418 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1419 #define ALLOC_WMARK_MIN		WMARK_MIN
1420 #define ALLOC_WMARK_LOW		WMARK_LOW
1421 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1422 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1423 
1424 /* Mask to get the watermark bits */
1425 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1426 
1427 #define ALLOC_HARDER		0x10 /* try to alloc harder */
1428 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1429 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1430 
1431 #ifdef CONFIG_FAIL_PAGE_ALLOC
1432 
1433 static struct {
1434 	struct fault_attr attr;
1435 
1436 	u32 ignore_gfp_highmem;
1437 	u32 ignore_gfp_wait;
1438 	u32 min_order;
1439 } fail_page_alloc = {
1440 	.attr = FAULT_ATTR_INITIALIZER,
1441 	.ignore_gfp_wait = 1,
1442 	.ignore_gfp_highmem = 1,
1443 	.min_order = 1,
1444 };
1445 
1446 static int __init setup_fail_page_alloc(char *str)
1447 {
1448 	return setup_fault_attr(&fail_page_alloc.attr, str);
1449 }
1450 __setup("fail_page_alloc=", setup_fail_page_alloc);
1451 
1452 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1453 {
1454 	if (order < fail_page_alloc.min_order)
1455 		return 0;
1456 	if (gfp_mask & __GFP_NOFAIL)
1457 		return 0;
1458 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1459 		return 0;
1460 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1461 		return 0;
1462 
1463 	return should_fail(&fail_page_alloc.attr, 1 << order);
1464 }
1465 
1466 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1467 
1468 static int __init fail_page_alloc_debugfs(void)
1469 {
1470 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1471 	struct dentry *dir;
1472 
1473 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1474 					&fail_page_alloc.attr);
1475 	if (IS_ERR(dir))
1476 		return PTR_ERR(dir);
1477 
1478 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1479 				&fail_page_alloc.ignore_gfp_wait))
1480 		goto fail;
1481 	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1482 				&fail_page_alloc.ignore_gfp_highmem))
1483 		goto fail;
1484 	if (!debugfs_create_u32("min-order", mode, dir,
1485 				&fail_page_alloc.min_order))
1486 		goto fail;
1487 
1488 	return 0;
1489 fail:
1490 	debugfs_remove_recursive(dir);
1491 
1492 	return -ENOMEM;
1493 }
1494 
1495 late_initcall(fail_page_alloc_debugfs);
1496 
1497 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1498 
1499 #else /* CONFIG_FAIL_PAGE_ALLOC */
1500 
1501 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1502 {
1503 	return 0;
1504 }
1505 
1506 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1507 
1508 /*
1509  * Return true if free pages are above 'mark'. This takes into account the order
1510  * of the allocation.
1511  */
1512 static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1513 		      int classzone_idx, int alloc_flags, long free_pages)
1514 {
1515 	/* free_pages my go negative - that's OK */
1516 	long min = mark;
1517 	int o;
1518 
1519 	free_pages -= (1 << order) - 1;
1520 	if (alloc_flags & ALLOC_HIGH)
1521 		min -= min / 2;
1522 	if (alloc_flags & ALLOC_HARDER)
1523 		min -= min / 4;
1524 
1525 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1526 		return false;
1527 	for (o = 0; o < order; o++) {
1528 		/* At the next order, this order's pages become unavailable */
1529 		free_pages -= z->free_area[o].nr_free << o;
1530 
1531 		/* Require fewer higher order pages to be free */
1532 		min >>= 1;
1533 
1534 		if (free_pages <= min)
1535 			return false;
1536 	}
1537 	return true;
1538 }
1539 
1540 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1541 		      int classzone_idx, int alloc_flags)
1542 {
1543 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1544 					zone_page_state(z, NR_FREE_PAGES));
1545 }
1546 
1547 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1548 		      int classzone_idx, int alloc_flags)
1549 {
1550 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
1551 
1552 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1553 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1554 
1555 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1556 								free_pages);
1557 }
1558 
1559 #ifdef CONFIG_NUMA
1560 /*
1561  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1562  * skip over zones that are not allowed by the cpuset, or that have
1563  * been recently (in last second) found to be nearly full.  See further
1564  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1565  * that have to skip over a lot of full or unallowed zones.
1566  *
1567  * If the zonelist cache is present in the passed in zonelist, then
1568  * returns a pointer to the allowed node mask (either the current
1569  * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1570  *
1571  * If the zonelist cache is not available for this zonelist, does
1572  * nothing and returns NULL.
1573  *
1574  * If the fullzones BITMAP in the zonelist cache is stale (more than
1575  * a second since last zap'd) then we zap it out (clear its bits.)
1576  *
1577  * We hold off even calling zlc_setup, until after we've checked the
1578  * first zone in the zonelist, on the theory that most allocations will
1579  * be satisfied from that first zone, so best to examine that zone as
1580  * quickly as we can.
1581  */
1582 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1583 {
1584 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1585 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1586 
1587 	zlc = zonelist->zlcache_ptr;
1588 	if (!zlc)
1589 		return NULL;
1590 
1591 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1592 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1593 		zlc->last_full_zap = jiffies;
1594 	}
1595 
1596 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1597 					&cpuset_current_mems_allowed :
1598 					&node_states[N_HIGH_MEMORY];
1599 	return allowednodes;
1600 }
1601 
1602 /*
1603  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1604  * if it is worth looking at further for free memory:
1605  *  1) Check that the zone isn't thought to be full (doesn't have its
1606  *     bit set in the zonelist_cache fullzones BITMAP).
1607  *  2) Check that the zones node (obtained from the zonelist_cache
1608  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1609  * Return true (non-zero) if zone is worth looking at further, or
1610  * else return false (zero) if it is not.
1611  *
1612  * This check -ignores- the distinction between various watermarks,
1613  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1614  * found to be full for any variation of these watermarks, it will
1615  * be considered full for up to one second by all requests, unless
1616  * we are so low on memory on all allowed nodes that we are forced
1617  * into the second scan of the zonelist.
1618  *
1619  * In the second scan we ignore this zonelist cache and exactly
1620  * apply the watermarks to all zones, even it is slower to do so.
1621  * We are low on memory in the second scan, and should leave no stone
1622  * unturned looking for a free page.
1623  */
1624 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1625 						nodemask_t *allowednodes)
1626 {
1627 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1628 	int i;				/* index of *z in zonelist zones */
1629 	int n;				/* node that zone *z is on */
1630 
1631 	zlc = zonelist->zlcache_ptr;
1632 	if (!zlc)
1633 		return 1;
1634 
1635 	i = z - zonelist->_zonerefs;
1636 	n = zlc->z_to_n[i];
1637 
1638 	/* This zone is worth trying if it is allowed but not full */
1639 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1640 }
1641 
1642 /*
1643  * Given 'z' scanning a zonelist, set the corresponding bit in
1644  * zlc->fullzones, so that subsequent attempts to allocate a page
1645  * from that zone don't waste time re-examining it.
1646  */
1647 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1648 {
1649 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1650 	int i;				/* index of *z in zonelist zones */
1651 
1652 	zlc = zonelist->zlcache_ptr;
1653 	if (!zlc)
1654 		return;
1655 
1656 	i = z - zonelist->_zonerefs;
1657 
1658 	set_bit(i, zlc->fullzones);
1659 }
1660 
1661 /*
1662  * clear all zones full, called after direct reclaim makes progress so that
1663  * a zone that was recently full is not skipped over for up to a second
1664  */
1665 static void zlc_clear_zones_full(struct zonelist *zonelist)
1666 {
1667 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1668 
1669 	zlc = zonelist->zlcache_ptr;
1670 	if (!zlc)
1671 		return;
1672 
1673 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1674 }
1675 
1676 #else	/* CONFIG_NUMA */
1677 
1678 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1679 {
1680 	return NULL;
1681 }
1682 
1683 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1684 				nodemask_t *allowednodes)
1685 {
1686 	return 1;
1687 }
1688 
1689 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1690 {
1691 }
1692 
1693 static void zlc_clear_zones_full(struct zonelist *zonelist)
1694 {
1695 }
1696 #endif	/* CONFIG_NUMA */
1697 
1698 /*
1699  * get_page_from_freelist goes through the zonelist trying to allocate
1700  * a page.
1701  */
1702 static struct page *
1703 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1704 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1705 		struct zone *preferred_zone, int migratetype)
1706 {
1707 	struct zoneref *z;
1708 	struct page *page = NULL;
1709 	int classzone_idx;
1710 	struct zone *zone;
1711 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1712 	int zlc_active = 0;		/* set if using zonelist_cache */
1713 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1714 
1715 	classzone_idx = zone_idx(preferred_zone);
1716 zonelist_scan:
1717 	/*
1718 	 * Scan zonelist, looking for a zone with enough free.
1719 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1720 	 */
1721 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1722 						high_zoneidx, nodemask) {
1723 		if (NUMA_BUILD && zlc_active &&
1724 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1725 				continue;
1726 		if ((alloc_flags & ALLOC_CPUSET) &&
1727 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1728 				continue;
1729 		/*
1730 		 * When allocating a page cache page for writing, we
1731 		 * want to get it from a zone that is within its dirty
1732 		 * limit, such that no single zone holds more than its
1733 		 * proportional share of globally allowed dirty pages.
1734 		 * The dirty limits take into account the zone's
1735 		 * lowmem reserves and high watermark so that kswapd
1736 		 * should be able to balance it without having to
1737 		 * write pages from its LRU list.
1738 		 *
1739 		 * This may look like it could increase pressure on
1740 		 * lower zones by failing allocations in higher zones
1741 		 * before they are full.  But the pages that do spill
1742 		 * over are limited as the lower zones are protected
1743 		 * by this very same mechanism.  It should not become
1744 		 * a practical burden to them.
1745 		 *
1746 		 * XXX: For now, allow allocations to potentially
1747 		 * exceed the per-zone dirty limit in the slowpath
1748 		 * (ALLOC_WMARK_LOW unset) before going into reclaim,
1749 		 * which is important when on a NUMA setup the allowed
1750 		 * zones are together not big enough to reach the
1751 		 * global limit.  The proper fix for these situations
1752 		 * will require awareness of zones in the
1753 		 * dirty-throttling and the flusher threads.
1754 		 */
1755 		if ((alloc_flags & ALLOC_WMARK_LOW) &&
1756 		    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1757 			goto this_zone_full;
1758 
1759 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1760 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1761 			unsigned long mark;
1762 			int ret;
1763 
1764 			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1765 			if (zone_watermark_ok(zone, order, mark,
1766 				    classzone_idx, alloc_flags))
1767 				goto try_this_zone;
1768 
1769 			if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1770 				/*
1771 				 * we do zlc_setup if there are multiple nodes
1772 				 * and before considering the first zone allowed
1773 				 * by the cpuset.
1774 				 */
1775 				allowednodes = zlc_setup(zonelist, alloc_flags);
1776 				zlc_active = 1;
1777 				did_zlc_setup = 1;
1778 			}
1779 
1780 			if (zone_reclaim_mode == 0)
1781 				goto this_zone_full;
1782 
1783 			/*
1784 			 * As we may have just activated ZLC, check if the first
1785 			 * eligible zone has failed zone_reclaim recently.
1786 			 */
1787 			if (NUMA_BUILD && zlc_active &&
1788 				!zlc_zone_worth_trying(zonelist, z, allowednodes))
1789 				continue;
1790 
1791 			ret = zone_reclaim(zone, gfp_mask, order);
1792 			switch (ret) {
1793 			case ZONE_RECLAIM_NOSCAN:
1794 				/* did not scan */
1795 				continue;
1796 			case ZONE_RECLAIM_FULL:
1797 				/* scanned but unreclaimable */
1798 				continue;
1799 			default:
1800 				/* did we reclaim enough */
1801 				if (!zone_watermark_ok(zone, order, mark,
1802 						classzone_idx, alloc_flags))
1803 					goto this_zone_full;
1804 			}
1805 		}
1806 
1807 try_this_zone:
1808 		page = buffered_rmqueue(preferred_zone, zone, order,
1809 						gfp_mask, migratetype);
1810 		if (page)
1811 			break;
1812 this_zone_full:
1813 		if (NUMA_BUILD)
1814 			zlc_mark_zone_full(zonelist, z);
1815 	}
1816 
1817 	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1818 		/* Disable zlc cache for second zonelist scan */
1819 		zlc_active = 0;
1820 		goto zonelist_scan;
1821 	}
1822 	return page;
1823 }
1824 
1825 /*
1826  * Large machines with many possible nodes should not always dump per-node
1827  * meminfo in irq context.
1828  */
1829 static inline bool should_suppress_show_mem(void)
1830 {
1831 	bool ret = false;
1832 
1833 #if NODES_SHIFT > 8
1834 	ret = in_interrupt();
1835 #endif
1836 	return ret;
1837 }
1838 
1839 static DEFINE_RATELIMIT_STATE(nopage_rs,
1840 		DEFAULT_RATELIMIT_INTERVAL,
1841 		DEFAULT_RATELIMIT_BURST);
1842 
1843 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1844 {
1845 	unsigned int filter = SHOW_MEM_FILTER_NODES;
1846 
1847 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
1848 	    debug_guardpage_minorder() > 0)
1849 		return;
1850 
1851 	/*
1852 	 * This documents exceptions given to allocations in certain
1853 	 * contexts that are allowed to allocate outside current's set
1854 	 * of allowed nodes.
1855 	 */
1856 	if (!(gfp_mask & __GFP_NOMEMALLOC))
1857 		if (test_thread_flag(TIF_MEMDIE) ||
1858 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
1859 			filter &= ~SHOW_MEM_FILTER_NODES;
1860 	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
1861 		filter &= ~SHOW_MEM_FILTER_NODES;
1862 
1863 	if (fmt) {
1864 		struct va_format vaf;
1865 		va_list args;
1866 
1867 		va_start(args, fmt);
1868 
1869 		vaf.fmt = fmt;
1870 		vaf.va = &args;
1871 
1872 		pr_warn("%pV", &vaf);
1873 
1874 		va_end(args);
1875 	}
1876 
1877 	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
1878 		current->comm, order, gfp_mask);
1879 
1880 	dump_stack();
1881 	if (!should_suppress_show_mem())
1882 		show_mem(filter);
1883 }
1884 
1885 static inline int
1886 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1887 				unsigned long did_some_progress,
1888 				unsigned long pages_reclaimed)
1889 {
1890 	/* Do not loop if specifically requested */
1891 	if (gfp_mask & __GFP_NORETRY)
1892 		return 0;
1893 
1894 	/* Always retry if specifically requested */
1895 	if (gfp_mask & __GFP_NOFAIL)
1896 		return 1;
1897 
1898 	/*
1899 	 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
1900 	 * making forward progress without invoking OOM. Suspend also disables
1901 	 * storage devices so kswapd will not help. Bail if we are suspending.
1902 	 */
1903 	if (!did_some_progress && pm_suspended_storage())
1904 		return 0;
1905 
1906 	/*
1907 	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1908 	 * means __GFP_NOFAIL, but that may not be true in other
1909 	 * implementations.
1910 	 */
1911 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1912 		return 1;
1913 
1914 	/*
1915 	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1916 	 * specified, then we retry until we no longer reclaim any pages
1917 	 * (above), or we've reclaimed an order of pages at least as
1918 	 * large as the allocation's order. In both cases, if the
1919 	 * allocation still fails, we stop retrying.
1920 	 */
1921 	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1922 		return 1;
1923 
1924 	return 0;
1925 }
1926 
1927 static inline struct page *
1928 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1929 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1930 	nodemask_t *nodemask, struct zone *preferred_zone,
1931 	int migratetype)
1932 {
1933 	struct page *page;
1934 
1935 	/* Acquire the OOM killer lock for the zones in zonelist */
1936 	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
1937 		schedule_timeout_uninterruptible(1);
1938 		return NULL;
1939 	}
1940 
1941 	/*
1942 	 * Go through the zonelist yet one more time, keep very high watermark
1943 	 * here, this is only to catch a parallel oom killing, we must fail if
1944 	 * we're still under heavy pressure.
1945 	 */
1946 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1947 		order, zonelist, high_zoneidx,
1948 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1949 		preferred_zone, migratetype);
1950 	if (page)
1951 		goto out;
1952 
1953 	if (!(gfp_mask & __GFP_NOFAIL)) {
1954 		/* The OOM killer will not help higher order allocs */
1955 		if (order > PAGE_ALLOC_COSTLY_ORDER)
1956 			goto out;
1957 		/* The OOM killer does not needlessly kill tasks for lowmem */
1958 		if (high_zoneidx < ZONE_NORMAL)
1959 			goto out;
1960 		/*
1961 		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1962 		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1963 		 * The caller should handle page allocation failure by itself if
1964 		 * it specifies __GFP_THISNODE.
1965 		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1966 		 */
1967 		if (gfp_mask & __GFP_THISNODE)
1968 			goto out;
1969 	}
1970 	/* Exhausted what can be done so it's blamo time */
1971 	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
1972 
1973 out:
1974 	clear_zonelist_oom(zonelist, gfp_mask);
1975 	return page;
1976 }
1977 
1978 #ifdef CONFIG_COMPACTION
1979 /* Try memory compaction for high-order allocations before reclaim */
1980 static struct page *
1981 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1982 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1983 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1984 	int migratetype, bool sync_migration,
1985 	bool *deferred_compaction,
1986 	unsigned long *did_some_progress)
1987 {
1988 	struct page *page;
1989 
1990 	if (!order)
1991 		return NULL;
1992 
1993 	if (compaction_deferred(preferred_zone, order)) {
1994 		*deferred_compaction = true;
1995 		return NULL;
1996 	}
1997 
1998 	current->flags |= PF_MEMALLOC;
1999 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2000 						nodemask, sync_migration);
2001 	current->flags &= ~PF_MEMALLOC;
2002 	if (*did_some_progress != COMPACT_SKIPPED) {
2003 
2004 		/* Page migration frees to the PCP lists but we want merging */
2005 		drain_pages(get_cpu());
2006 		put_cpu();
2007 
2008 		page = get_page_from_freelist(gfp_mask, nodemask,
2009 				order, zonelist, high_zoneidx,
2010 				alloc_flags, preferred_zone,
2011 				migratetype);
2012 		if (page) {
2013 			preferred_zone->compact_considered = 0;
2014 			preferred_zone->compact_defer_shift = 0;
2015 			if (order >= preferred_zone->compact_order_failed)
2016 				preferred_zone->compact_order_failed = order + 1;
2017 			count_vm_event(COMPACTSUCCESS);
2018 			return page;
2019 		}
2020 
2021 		/*
2022 		 * It's bad if compaction run occurs and fails.
2023 		 * The most likely reason is that pages exist,
2024 		 * but not enough to satisfy watermarks.
2025 		 */
2026 		count_vm_event(COMPACTFAIL);
2027 
2028 		/*
2029 		 * As async compaction considers a subset of pageblocks, only
2030 		 * defer if the failure was a sync compaction failure.
2031 		 */
2032 		if (sync_migration)
2033 			defer_compaction(preferred_zone, order);
2034 
2035 		cond_resched();
2036 	}
2037 
2038 	return NULL;
2039 }
2040 #else
2041 static inline struct page *
2042 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2043 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2044 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2045 	int migratetype, bool sync_migration,
2046 	bool *deferred_compaction,
2047 	unsigned long *did_some_progress)
2048 {
2049 	return NULL;
2050 }
2051 #endif /* CONFIG_COMPACTION */
2052 
2053 /* The really slow allocator path where we enter direct reclaim */
2054 static inline struct page *
2055 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2056 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2057 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2058 	int migratetype, unsigned long *did_some_progress)
2059 {
2060 	struct page *page = NULL;
2061 	struct reclaim_state reclaim_state;
2062 	bool drained = false;
2063 
2064 	cond_resched();
2065 
2066 	/* We now go into synchronous reclaim */
2067 	cpuset_memory_pressure_bump();
2068 	current->flags |= PF_MEMALLOC;
2069 	lockdep_set_current_reclaim_state(gfp_mask);
2070 	reclaim_state.reclaimed_slab = 0;
2071 	current->reclaim_state = &reclaim_state;
2072 
2073 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2074 
2075 	current->reclaim_state = NULL;
2076 	lockdep_clear_current_reclaim_state();
2077 	current->flags &= ~PF_MEMALLOC;
2078 
2079 	cond_resched();
2080 
2081 	if (unlikely(!(*did_some_progress)))
2082 		return NULL;
2083 
2084 	/* After successful reclaim, reconsider all zones for allocation */
2085 	if (NUMA_BUILD)
2086 		zlc_clear_zones_full(zonelist);
2087 
2088 retry:
2089 	page = get_page_from_freelist(gfp_mask, nodemask, order,
2090 					zonelist, high_zoneidx,
2091 					alloc_flags, preferred_zone,
2092 					migratetype);
2093 
2094 	/*
2095 	 * If an allocation failed after direct reclaim, it could be because
2096 	 * pages are pinned on the per-cpu lists. Drain them and try again
2097 	 */
2098 	if (!page && !drained) {
2099 		drain_all_pages();
2100 		drained = true;
2101 		goto retry;
2102 	}
2103 
2104 	return page;
2105 }
2106 
2107 /*
2108  * This is called in the allocator slow-path if the allocation request is of
2109  * sufficient urgency to ignore watermarks and take other desperate measures
2110  */
2111 static inline struct page *
2112 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2113 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2114 	nodemask_t *nodemask, struct zone *preferred_zone,
2115 	int migratetype)
2116 {
2117 	struct page *page;
2118 
2119 	do {
2120 		page = get_page_from_freelist(gfp_mask, nodemask, order,
2121 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2122 			preferred_zone, migratetype);
2123 
2124 		if (!page && gfp_mask & __GFP_NOFAIL)
2125 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2126 	} while (!page && (gfp_mask & __GFP_NOFAIL));
2127 
2128 	return page;
2129 }
2130 
2131 static inline
2132 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2133 						enum zone_type high_zoneidx,
2134 						enum zone_type classzone_idx)
2135 {
2136 	struct zoneref *z;
2137 	struct zone *zone;
2138 
2139 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2140 		wakeup_kswapd(zone, order, classzone_idx);
2141 }
2142 
2143 static inline int
2144 gfp_to_alloc_flags(gfp_t gfp_mask)
2145 {
2146 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2147 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2148 
2149 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2150 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2151 
2152 	/*
2153 	 * The caller may dip into page reserves a bit more if the caller
2154 	 * cannot run direct reclaim, or if the caller has realtime scheduling
2155 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2156 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2157 	 */
2158 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2159 
2160 	if (!wait) {
2161 		/*
2162 		 * Not worth trying to allocate harder for
2163 		 * __GFP_NOMEMALLOC even if it can't schedule.
2164 		 */
2165 		if  (!(gfp_mask & __GFP_NOMEMALLOC))
2166 			alloc_flags |= ALLOC_HARDER;
2167 		/*
2168 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2169 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2170 		 */
2171 		alloc_flags &= ~ALLOC_CPUSET;
2172 	} else if (unlikely(rt_task(current)) && !in_interrupt())
2173 		alloc_flags |= ALLOC_HARDER;
2174 
2175 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2176 		if (!in_interrupt() &&
2177 		    ((current->flags & PF_MEMALLOC) ||
2178 		     unlikely(test_thread_flag(TIF_MEMDIE))))
2179 			alloc_flags |= ALLOC_NO_WATERMARKS;
2180 	}
2181 
2182 	return alloc_flags;
2183 }
2184 
2185 static inline struct page *
2186 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2187 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2188 	nodemask_t *nodemask, struct zone *preferred_zone,
2189 	int migratetype)
2190 {
2191 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2192 	struct page *page = NULL;
2193 	int alloc_flags;
2194 	unsigned long pages_reclaimed = 0;
2195 	unsigned long did_some_progress;
2196 	bool sync_migration = false;
2197 	bool deferred_compaction = false;
2198 
2199 	/*
2200 	 * In the slowpath, we sanity check order to avoid ever trying to
2201 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2202 	 * be using allocators in order of preference for an area that is
2203 	 * too large.
2204 	 */
2205 	if (order >= MAX_ORDER) {
2206 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2207 		return NULL;
2208 	}
2209 
2210 	/*
2211 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2212 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2213 	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2214 	 * using a larger set of nodes after it has established that the
2215 	 * allowed per node queues are empty and that nodes are
2216 	 * over allocated.
2217 	 */
2218 	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2219 		goto nopage;
2220 
2221 restart:
2222 	if (!(gfp_mask & __GFP_NO_KSWAPD))
2223 		wake_all_kswapd(order, zonelist, high_zoneidx,
2224 						zone_idx(preferred_zone));
2225 
2226 	/*
2227 	 * OK, we're below the kswapd watermark and have kicked background
2228 	 * reclaim. Now things get more complex, so set up alloc_flags according
2229 	 * to how we want to proceed.
2230 	 */
2231 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
2232 
2233 	/*
2234 	 * Find the true preferred zone if the allocation is unconstrained by
2235 	 * cpusets.
2236 	 */
2237 	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2238 		first_zones_zonelist(zonelist, high_zoneidx, NULL,
2239 					&preferred_zone);
2240 
2241 rebalance:
2242 	/* This is the last chance, in general, before the goto nopage. */
2243 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2244 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2245 			preferred_zone, migratetype);
2246 	if (page)
2247 		goto got_pg;
2248 
2249 	/* Allocate without watermarks if the context allows */
2250 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
2251 		page = __alloc_pages_high_priority(gfp_mask, order,
2252 				zonelist, high_zoneidx, nodemask,
2253 				preferred_zone, migratetype);
2254 		if (page)
2255 			goto got_pg;
2256 	}
2257 
2258 	/* Atomic allocations - we can't balance anything */
2259 	if (!wait)
2260 		goto nopage;
2261 
2262 	/* Avoid recursion of direct reclaim */
2263 	if (current->flags & PF_MEMALLOC)
2264 		goto nopage;
2265 
2266 	/* Avoid allocations with no watermarks from looping endlessly */
2267 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2268 		goto nopage;
2269 
2270 	/*
2271 	 * Try direct compaction. The first pass is asynchronous. Subsequent
2272 	 * attempts after direct reclaim are synchronous
2273 	 */
2274 	page = __alloc_pages_direct_compact(gfp_mask, order,
2275 					zonelist, high_zoneidx,
2276 					nodemask,
2277 					alloc_flags, preferred_zone,
2278 					migratetype, sync_migration,
2279 					&deferred_compaction,
2280 					&did_some_progress);
2281 	if (page)
2282 		goto got_pg;
2283 	sync_migration = true;
2284 
2285 	/*
2286 	 * If compaction is deferred for high-order allocations, it is because
2287 	 * sync compaction recently failed. In this is the case and the caller
2288 	 * has requested the system not be heavily disrupted, fail the
2289 	 * allocation now instead of entering direct reclaim
2290 	 */
2291 	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
2292 		goto nopage;
2293 
2294 	/* Try direct reclaim and then allocating */
2295 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
2296 					zonelist, high_zoneidx,
2297 					nodemask,
2298 					alloc_flags, preferred_zone,
2299 					migratetype, &did_some_progress);
2300 	if (page)
2301 		goto got_pg;
2302 
2303 	/*
2304 	 * If we failed to make any progress reclaiming, then we are
2305 	 * running out of options and have to consider going OOM
2306 	 */
2307 	if (!did_some_progress) {
2308 		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2309 			if (oom_killer_disabled)
2310 				goto nopage;
2311 			page = __alloc_pages_may_oom(gfp_mask, order,
2312 					zonelist, high_zoneidx,
2313 					nodemask, preferred_zone,
2314 					migratetype);
2315 			if (page)
2316 				goto got_pg;
2317 
2318 			if (!(gfp_mask & __GFP_NOFAIL)) {
2319 				/*
2320 				 * The oom killer is not called for high-order
2321 				 * allocations that may fail, so if no progress
2322 				 * is being made, there are no other options and
2323 				 * retrying is unlikely to help.
2324 				 */
2325 				if (order > PAGE_ALLOC_COSTLY_ORDER)
2326 					goto nopage;
2327 				/*
2328 				 * The oom killer is not called for lowmem
2329 				 * allocations to prevent needlessly killing
2330 				 * innocent tasks.
2331 				 */
2332 				if (high_zoneidx < ZONE_NORMAL)
2333 					goto nopage;
2334 			}
2335 
2336 			goto restart;
2337 		}
2338 	}
2339 
2340 	/* Check if we should retry the allocation */
2341 	pages_reclaimed += did_some_progress;
2342 	if (should_alloc_retry(gfp_mask, order, did_some_progress,
2343 						pages_reclaimed)) {
2344 		/* Wait for some write requests to complete then retry */
2345 		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2346 		goto rebalance;
2347 	} else {
2348 		/*
2349 		 * High-order allocations do not necessarily loop after
2350 		 * direct reclaim and reclaim/compaction depends on compaction
2351 		 * being called after reclaim so call directly if necessary
2352 		 */
2353 		page = __alloc_pages_direct_compact(gfp_mask, order,
2354 					zonelist, high_zoneidx,
2355 					nodemask,
2356 					alloc_flags, preferred_zone,
2357 					migratetype, sync_migration,
2358 					&deferred_compaction,
2359 					&did_some_progress);
2360 		if (page)
2361 			goto got_pg;
2362 	}
2363 
2364 nopage:
2365 	warn_alloc_failed(gfp_mask, order, NULL);
2366 	return page;
2367 got_pg:
2368 	if (kmemcheck_enabled)
2369 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2370 	return page;
2371 
2372 }
2373 
2374 /*
2375  * This is the 'heart' of the zoned buddy allocator.
2376  */
2377 struct page *
2378 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2379 			struct zonelist *zonelist, nodemask_t *nodemask)
2380 {
2381 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2382 	struct zone *preferred_zone;
2383 	struct page *page = NULL;
2384 	int migratetype = allocflags_to_migratetype(gfp_mask);
2385 	unsigned int cpuset_mems_cookie;
2386 
2387 	gfp_mask &= gfp_allowed_mask;
2388 
2389 	lockdep_trace_alloc(gfp_mask);
2390 
2391 	might_sleep_if(gfp_mask & __GFP_WAIT);
2392 
2393 	if (should_fail_alloc_page(gfp_mask, order))
2394 		return NULL;
2395 
2396 	/*
2397 	 * Check the zones suitable for the gfp_mask contain at least one
2398 	 * valid zone. It's possible to have an empty zonelist as a result
2399 	 * of GFP_THISNODE and a memoryless node
2400 	 */
2401 	if (unlikely(!zonelist->_zonerefs->zone))
2402 		return NULL;
2403 
2404 retry_cpuset:
2405 	cpuset_mems_cookie = get_mems_allowed();
2406 
2407 	/* The preferred zone is used for statistics later */
2408 	first_zones_zonelist(zonelist, high_zoneidx,
2409 				nodemask ? : &cpuset_current_mems_allowed,
2410 				&preferred_zone);
2411 	if (!preferred_zone)
2412 		goto out;
2413 
2414 	/* First allocation attempt */
2415 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2416 			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2417 			preferred_zone, migratetype);
2418 	if (unlikely(!page))
2419 		page = __alloc_pages_slowpath(gfp_mask, order,
2420 				zonelist, high_zoneidx, nodemask,
2421 				preferred_zone, migratetype);
2422 
2423 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2424 
2425 out:
2426 	/*
2427 	 * When updating a task's mems_allowed, it is possible to race with
2428 	 * parallel threads in such a way that an allocation can fail while
2429 	 * the mask is being updated. If a page allocation is about to fail,
2430 	 * check if the cpuset changed during allocation and if so, retry.
2431 	 */
2432 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2433 		goto retry_cpuset;
2434 
2435 	return page;
2436 }
2437 EXPORT_SYMBOL(__alloc_pages_nodemask);
2438 
2439 /*
2440  * Common helper functions.
2441  */
2442 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2443 {
2444 	struct page *page;
2445 
2446 	/*
2447 	 * __get_free_pages() returns a 32-bit address, which cannot represent
2448 	 * a highmem page
2449 	 */
2450 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2451 
2452 	page = alloc_pages(gfp_mask, order);
2453 	if (!page)
2454 		return 0;
2455 	return (unsigned long) page_address(page);
2456 }
2457 EXPORT_SYMBOL(__get_free_pages);
2458 
2459 unsigned long get_zeroed_page(gfp_t gfp_mask)
2460 {
2461 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2462 }
2463 EXPORT_SYMBOL(get_zeroed_page);
2464 
2465 void __free_pages(struct page *page, unsigned int order)
2466 {
2467 	if (put_page_testzero(page)) {
2468 		if (order == 0)
2469 			free_hot_cold_page(page, 0);
2470 		else
2471 			__free_pages_ok(page, order);
2472 	}
2473 }
2474 
2475 EXPORT_SYMBOL(__free_pages);
2476 
2477 void free_pages(unsigned long addr, unsigned int order)
2478 {
2479 	if (addr != 0) {
2480 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2481 		__free_pages(virt_to_page((void *)addr), order);
2482 	}
2483 }
2484 
2485 EXPORT_SYMBOL(free_pages);
2486 
2487 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2488 {
2489 	if (addr) {
2490 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2491 		unsigned long used = addr + PAGE_ALIGN(size);
2492 
2493 		split_page(virt_to_page((void *)addr), order);
2494 		while (used < alloc_end) {
2495 			free_page(used);
2496 			used += PAGE_SIZE;
2497 		}
2498 	}
2499 	return (void *)addr;
2500 }
2501 
2502 /**
2503  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2504  * @size: the number of bytes to allocate
2505  * @gfp_mask: GFP flags for the allocation
2506  *
2507  * This function is similar to alloc_pages(), except that it allocates the
2508  * minimum number of pages to satisfy the request.  alloc_pages() can only
2509  * allocate memory in power-of-two pages.
2510  *
2511  * This function is also limited by MAX_ORDER.
2512  *
2513  * Memory allocated by this function must be released by free_pages_exact().
2514  */
2515 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2516 {
2517 	unsigned int order = get_order(size);
2518 	unsigned long addr;
2519 
2520 	addr = __get_free_pages(gfp_mask, order);
2521 	return make_alloc_exact(addr, order, size);
2522 }
2523 EXPORT_SYMBOL(alloc_pages_exact);
2524 
2525 /**
2526  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2527  *			   pages on a node.
2528  * @nid: the preferred node ID where memory should be allocated
2529  * @size: the number of bytes to allocate
2530  * @gfp_mask: GFP flags for the allocation
2531  *
2532  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2533  * back.
2534  * Note this is not alloc_pages_exact_node() which allocates on a specific node,
2535  * but is not exact.
2536  */
2537 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2538 {
2539 	unsigned order = get_order(size);
2540 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
2541 	if (!p)
2542 		return NULL;
2543 	return make_alloc_exact((unsigned long)page_address(p), order, size);
2544 }
2545 EXPORT_SYMBOL(alloc_pages_exact_nid);
2546 
2547 /**
2548  * free_pages_exact - release memory allocated via alloc_pages_exact()
2549  * @virt: the value returned by alloc_pages_exact.
2550  * @size: size of allocation, same value as passed to alloc_pages_exact().
2551  *
2552  * Release the memory allocated by a previous call to alloc_pages_exact.
2553  */
2554 void free_pages_exact(void *virt, size_t size)
2555 {
2556 	unsigned long addr = (unsigned long)virt;
2557 	unsigned long end = addr + PAGE_ALIGN(size);
2558 
2559 	while (addr < end) {
2560 		free_page(addr);
2561 		addr += PAGE_SIZE;
2562 	}
2563 }
2564 EXPORT_SYMBOL(free_pages_exact);
2565 
2566 static unsigned int nr_free_zone_pages(int offset)
2567 {
2568 	struct zoneref *z;
2569 	struct zone *zone;
2570 
2571 	/* Just pick one node, since fallback list is circular */
2572 	unsigned int sum = 0;
2573 
2574 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2575 
2576 	for_each_zone_zonelist(zone, z, zonelist, offset) {
2577 		unsigned long size = zone->present_pages;
2578 		unsigned long high = high_wmark_pages(zone);
2579 		if (size > high)
2580 			sum += size - high;
2581 	}
2582 
2583 	return sum;
2584 }
2585 
2586 /*
2587  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2588  */
2589 unsigned int nr_free_buffer_pages(void)
2590 {
2591 	return nr_free_zone_pages(gfp_zone(GFP_USER));
2592 }
2593 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2594 
2595 /*
2596  * Amount of free RAM allocatable within all zones
2597  */
2598 unsigned int nr_free_pagecache_pages(void)
2599 {
2600 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2601 }
2602 
2603 static inline void show_node(struct zone *zone)
2604 {
2605 	if (NUMA_BUILD)
2606 		printk("Node %d ", zone_to_nid(zone));
2607 }
2608 
2609 void si_meminfo(struct sysinfo *val)
2610 {
2611 	val->totalram = totalram_pages;
2612 	val->sharedram = 0;
2613 	val->freeram = global_page_state(NR_FREE_PAGES);
2614 	val->bufferram = nr_blockdev_pages();
2615 	val->totalhigh = totalhigh_pages;
2616 	val->freehigh = nr_free_highpages();
2617 	val->mem_unit = PAGE_SIZE;
2618 }
2619 
2620 EXPORT_SYMBOL(si_meminfo);
2621 
2622 #ifdef CONFIG_NUMA
2623 void si_meminfo_node(struct sysinfo *val, int nid)
2624 {
2625 	pg_data_t *pgdat = NODE_DATA(nid);
2626 
2627 	val->totalram = pgdat->node_present_pages;
2628 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2629 #ifdef CONFIG_HIGHMEM
2630 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2631 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2632 			NR_FREE_PAGES);
2633 #else
2634 	val->totalhigh = 0;
2635 	val->freehigh = 0;
2636 #endif
2637 	val->mem_unit = PAGE_SIZE;
2638 }
2639 #endif
2640 
2641 /*
2642  * Determine whether the node should be displayed or not, depending on whether
2643  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2644  */
2645 bool skip_free_areas_node(unsigned int flags, int nid)
2646 {
2647 	bool ret = false;
2648 	unsigned int cpuset_mems_cookie;
2649 
2650 	if (!(flags & SHOW_MEM_FILTER_NODES))
2651 		goto out;
2652 
2653 	do {
2654 		cpuset_mems_cookie = get_mems_allowed();
2655 		ret = !node_isset(nid, cpuset_current_mems_allowed);
2656 	} while (!put_mems_allowed(cpuset_mems_cookie));
2657 out:
2658 	return ret;
2659 }
2660 
2661 #define K(x) ((x) << (PAGE_SHIFT-10))
2662 
2663 /*
2664  * Show free area list (used inside shift_scroll-lock stuff)
2665  * We also calculate the percentage fragmentation. We do this by counting the
2666  * memory on each free list with the exception of the first item on the list.
2667  * Suppresses nodes that are not allowed by current's cpuset if
2668  * SHOW_MEM_FILTER_NODES is passed.
2669  */
2670 void show_free_areas(unsigned int filter)
2671 {
2672 	int cpu;
2673 	struct zone *zone;
2674 
2675 	for_each_populated_zone(zone) {
2676 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2677 			continue;
2678 		show_node(zone);
2679 		printk("%s per-cpu:\n", zone->name);
2680 
2681 		for_each_online_cpu(cpu) {
2682 			struct per_cpu_pageset *pageset;
2683 
2684 			pageset = per_cpu_ptr(zone->pageset, cpu);
2685 
2686 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2687 			       cpu, pageset->pcp.high,
2688 			       pageset->pcp.batch, pageset->pcp.count);
2689 		}
2690 	}
2691 
2692 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2693 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2694 		" unevictable:%lu"
2695 		" dirty:%lu writeback:%lu unstable:%lu\n"
2696 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2697 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2698 		global_page_state(NR_ACTIVE_ANON),
2699 		global_page_state(NR_INACTIVE_ANON),
2700 		global_page_state(NR_ISOLATED_ANON),
2701 		global_page_state(NR_ACTIVE_FILE),
2702 		global_page_state(NR_INACTIVE_FILE),
2703 		global_page_state(NR_ISOLATED_FILE),
2704 		global_page_state(NR_UNEVICTABLE),
2705 		global_page_state(NR_FILE_DIRTY),
2706 		global_page_state(NR_WRITEBACK),
2707 		global_page_state(NR_UNSTABLE_NFS),
2708 		global_page_state(NR_FREE_PAGES),
2709 		global_page_state(NR_SLAB_RECLAIMABLE),
2710 		global_page_state(NR_SLAB_UNRECLAIMABLE),
2711 		global_page_state(NR_FILE_MAPPED),
2712 		global_page_state(NR_SHMEM),
2713 		global_page_state(NR_PAGETABLE),
2714 		global_page_state(NR_BOUNCE));
2715 
2716 	for_each_populated_zone(zone) {
2717 		int i;
2718 
2719 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2720 			continue;
2721 		show_node(zone);
2722 		printk("%s"
2723 			" free:%lukB"
2724 			" min:%lukB"
2725 			" low:%lukB"
2726 			" high:%lukB"
2727 			" active_anon:%lukB"
2728 			" inactive_anon:%lukB"
2729 			" active_file:%lukB"
2730 			" inactive_file:%lukB"
2731 			" unevictable:%lukB"
2732 			" isolated(anon):%lukB"
2733 			" isolated(file):%lukB"
2734 			" present:%lukB"
2735 			" mlocked:%lukB"
2736 			" dirty:%lukB"
2737 			" writeback:%lukB"
2738 			" mapped:%lukB"
2739 			" shmem:%lukB"
2740 			" slab_reclaimable:%lukB"
2741 			" slab_unreclaimable:%lukB"
2742 			" kernel_stack:%lukB"
2743 			" pagetables:%lukB"
2744 			" unstable:%lukB"
2745 			" bounce:%lukB"
2746 			" writeback_tmp:%lukB"
2747 			" pages_scanned:%lu"
2748 			" all_unreclaimable? %s"
2749 			"\n",
2750 			zone->name,
2751 			K(zone_page_state(zone, NR_FREE_PAGES)),
2752 			K(min_wmark_pages(zone)),
2753 			K(low_wmark_pages(zone)),
2754 			K(high_wmark_pages(zone)),
2755 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2756 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2757 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2758 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2759 			K(zone_page_state(zone, NR_UNEVICTABLE)),
2760 			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2761 			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2762 			K(zone->present_pages),
2763 			K(zone_page_state(zone, NR_MLOCK)),
2764 			K(zone_page_state(zone, NR_FILE_DIRTY)),
2765 			K(zone_page_state(zone, NR_WRITEBACK)),
2766 			K(zone_page_state(zone, NR_FILE_MAPPED)),
2767 			K(zone_page_state(zone, NR_SHMEM)),
2768 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2769 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2770 			zone_page_state(zone, NR_KERNEL_STACK) *
2771 				THREAD_SIZE / 1024,
2772 			K(zone_page_state(zone, NR_PAGETABLE)),
2773 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2774 			K(zone_page_state(zone, NR_BOUNCE)),
2775 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2776 			zone->pages_scanned,
2777 			(zone->all_unreclaimable ? "yes" : "no")
2778 			);
2779 		printk("lowmem_reserve[]:");
2780 		for (i = 0; i < MAX_NR_ZONES; i++)
2781 			printk(" %lu", zone->lowmem_reserve[i]);
2782 		printk("\n");
2783 	}
2784 
2785 	for_each_populated_zone(zone) {
2786  		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2787 
2788 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2789 			continue;
2790 		show_node(zone);
2791 		printk("%s: ", zone->name);
2792 
2793 		spin_lock_irqsave(&zone->lock, flags);
2794 		for (order = 0; order < MAX_ORDER; order++) {
2795 			nr[order] = zone->free_area[order].nr_free;
2796 			total += nr[order] << order;
2797 		}
2798 		spin_unlock_irqrestore(&zone->lock, flags);
2799 		for (order = 0; order < MAX_ORDER; order++)
2800 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2801 		printk("= %lukB\n", K(total));
2802 	}
2803 
2804 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2805 
2806 	show_swap_cache_info();
2807 }
2808 
2809 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2810 {
2811 	zoneref->zone = zone;
2812 	zoneref->zone_idx = zone_idx(zone);
2813 }
2814 
2815 /*
2816  * Builds allocation fallback zone lists.
2817  *
2818  * Add all populated zones of a node to the zonelist.
2819  */
2820 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2821 				int nr_zones, enum zone_type zone_type)
2822 {
2823 	struct zone *zone;
2824 
2825 	BUG_ON(zone_type >= MAX_NR_ZONES);
2826 	zone_type++;
2827 
2828 	do {
2829 		zone_type--;
2830 		zone = pgdat->node_zones + zone_type;
2831 		if (populated_zone(zone)) {
2832 			zoneref_set_zone(zone,
2833 				&zonelist->_zonerefs[nr_zones++]);
2834 			check_highest_zone(zone_type);
2835 		}
2836 
2837 	} while (zone_type);
2838 	return nr_zones;
2839 }
2840 
2841 
2842 /*
2843  *  zonelist_order:
2844  *  0 = automatic detection of better ordering.
2845  *  1 = order by ([node] distance, -zonetype)
2846  *  2 = order by (-zonetype, [node] distance)
2847  *
2848  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2849  *  the same zonelist. So only NUMA can configure this param.
2850  */
2851 #define ZONELIST_ORDER_DEFAULT  0
2852 #define ZONELIST_ORDER_NODE     1
2853 #define ZONELIST_ORDER_ZONE     2
2854 
2855 /* zonelist order in the kernel.
2856  * set_zonelist_order() will set this to NODE or ZONE.
2857  */
2858 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2859 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2860 
2861 
2862 #ifdef CONFIG_NUMA
2863 /* The value user specified ....changed by config */
2864 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2865 /* string for sysctl */
2866 #define NUMA_ZONELIST_ORDER_LEN	16
2867 char numa_zonelist_order[16] = "default";
2868 
2869 /*
2870  * interface for configure zonelist ordering.
2871  * command line option "numa_zonelist_order"
2872  *	= "[dD]efault	- default, automatic configuration.
2873  *	= "[nN]ode 	- order by node locality, then by zone within node
2874  *	= "[zZ]one      - order by zone, then by locality within zone
2875  */
2876 
2877 static int __parse_numa_zonelist_order(char *s)
2878 {
2879 	if (*s == 'd' || *s == 'D') {
2880 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2881 	} else if (*s == 'n' || *s == 'N') {
2882 		user_zonelist_order = ZONELIST_ORDER_NODE;
2883 	} else if (*s == 'z' || *s == 'Z') {
2884 		user_zonelist_order = ZONELIST_ORDER_ZONE;
2885 	} else {
2886 		printk(KERN_WARNING
2887 			"Ignoring invalid numa_zonelist_order value:  "
2888 			"%s\n", s);
2889 		return -EINVAL;
2890 	}
2891 	return 0;
2892 }
2893 
2894 static __init int setup_numa_zonelist_order(char *s)
2895 {
2896 	int ret;
2897 
2898 	if (!s)
2899 		return 0;
2900 
2901 	ret = __parse_numa_zonelist_order(s);
2902 	if (ret == 0)
2903 		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2904 
2905 	return ret;
2906 }
2907 early_param("numa_zonelist_order", setup_numa_zonelist_order);
2908 
2909 /*
2910  * sysctl handler for numa_zonelist_order
2911  */
2912 int numa_zonelist_order_handler(ctl_table *table, int write,
2913 		void __user *buffer, size_t *length,
2914 		loff_t *ppos)
2915 {
2916 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2917 	int ret;
2918 	static DEFINE_MUTEX(zl_order_mutex);
2919 
2920 	mutex_lock(&zl_order_mutex);
2921 	if (write)
2922 		strcpy(saved_string, (char*)table->data);
2923 	ret = proc_dostring(table, write, buffer, length, ppos);
2924 	if (ret)
2925 		goto out;
2926 	if (write) {
2927 		int oldval = user_zonelist_order;
2928 		if (__parse_numa_zonelist_order((char*)table->data)) {
2929 			/*
2930 			 * bogus value.  restore saved string
2931 			 */
2932 			strncpy((char*)table->data, saved_string,
2933 				NUMA_ZONELIST_ORDER_LEN);
2934 			user_zonelist_order = oldval;
2935 		} else if (oldval != user_zonelist_order) {
2936 			mutex_lock(&zonelists_mutex);
2937 			build_all_zonelists(NULL);
2938 			mutex_unlock(&zonelists_mutex);
2939 		}
2940 	}
2941 out:
2942 	mutex_unlock(&zl_order_mutex);
2943 	return ret;
2944 }
2945 
2946 
2947 #define MAX_NODE_LOAD (nr_online_nodes)
2948 static int node_load[MAX_NUMNODES];
2949 
2950 /**
2951  * find_next_best_node - find the next node that should appear in a given node's fallback list
2952  * @node: node whose fallback list we're appending
2953  * @used_node_mask: nodemask_t of already used nodes
2954  *
2955  * We use a number of factors to determine which is the next node that should
2956  * appear on a given node's fallback list.  The node should not have appeared
2957  * already in @node's fallback list, and it should be the next closest node
2958  * according to the distance array (which contains arbitrary distance values
2959  * from each node to each node in the system), and should also prefer nodes
2960  * with no CPUs, since presumably they'll have very little allocation pressure
2961  * on them otherwise.
2962  * It returns -1 if no node is found.
2963  */
2964 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2965 {
2966 	int n, val;
2967 	int min_val = INT_MAX;
2968 	int best_node = -1;
2969 	const struct cpumask *tmp = cpumask_of_node(0);
2970 
2971 	/* Use the local node if we haven't already */
2972 	if (!node_isset(node, *used_node_mask)) {
2973 		node_set(node, *used_node_mask);
2974 		return node;
2975 	}
2976 
2977 	for_each_node_state(n, N_HIGH_MEMORY) {
2978 
2979 		/* Don't want a node to appear more than once */
2980 		if (node_isset(n, *used_node_mask))
2981 			continue;
2982 
2983 		/* Use the distance array to find the distance */
2984 		val = node_distance(node, n);
2985 
2986 		/* Penalize nodes under us ("prefer the next node") */
2987 		val += (n < node);
2988 
2989 		/* Give preference to headless and unused nodes */
2990 		tmp = cpumask_of_node(n);
2991 		if (!cpumask_empty(tmp))
2992 			val += PENALTY_FOR_NODE_WITH_CPUS;
2993 
2994 		/* Slight preference for less loaded node */
2995 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2996 		val += node_load[n];
2997 
2998 		if (val < min_val) {
2999 			min_val = val;
3000 			best_node = n;
3001 		}
3002 	}
3003 
3004 	if (best_node >= 0)
3005 		node_set(best_node, *used_node_mask);
3006 
3007 	return best_node;
3008 }
3009 
3010 
3011 /*
3012  * Build zonelists ordered by node and zones within node.
3013  * This results in maximum locality--normal zone overflows into local
3014  * DMA zone, if any--but risks exhausting DMA zone.
3015  */
3016 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3017 {
3018 	int j;
3019 	struct zonelist *zonelist;
3020 
3021 	zonelist = &pgdat->node_zonelists[0];
3022 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3023 		;
3024 	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3025 							MAX_NR_ZONES - 1);
3026 	zonelist->_zonerefs[j].zone = NULL;
3027 	zonelist->_zonerefs[j].zone_idx = 0;
3028 }
3029 
3030 /*
3031  * Build gfp_thisnode zonelists
3032  */
3033 static void build_thisnode_zonelists(pg_data_t *pgdat)
3034 {
3035 	int j;
3036 	struct zonelist *zonelist;
3037 
3038 	zonelist = &pgdat->node_zonelists[1];
3039 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3040 	zonelist->_zonerefs[j].zone = NULL;
3041 	zonelist->_zonerefs[j].zone_idx = 0;
3042 }
3043 
3044 /*
3045  * Build zonelists ordered by zone and nodes within zones.
3046  * This results in conserving DMA zone[s] until all Normal memory is
3047  * exhausted, but results in overflowing to remote node while memory
3048  * may still exist in local DMA zone.
3049  */
3050 static int node_order[MAX_NUMNODES];
3051 
3052 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3053 {
3054 	int pos, j, node;
3055 	int zone_type;		/* needs to be signed */
3056 	struct zone *z;
3057 	struct zonelist *zonelist;
3058 
3059 	zonelist = &pgdat->node_zonelists[0];
3060 	pos = 0;
3061 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3062 		for (j = 0; j < nr_nodes; j++) {
3063 			node = node_order[j];
3064 			z = &NODE_DATA(node)->node_zones[zone_type];
3065 			if (populated_zone(z)) {
3066 				zoneref_set_zone(z,
3067 					&zonelist->_zonerefs[pos++]);
3068 				check_highest_zone(zone_type);
3069 			}
3070 		}
3071 	}
3072 	zonelist->_zonerefs[pos].zone = NULL;
3073 	zonelist->_zonerefs[pos].zone_idx = 0;
3074 }
3075 
3076 static int default_zonelist_order(void)
3077 {
3078 	int nid, zone_type;
3079 	unsigned long low_kmem_size,total_size;
3080 	struct zone *z;
3081 	int average_size;
3082 	/*
3083          * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
3084 	 * If they are really small and used heavily, the system can fall
3085 	 * into OOM very easily.
3086 	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
3087 	 */
3088 	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
3089 	low_kmem_size = 0;
3090 	total_size = 0;
3091 	for_each_online_node(nid) {
3092 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3093 			z = &NODE_DATA(nid)->node_zones[zone_type];
3094 			if (populated_zone(z)) {
3095 				if (zone_type < ZONE_NORMAL)
3096 					low_kmem_size += z->present_pages;
3097 				total_size += z->present_pages;
3098 			} else if (zone_type == ZONE_NORMAL) {
3099 				/*
3100 				 * If any node has only lowmem, then node order
3101 				 * is preferred to allow kernel allocations
3102 				 * locally; otherwise, they can easily infringe
3103 				 * on other nodes when there is an abundance of
3104 				 * lowmem available to allocate from.
3105 				 */
3106 				return ZONELIST_ORDER_NODE;
3107 			}
3108 		}
3109 	}
3110 	if (!low_kmem_size ||  /* there are no DMA area. */
3111 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
3112 		return ZONELIST_ORDER_NODE;
3113 	/*
3114 	 * look into each node's config.
3115   	 * If there is a node whose DMA/DMA32 memory is very big area on
3116  	 * local memory, NODE_ORDER may be suitable.
3117          */
3118 	average_size = total_size /
3119 				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
3120 	for_each_online_node(nid) {
3121 		low_kmem_size = 0;
3122 		total_size = 0;
3123 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3124 			z = &NODE_DATA(nid)->node_zones[zone_type];
3125 			if (populated_zone(z)) {
3126 				if (zone_type < ZONE_NORMAL)
3127 					low_kmem_size += z->present_pages;
3128 				total_size += z->present_pages;
3129 			}
3130 		}
3131 		if (low_kmem_size &&
3132 		    total_size > average_size && /* ignore small node */
3133 		    low_kmem_size > total_size * 70/100)
3134 			return ZONELIST_ORDER_NODE;
3135 	}
3136 	return ZONELIST_ORDER_ZONE;
3137 }
3138 
3139 static void set_zonelist_order(void)
3140 {
3141 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3142 		current_zonelist_order = default_zonelist_order();
3143 	else
3144 		current_zonelist_order = user_zonelist_order;
3145 }
3146 
3147 static void build_zonelists(pg_data_t *pgdat)
3148 {
3149 	int j, node, load;
3150 	enum zone_type i;
3151 	nodemask_t used_mask;
3152 	int local_node, prev_node;
3153 	struct zonelist *zonelist;
3154 	int order = current_zonelist_order;
3155 
3156 	/* initialize zonelists */
3157 	for (i = 0; i < MAX_ZONELISTS; i++) {
3158 		zonelist = pgdat->node_zonelists + i;
3159 		zonelist->_zonerefs[0].zone = NULL;
3160 		zonelist->_zonerefs[0].zone_idx = 0;
3161 	}
3162 
3163 	/* NUMA-aware ordering of nodes */
3164 	local_node = pgdat->node_id;
3165 	load = nr_online_nodes;
3166 	prev_node = local_node;
3167 	nodes_clear(used_mask);
3168 
3169 	memset(node_order, 0, sizeof(node_order));
3170 	j = 0;
3171 
3172 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3173 		int distance = node_distance(local_node, node);
3174 
3175 		/*
3176 		 * If another node is sufficiently far away then it is better
3177 		 * to reclaim pages in a zone before going off node.
3178 		 */
3179 		if (distance > RECLAIM_DISTANCE)
3180 			zone_reclaim_mode = 1;
3181 
3182 		/*
3183 		 * We don't want to pressure a particular node.
3184 		 * So adding penalty to the first node in same
3185 		 * distance group to make it round-robin.
3186 		 */
3187 		if (distance != node_distance(local_node, prev_node))
3188 			node_load[node] = load;
3189 
3190 		prev_node = node;
3191 		load--;
3192 		if (order == ZONELIST_ORDER_NODE)
3193 			build_zonelists_in_node_order(pgdat, node);
3194 		else
3195 			node_order[j++] = node;	/* remember order */
3196 	}
3197 
3198 	if (order == ZONELIST_ORDER_ZONE) {
3199 		/* calculate node order -- i.e., DMA last! */
3200 		build_zonelists_in_zone_order(pgdat, j);
3201 	}
3202 
3203 	build_thisnode_zonelists(pgdat);
3204 }
3205 
3206 /* Construct the zonelist performance cache - see further mmzone.h */
3207 static void build_zonelist_cache(pg_data_t *pgdat)
3208 {
3209 	struct zonelist *zonelist;
3210 	struct zonelist_cache *zlc;
3211 	struct zoneref *z;
3212 
3213 	zonelist = &pgdat->node_zonelists[0];
3214 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3215 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3216 	for (z = zonelist->_zonerefs; z->zone; z++)
3217 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3218 }
3219 
3220 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3221 /*
3222  * Return node id of node used for "local" allocations.
3223  * I.e., first node id of first zone in arg node's generic zonelist.
3224  * Used for initializing percpu 'numa_mem', which is used primarily
3225  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3226  */
3227 int local_memory_node(int node)
3228 {
3229 	struct zone *zone;
3230 
3231 	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3232 				   gfp_zone(GFP_KERNEL),
3233 				   NULL,
3234 				   &zone);
3235 	return zone->node;
3236 }
3237 #endif
3238 
3239 #else	/* CONFIG_NUMA */
3240 
3241 static void set_zonelist_order(void)
3242 {
3243 	current_zonelist_order = ZONELIST_ORDER_ZONE;
3244 }
3245 
3246 static void build_zonelists(pg_data_t *pgdat)
3247 {
3248 	int node, local_node;
3249 	enum zone_type j;
3250 	struct zonelist *zonelist;
3251 
3252 	local_node = pgdat->node_id;
3253 
3254 	zonelist = &pgdat->node_zonelists[0];
3255 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3256 
3257 	/*
3258 	 * Now we build the zonelist so that it contains the zones
3259 	 * of all the other nodes.
3260 	 * We don't want to pressure a particular node, so when
3261 	 * building the zones for node N, we make sure that the
3262 	 * zones coming right after the local ones are those from
3263 	 * node N+1 (modulo N)
3264 	 */
3265 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3266 		if (!node_online(node))
3267 			continue;
3268 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3269 							MAX_NR_ZONES - 1);
3270 	}
3271 	for (node = 0; node < local_node; node++) {
3272 		if (!node_online(node))
3273 			continue;
3274 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3275 							MAX_NR_ZONES - 1);
3276 	}
3277 
3278 	zonelist->_zonerefs[j].zone = NULL;
3279 	zonelist->_zonerefs[j].zone_idx = 0;
3280 }
3281 
3282 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3283 static void build_zonelist_cache(pg_data_t *pgdat)
3284 {
3285 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
3286 }
3287 
3288 #endif	/* CONFIG_NUMA */
3289 
3290 /*
3291  * Boot pageset table. One per cpu which is going to be used for all
3292  * zones and all nodes. The parameters will be set in such a way
3293  * that an item put on a list will immediately be handed over to
3294  * the buddy list. This is safe since pageset manipulation is done
3295  * with interrupts disabled.
3296  *
3297  * The boot_pagesets must be kept even after bootup is complete for
3298  * unused processors and/or zones. They do play a role for bootstrapping
3299  * hotplugged processors.
3300  *
3301  * zoneinfo_show() and maybe other functions do
3302  * not check if the processor is online before following the pageset pointer.
3303  * Other parts of the kernel may not check if the zone is available.
3304  */
3305 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3306 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3307 static void setup_zone_pageset(struct zone *zone);
3308 
3309 /*
3310  * Global mutex to protect against size modification of zonelists
3311  * as well as to serialize pageset setup for the new populated zone.
3312  */
3313 DEFINE_MUTEX(zonelists_mutex);
3314 
3315 /* return values int ....just for stop_machine() */
3316 static __init_refok int __build_all_zonelists(void *data)
3317 {
3318 	int nid;
3319 	int cpu;
3320 
3321 #ifdef CONFIG_NUMA
3322 	memset(node_load, 0, sizeof(node_load));
3323 #endif
3324 	for_each_online_node(nid) {
3325 		pg_data_t *pgdat = NODE_DATA(nid);
3326 
3327 		build_zonelists(pgdat);
3328 		build_zonelist_cache(pgdat);
3329 	}
3330 
3331 	/*
3332 	 * Initialize the boot_pagesets that are going to be used
3333 	 * for bootstrapping processors. The real pagesets for
3334 	 * each zone will be allocated later when the per cpu
3335 	 * allocator is available.
3336 	 *
3337 	 * boot_pagesets are used also for bootstrapping offline
3338 	 * cpus if the system is already booted because the pagesets
3339 	 * are needed to initialize allocators on a specific cpu too.
3340 	 * F.e. the percpu allocator needs the page allocator which
3341 	 * needs the percpu allocator in order to allocate its pagesets
3342 	 * (a chicken-egg dilemma).
3343 	 */
3344 	for_each_possible_cpu(cpu) {
3345 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3346 
3347 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3348 		/*
3349 		 * We now know the "local memory node" for each node--
3350 		 * i.e., the node of the first zone in the generic zonelist.
3351 		 * Set up numa_mem percpu variable for on-line cpus.  During
3352 		 * boot, only the boot cpu should be on-line;  we'll init the
3353 		 * secondary cpus' numa_mem as they come on-line.  During
3354 		 * node/memory hotplug, we'll fixup all on-line cpus.
3355 		 */
3356 		if (cpu_online(cpu))
3357 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3358 #endif
3359 	}
3360 
3361 	return 0;
3362 }
3363 
3364 /*
3365  * Called with zonelists_mutex held always
3366  * unless system_state == SYSTEM_BOOTING.
3367  */
3368 void __ref build_all_zonelists(void *data)
3369 {
3370 	set_zonelist_order();
3371 
3372 	if (system_state == SYSTEM_BOOTING) {
3373 		__build_all_zonelists(NULL);
3374 		mminit_verify_zonelist();
3375 		cpuset_init_current_mems_allowed();
3376 	} else {
3377 		/* we have to stop all cpus to guarantee there is no user
3378 		   of zonelist */
3379 #ifdef CONFIG_MEMORY_HOTPLUG
3380 		if (data)
3381 			setup_zone_pageset((struct zone *)data);
3382 #endif
3383 		stop_machine(__build_all_zonelists, NULL, NULL);
3384 		/* cpuset refresh routine should be here */
3385 	}
3386 	vm_total_pages = nr_free_pagecache_pages();
3387 	/*
3388 	 * Disable grouping by mobility if the number of pages in the
3389 	 * system is too low to allow the mechanism to work. It would be
3390 	 * more accurate, but expensive to check per-zone. This check is
3391 	 * made on memory-hotadd so a system can start with mobility
3392 	 * disabled and enable it later
3393 	 */
3394 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3395 		page_group_by_mobility_disabled = 1;
3396 	else
3397 		page_group_by_mobility_disabled = 0;
3398 
3399 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
3400 		"Total pages: %ld\n",
3401 			nr_online_nodes,
3402 			zonelist_order_name[current_zonelist_order],
3403 			page_group_by_mobility_disabled ? "off" : "on",
3404 			vm_total_pages);
3405 #ifdef CONFIG_NUMA
3406 	printk("Policy zone: %s\n", zone_names[policy_zone]);
3407 #endif
3408 }
3409 
3410 /*
3411  * Helper functions to size the waitqueue hash table.
3412  * Essentially these want to choose hash table sizes sufficiently
3413  * large so that collisions trying to wait on pages are rare.
3414  * But in fact, the number of active page waitqueues on typical
3415  * systems is ridiculously low, less than 200. So this is even
3416  * conservative, even though it seems large.
3417  *
3418  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3419  * waitqueues, i.e. the size of the waitq table given the number of pages.
3420  */
3421 #define PAGES_PER_WAITQUEUE	256
3422 
3423 #ifndef CONFIG_MEMORY_HOTPLUG
3424 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3425 {
3426 	unsigned long size = 1;
3427 
3428 	pages /= PAGES_PER_WAITQUEUE;
3429 
3430 	while (size < pages)
3431 		size <<= 1;
3432 
3433 	/*
3434 	 * Once we have dozens or even hundreds of threads sleeping
3435 	 * on IO we've got bigger problems than wait queue collision.
3436 	 * Limit the size of the wait table to a reasonable size.
3437 	 */
3438 	size = min(size, 4096UL);
3439 
3440 	return max(size, 4UL);
3441 }
3442 #else
3443 /*
3444  * A zone's size might be changed by hot-add, so it is not possible to determine
3445  * a suitable size for its wait_table.  So we use the maximum size now.
3446  *
3447  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
3448  *
3449  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3450  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3451  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3452  *
3453  * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3454  * or more by the traditional way. (See above).  It equals:
3455  *
3456  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3457  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3458  *    powerpc (64K page size)             : =  (32G +16M)byte.
3459  */
3460 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3461 {
3462 	return 4096UL;
3463 }
3464 #endif
3465 
3466 /*
3467  * This is an integer logarithm so that shifts can be used later
3468  * to extract the more random high bits from the multiplicative
3469  * hash function before the remainder is taken.
3470  */
3471 static inline unsigned long wait_table_bits(unsigned long size)
3472 {
3473 	return ffz(~size);
3474 }
3475 
3476 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3477 
3478 /*
3479  * Check if a pageblock contains reserved pages
3480  */
3481 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3482 {
3483 	unsigned long pfn;
3484 
3485 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3486 		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3487 			return 1;
3488 	}
3489 	return 0;
3490 }
3491 
3492 /*
3493  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3494  * of blocks reserved is based on min_wmark_pages(zone). The memory within
3495  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3496  * higher will lead to a bigger reserve which will get freed as contiguous
3497  * blocks as reclaim kicks in
3498  */
3499 static void setup_zone_migrate_reserve(struct zone *zone)
3500 {
3501 	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3502 	struct page *page;
3503 	unsigned long block_migratetype;
3504 	int reserve;
3505 
3506 	/*
3507 	 * Get the start pfn, end pfn and the number of blocks to reserve
3508 	 * We have to be careful to be aligned to pageblock_nr_pages to
3509 	 * make sure that we always check pfn_valid for the first page in
3510 	 * the block.
3511 	 */
3512 	start_pfn = zone->zone_start_pfn;
3513 	end_pfn = start_pfn + zone->spanned_pages;
3514 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
3515 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3516 							pageblock_order;
3517 
3518 	/*
3519 	 * Reserve blocks are generally in place to help high-order atomic
3520 	 * allocations that are short-lived. A min_free_kbytes value that
3521 	 * would result in more than 2 reserve blocks for atomic allocations
3522 	 * is assumed to be in place to help anti-fragmentation for the
3523 	 * future allocation of hugepages at runtime.
3524 	 */
3525 	reserve = min(2, reserve);
3526 
3527 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3528 		if (!pfn_valid(pfn))
3529 			continue;
3530 		page = pfn_to_page(pfn);
3531 
3532 		/* Watch out for overlapping nodes */
3533 		if (page_to_nid(page) != zone_to_nid(zone))
3534 			continue;
3535 
3536 		block_migratetype = get_pageblock_migratetype(page);
3537 
3538 		/* Only test what is necessary when the reserves are not met */
3539 		if (reserve > 0) {
3540 			/*
3541 			 * Blocks with reserved pages will never free, skip
3542 			 * them.
3543 			 */
3544 			block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3545 			if (pageblock_is_reserved(pfn, block_end_pfn))
3546 				continue;
3547 
3548 			/* If this block is reserved, account for it */
3549 			if (block_migratetype == MIGRATE_RESERVE) {
3550 				reserve--;
3551 				continue;
3552 			}
3553 
3554 			/* Suitable for reserving if this block is movable */
3555 			if (block_migratetype == MIGRATE_MOVABLE) {
3556 				set_pageblock_migratetype(page,
3557 							MIGRATE_RESERVE);
3558 				move_freepages_block(zone, page,
3559 							MIGRATE_RESERVE);
3560 				reserve--;
3561 				continue;
3562 			}
3563 		}
3564 
3565 		/*
3566 		 * If the reserve is met and this is a previous reserved block,
3567 		 * take it back
3568 		 */
3569 		if (block_migratetype == MIGRATE_RESERVE) {
3570 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3571 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
3572 		}
3573 	}
3574 }
3575 
3576 /*
3577  * Initially all pages are reserved - free ones are freed
3578  * up by free_all_bootmem() once the early boot process is
3579  * done. Non-atomic initialization, single-pass.
3580  */
3581 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3582 		unsigned long start_pfn, enum memmap_context context)
3583 {
3584 	struct page *page;
3585 	unsigned long end_pfn = start_pfn + size;
3586 	unsigned long pfn;
3587 	struct zone *z;
3588 
3589 	if (highest_memmap_pfn < end_pfn - 1)
3590 		highest_memmap_pfn = end_pfn - 1;
3591 
3592 	z = &NODE_DATA(nid)->node_zones[zone];
3593 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3594 		/*
3595 		 * There can be holes in boot-time mem_map[]s
3596 		 * handed to this function.  They do not
3597 		 * exist on hotplugged memory.
3598 		 */
3599 		if (context == MEMMAP_EARLY) {
3600 			if (!early_pfn_valid(pfn))
3601 				continue;
3602 			if (!early_pfn_in_nid(pfn, nid))
3603 				continue;
3604 		}
3605 		page = pfn_to_page(pfn);
3606 		set_page_links(page, zone, nid, pfn);
3607 		mminit_verify_page_links(page, zone, nid, pfn);
3608 		init_page_count(page);
3609 		reset_page_mapcount(page);
3610 		SetPageReserved(page);
3611 		/*
3612 		 * Mark the block movable so that blocks are reserved for
3613 		 * movable at startup. This will force kernel allocations
3614 		 * to reserve their blocks rather than leaking throughout
3615 		 * the address space during boot when many long-lived
3616 		 * kernel allocations are made. Later some blocks near
3617 		 * the start are marked MIGRATE_RESERVE by
3618 		 * setup_zone_migrate_reserve()
3619 		 *
3620 		 * bitmap is created for zone's valid pfn range. but memmap
3621 		 * can be created for invalid pages (for alignment)
3622 		 * check here not to call set_pageblock_migratetype() against
3623 		 * pfn out of zone.
3624 		 */
3625 		if ((z->zone_start_pfn <= pfn)
3626 		    && (pfn < z->zone_start_pfn + z->spanned_pages)
3627 		    && !(pfn & (pageblock_nr_pages - 1)))
3628 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3629 
3630 		INIT_LIST_HEAD(&page->lru);
3631 #ifdef WANT_PAGE_VIRTUAL
3632 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
3633 		if (!is_highmem_idx(zone))
3634 			set_page_address(page, __va(pfn << PAGE_SHIFT));
3635 #endif
3636 	}
3637 }
3638 
3639 static void __meminit zone_init_free_lists(struct zone *zone)
3640 {
3641 	int order, t;
3642 	for_each_migratetype_order(order, t) {
3643 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3644 		zone->free_area[order].nr_free = 0;
3645 	}
3646 }
3647 
3648 #ifndef __HAVE_ARCH_MEMMAP_INIT
3649 #define memmap_init(size, nid, zone, start_pfn) \
3650 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3651 #endif
3652 
3653 static int zone_batchsize(struct zone *zone)
3654 {
3655 #ifdef CONFIG_MMU
3656 	int batch;
3657 
3658 	/*
3659 	 * The per-cpu-pages pools are set to around 1000th of the
3660 	 * size of the zone.  But no more than 1/2 of a meg.
3661 	 *
3662 	 * OK, so we don't know how big the cache is.  So guess.
3663 	 */
3664 	batch = zone->present_pages / 1024;
3665 	if (batch * PAGE_SIZE > 512 * 1024)
3666 		batch = (512 * 1024) / PAGE_SIZE;
3667 	batch /= 4;		/* We effectively *= 4 below */
3668 	if (batch < 1)
3669 		batch = 1;
3670 
3671 	/*
3672 	 * Clamp the batch to a 2^n - 1 value. Having a power
3673 	 * of 2 value was found to be more likely to have
3674 	 * suboptimal cache aliasing properties in some cases.
3675 	 *
3676 	 * For example if 2 tasks are alternately allocating
3677 	 * batches of pages, one task can end up with a lot
3678 	 * of pages of one half of the possible page colors
3679 	 * and the other with pages of the other colors.
3680 	 */
3681 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3682 
3683 	return batch;
3684 
3685 #else
3686 	/* The deferral and batching of frees should be suppressed under NOMMU
3687 	 * conditions.
3688 	 *
3689 	 * The problem is that NOMMU needs to be able to allocate large chunks
3690 	 * of contiguous memory as there's no hardware page translation to
3691 	 * assemble apparent contiguous memory from discontiguous pages.
3692 	 *
3693 	 * Queueing large contiguous runs of pages for batching, however,
3694 	 * causes the pages to actually be freed in smaller chunks.  As there
3695 	 * can be a significant delay between the individual batches being
3696 	 * recycled, this leads to the once large chunks of space being
3697 	 * fragmented and becoming unavailable for high-order allocations.
3698 	 */
3699 	return 0;
3700 #endif
3701 }
3702 
3703 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3704 {
3705 	struct per_cpu_pages *pcp;
3706 	int migratetype;
3707 
3708 	memset(p, 0, sizeof(*p));
3709 
3710 	pcp = &p->pcp;
3711 	pcp->count = 0;
3712 	pcp->high = 6 * batch;
3713 	pcp->batch = max(1UL, 1 * batch);
3714 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3715 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3716 }
3717 
3718 /*
3719  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3720  * to the value high for the pageset p.
3721  */
3722 
3723 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3724 				unsigned long high)
3725 {
3726 	struct per_cpu_pages *pcp;
3727 
3728 	pcp = &p->pcp;
3729 	pcp->high = high;
3730 	pcp->batch = max(1UL, high/4);
3731 	if ((high/4) > (PAGE_SHIFT * 8))
3732 		pcp->batch = PAGE_SHIFT * 8;
3733 }
3734 
3735 static void setup_zone_pageset(struct zone *zone)
3736 {
3737 	int cpu;
3738 
3739 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
3740 
3741 	for_each_possible_cpu(cpu) {
3742 		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3743 
3744 		setup_pageset(pcp, zone_batchsize(zone));
3745 
3746 		if (percpu_pagelist_fraction)
3747 			setup_pagelist_highmark(pcp,
3748 				(zone->present_pages /
3749 					percpu_pagelist_fraction));
3750 	}
3751 }
3752 
3753 /*
3754  * Allocate per cpu pagesets and initialize them.
3755  * Before this call only boot pagesets were available.
3756  */
3757 void __init setup_per_cpu_pageset(void)
3758 {
3759 	struct zone *zone;
3760 
3761 	for_each_populated_zone(zone)
3762 		setup_zone_pageset(zone);
3763 }
3764 
3765 static noinline __init_refok
3766 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3767 {
3768 	int i;
3769 	struct pglist_data *pgdat = zone->zone_pgdat;
3770 	size_t alloc_size;
3771 
3772 	/*
3773 	 * The per-page waitqueue mechanism uses hashed waitqueues
3774 	 * per zone.
3775 	 */
3776 	zone->wait_table_hash_nr_entries =
3777 		 wait_table_hash_nr_entries(zone_size_pages);
3778 	zone->wait_table_bits =
3779 		wait_table_bits(zone->wait_table_hash_nr_entries);
3780 	alloc_size = zone->wait_table_hash_nr_entries
3781 					* sizeof(wait_queue_head_t);
3782 
3783 	if (!slab_is_available()) {
3784 		zone->wait_table = (wait_queue_head_t *)
3785 			alloc_bootmem_node_nopanic(pgdat, alloc_size);
3786 	} else {
3787 		/*
3788 		 * This case means that a zone whose size was 0 gets new memory
3789 		 * via memory hot-add.
3790 		 * But it may be the case that a new node was hot-added.  In
3791 		 * this case vmalloc() will not be able to use this new node's
3792 		 * memory - this wait_table must be initialized to use this new
3793 		 * node itself as well.
3794 		 * To use this new node's memory, further consideration will be
3795 		 * necessary.
3796 		 */
3797 		zone->wait_table = vmalloc(alloc_size);
3798 	}
3799 	if (!zone->wait_table)
3800 		return -ENOMEM;
3801 
3802 	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3803 		init_waitqueue_head(zone->wait_table + i);
3804 
3805 	return 0;
3806 }
3807 
3808 static int __zone_pcp_update(void *data)
3809 {
3810 	struct zone *zone = data;
3811 	int cpu;
3812 	unsigned long batch = zone_batchsize(zone), flags;
3813 
3814 	for_each_possible_cpu(cpu) {
3815 		struct per_cpu_pageset *pset;
3816 		struct per_cpu_pages *pcp;
3817 
3818 		pset = per_cpu_ptr(zone->pageset, cpu);
3819 		pcp = &pset->pcp;
3820 
3821 		local_irq_save(flags);
3822 		free_pcppages_bulk(zone, pcp->count, pcp);
3823 		setup_pageset(pset, batch);
3824 		local_irq_restore(flags);
3825 	}
3826 	return 0;
3827 }
3828 
3829 void zone_pcp_update(struct zone *zone)
3830 {
3831 	stop_machine(__zone_pcp_update, zone, NULL);
3832 }
3833 
3834 static __meminit void zone_pcp_init(struct zone *zone)
3835 {
3836 	/*
3837 	 * per cpu subsystem is not up at this point. The following code
3838 	 * relies on the ability of the linker to provide the
3839 	 * offset of a (static) per cpu variable into the per cpu area.
3840 	 */
3841 	zone->pageset = &boot_pageset;
3842 
3843 	if (zone->present_pages)
3844 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3845 			zone->name, zone->present_pages,
3846 					 zone_batchsize(zone));
3847 }
3848 
3849 __meminit int init_currently_empty_zone(struct zone *zone,
3850 					unsigned long zone_start_pfn,
3851 					unsigned long size,
3852 					enum memmap_context context)
3853 {
3854 	struct pglist_data *pgdat = zone->zone_pgdat;
3855 	int ret;
3856 	ret = zone_wait_table_init(zone, size);
3857 	if (ret)
3858 		return ret;
3859 	pgdat->nr_zones = zone_idx(zone) + 1;
3860 
3861 	zone->zone_start_pfn = zone_start_pfn;
3862 
3863 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3864 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3865 			pgdat->node_id,
3866 			(unsigned long)zone_idx(zone),
3867 			zone_start_pfn, (zone_start_pfn + size));
3868 
3869 	zone_init_free_lists(zone);
3870 
3871 	return 0;
3872 }
3873 
3874 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
3875 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3876 /*
3877  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3878  * Architectures may implement their own version but if add_active_range()
3879  * was used and there are no special requirements, this is a convenient
3880  * alternative
3881  */
3882 int __meminit __early_pfn_to_nid(unsigned long pfn)
3883 {
3884 	unsigned long start_pfn, end_pfn;
3885 	int i, nid;
3886 
3887 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
3888 		if (start_pfn <= pfn && pfn < end_pfn)
3889 			return nid;
3890 	/* This is a memory hole */
3891 	return -1;
3892 }
3893 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3894 
3895 int __meminit early_pfn_to_nid(unsigned long pfn)
3896 {
3897 	int nid;
3898 
3899 	nid = __early_pfn_to_nid(pfn);
3900 	if (nid >= 0)
3901 		return nid;
3902 	/* just returns 0 */
3903 	return 0;
3904 }
3905 
3906 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
3907 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3908 {
3909 	int nid;
3910 
3911 	nid = __early_pfn_to_nid(pfn);
3912 	if (nid >= 0 && nid != node)
3913 		return false;
3914 	return true;
3915 }
3916 #endif
3917 
3918 /**
3919  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3920  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3921  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3922  *
3923  * If an architecture guarantees that all ranges registered with
3924  * add_active_ranges() contain no holes and may be freed, this
3925  * this function may be used instead of calling free_bootmem() manually.
3926  */
3927 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
3928 {
3929 	unsigned long start_pfn, end_pfn;
3930 	int i, this_nid;
3931 
3932 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
3933 		start_pfn = min(start_pfn, max_low_pfn);
3934 		end_pfn = min(end_pfn, max_low_pfn);
3935 
3936 		if (start_pfn < end_pfn)
3937 			free_bootmem_node(NODE_DATA(this_nid),
3938 					  PFN_PHYS(start_pfn),
3939 					  (end_pfn - start_pfn) << PAGE_SHIFT);
3940 	}
3941 }
3942 
3943 /**
3944  * sparse_memory_present_with_active_regions - Call memory_present for each active range
3945  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3946  *
3947  * If an architecture guarantees that all ranges registered with
3948  * add_active_ranges() contain no holes and may be freed, this
3949  * function may be used instead of calling memory_present() manually.
3950  */
3951 void __init sparse_memory_present_with_active_regions(int nid)
3952 {
3953 	unsigned long start_pfn, end_pfn;
3954 	int i, this_nid;
3955 
3956 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
3957 		memory_present(this_nid, start_pfn, end_pfn);
3958 }
3959 
3960 /**
3961  * get_pfn_range_for_nid - Return the start and end page frames for a node
3962  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3963  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3964  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3965  *
3966  * It returns the start and end page frame of a node based on information
3967  * provided by an arch calling add_active_range(). If called for a node
3968  * with no available memory, a warning is printed and the start and end
3969  * PFNs will be 0.
3970  */
3971 void __meminit get_pfn_range_for_nid(unsigned int nid,
3972 			unsigned long *start_pfn, unsigned long *end_pfn)
3973 {
3974 	unsigned long this_start_pfn, this_end_pfn;
3975 	int i;
3976 
3977 	*start_pfn = -1UL;
3978 	*end_pfn = 0;
3979 
3980 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
3981 		*start_pfn = min(*start_pfn, this_start_pfn);
3982 		*end_pfn = max(*end_pfn, this_end_pfn);
3983 	}
3984 
3985 	if (*start_pfn == -1UL)
3986 		*start_pfn = 0;
3987 }
3988 
3989 /*
3990  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3991  * assumption is made that zones within a node are ordered in monotonic
3992  * increasing memory addresses so that the "highest" populated zone is used
3993  */
3994 static void __init find_usable_zone_for_movable(void)
3995 {
3996 	int zone_index;
3997 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3998 		if (zone_index == ZONE_MOVABLE)
3999 			continue;
4000 
4001 		if (arch_zone_highest_possible_pfn[zone_index] >
4002 				arch_zone_lowest_possible_pfn[zone_index])
4003 			break;
4004 	}
4005 
4006 	VM_BUG_ON(zone_index == -1);
4007 	movable_zone = zone_index;
4008 }
4009 
4010 /*
4011  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4012  * because it is sized independent of architecture. Unlike the other zones,
4013  * the starting point for ZONE_MOVABLE is not fixed. It may be different
4014  * in each node depending on the size of each node and how evenly kernelcore
4015  * is distributed. This helper function adjusts the zone ranges
4016  * provided by the architecture for a given node by using the end of the
4017  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4018  * zones within a node are in order of monotonic increases memory addresses
4019  */
4020 static void __meminit adjust_zone_range_for_zone_movable(int nid,
4021 					unsigned long zone_type,
4022 					unsigned long node_start_pfn,
4023 					unsigned long node_end_pfn,
4024 					unsigned long *zone_start_pfn,
4025 					unsigned long *zone_end_pfn)
4026 {
4027 	/* Only adjust if ZONE_MOVABLE is on this node */
4028 	if (zone_movable_pfn[nid]) {
4029 		/* Size ZONE_MOVABLE */
4030 		if (zone_type == ZONE_MOVABLE) {
4031 			*zone_start_pfn = zone_movable_pfn[nid];
4032 			*zone_end_pfn = min(node_end_pfn,
4033 				arch_zone_highest_possible_pfn[movable_zone]);
4034 
4035 		/* Adjust for ZONE_MOVABLE starting within this range */
4036 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4037 				*zone_end_pfn > zone_movable_pfn[nid]) {
4038 			*zone_end_pfn = zone_movable_pfn[nid];
4039 
4040 		/* Check if this whole range is within ZONE_MOVABLE */
4041 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
4042 			*zone_start_pfn = *zone_end_pfn;
4043 	}
4044 }
4045 
4046 /*
4047  * Return the number of pages a zone spans in a node, including holes
4048  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4049  */
4050 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4051 					unsigned long zone_type,
4052 					unsigned long *ignored)
4053 {
4054 	unsigned long node_start_pfn, node_end_pfn;
4055 	unsigned long zone_start_pfn, zone_end_pfn;
4056 
4057 	/* Get the start and end of the node and zone */
4058 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4059 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4060 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4061 	adjust_zone_range_for_zone_movable(nid, zone_type,
4062 				node_start_pfn, node_end_pfn,
4063 				&zone_start_pfn, &zone_end_pfn);
4064 
4065 	/* Check that this node has pages within the zone's required range */
4066 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4067 		return 0;
4068 
4069 	/* Move the zone boundaries inside the node if necessary */
4070 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4071 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4072 
4073 	/* Return the spanned pages */
4074 	return zone_end_pfn - zone_start_pfn;
4075 }
4076 
4077 /*
4078  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4079  * then all holes in the requested range will be accounted for.
4080  */
4081 unsigned long __meminit __absent_pages_in_range(int nid,
4082 				unsigned long range_start_pfn,
4083 				unsigned long range_end_pfn)
4084 {
4085 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
4086 	unsigned long start_pfn, end_pfn;
4087 	int i;
4088 
4089 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4090 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4091 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4092 		nr_absent -= end_pfn - start_pfn;
4093 	}
4094 	return nr_absent;
4095 }
4096 
4097 /**
4098  * absent_pages_in_range - Return number of page frames in holes within a range
4099  * @start_pfn: The start PFN to start searching for holes
4100  * @end_pfn: The end PFN to stop searching for holes
4101  *
4102  * It returns the number of pages frames in memory holes within a range.
4103  */
4104 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4105 							unsigned long end_pfn)
4106 {
4107 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4108 }
4109 
4110 /* Return the number of page frames in holes in a zone on a node */
4111 static unsigned long __meminit zone_absent_pages_in_node(int nid,
4112 					unsigned long zone_type,
4113 					unsigned long *ignored)
4114 {
4115 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4116 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4117 	unsigned long node_start_pfn, node_end_pfn;
4118 	unsigned long zone_start_pfn, zone_end_pfn;
4119 
4120 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4121 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4122 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4123 
4124 	adjust_zone_range_for_zone_movable(nid, zone_type,
4125 			node_start_pfn, node_end_pfn,
4126 			&zone_start_pfn, &zone_end_pfn);
4127 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4128 }
4129 
4130 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4131 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4132 					unsigned long zone_type,
4133 					unsigned long *zones_size)
4134 {
4135 	return zones_size[zone_type];
4136 }
4137 
4138 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4139 						unsigned long zone_type,
4140 						unsigned long *zholes_size)
4141 {
4142 	if (!zholes_size)
4143 		return 0;
4144 
4145 	return zholes_size[zone_type];
4146 }
4147 
4148 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4149 
4150 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4151 		unsigned long *zones_size, unsigned long *zholes_size)
4152 {
4153 	unsigned long realtotalpages, totalpages = 0;
4154 	enum zone_type i;
4155 
4156 	for (i = 0; i < MAX_NR_ZONES; i++)
4157 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4158 								zones_size);
4159 	pgdat->node_spanned_pages = totalpages;
4160 
4161 	realtotalpages = totalpages;
4162 	for (i = 0; i < MAX_NR_ZONES; i++)
4163 		realtotalpages -=
4164 			zone_absent_pages_in_node(pgdat->node_id, i,
4165 								zholes_size);
4166 	pgdat->node_present_pages = realtotalpages;
4167 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4168 							realtotalpages);
4169 }
4170 
4171 #ifndef CONFIG_SPARSEMEM
4172 /*
4173  * Calculate the size of the zone->blockflags rounded to an unsigned long
4174  * Start by making sure zonesize is a multiple of pageblock_order by rounding
4175  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
4176  * round what is now in bits to nearest long in bits, then return it in
4177  * bytes.
4178  */
4179 static unsigned long __init usemap_size(unsigned long zonesize)
4180 {
4181 	unsigned long usemapsize;
4182 
4183 	usemapsize = roundup(zonesize, pageblock_nr_pages);
4184 	usemapsize = usemapsize >> pageblock_order;
4185 	usemapsize *= NR_PAGEBLOCK_BITS;
4186 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4187 
4188 	return usemapsize / 8;
4189 }
4190 
4191 static void __init setup_usemap(struct pglist_data *pgdat,
4192 				struct zone *zone, unsigned long zonesize)
4193 {
4194 	unsigned long usemapsize = usemap_size(zonesize);
4195 	zone->pageblock_flags = NULL;
4196 	if (usemapsize)
4197 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4198 								   usemapsize);
4199 }
4200 #else
4201 static inline void setup_usemap(struct pglist_data *pgdat,
4202 				struct zone *zone, unsigned long zonesize) {}
4203 #endif /* CONFIG_SPARSEMEM */
4204 
4205 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4206 
4207 /* Return a sensible default order for the pageblock size. */
4208 static inline int pageblock_default_order(void)
4209 {
4210 	if (HPAGE_SHIFT > PAGE_SHIFT)
4211 		return HUGETLB_PAGE_ORDER;
4212 
4213 	return MAX_ORDER-1;
4214 }
4215 
4216 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4217 static inline void __init set_pageblock_order(unsigned int order)
4218 {
4219 	/* Check that pageblock_nr_pages has not already been setup */
4220 	if (pageblock_order)
4221 		return;
4222 
4223 	/*
4224 	 * Assume the largest contiguous order of interest is a huge page.
4225 	 * This value may be variable depending on boot parameters on IA64
4226 	 */
4227 	pageblock_order = order;
4228 }
4229 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4230 
4231 /*
4232  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4233  * and pageblock_default_order() are unused as pageblock_order is set
4234  * at compile-time. See include/linux/pageblock-flags.h for the values of
4235  * pageblock_order based on the kernel config
4236  */
4237 static inline int pageblock_default_order(unsigned int order)
4238 {
4239 	return MAX_ORDER-1;
4240 }
4241 #define set_pageblock_order(x)	do {} while (0)
4242 
4243 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4244 
4245 /*
4246  * Set up the zone data structures:
4247  *   - mark all pages reserved
4248  *   - mark all memory queues empty
4249  *   - clear the memory bitmaps
4250  */
4251 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4252 		unsigned long *zones_size, unsigned long *zholes_size)
4253 {
4254 	enum zone_type j;
4255 	int nid = pgdat->node_id;
4256 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
4257 	int ret;
4258 
4259 	pgdat_resize_init(pgdat);
4260 	pgdat->nr_zones = 0;
4261 	init_waitqueue_head(&pgdat->kswapd_wait);
4262 	pgdat->kswapd_max_order = 0;
4263 	pgdat_page_cgroup_init(pgdat);
4264 
4265 	for (j = 0; j < MAX_NR_ZONES; j++) {
4266 		struct zone *zone = pgdat->node_zones + j;
4267 		unsigned long size, realsize, memmap_pages;
4268 		enum lru_list lru;
4269 
4270 		size = zone_spanned_pages_in_node(nid, j, zones_size);
4271 		realsize = size - zone_absent_pages_in_node(nid, j,
4272 								zholes_size);
4273 
4274 		/*
4275 		 * Adjust realsize so that it accounts for how much memory
4276 		 * is used by this zone for memmap. This affects the watermark
4277 		 * and per-cpu initialisations
4278 		 */
4279 		memmap_pages =
4280 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4281 		if (realsize >= memmap_pages) {
4282 			realsize -= memmap_pages;
4283 			if (memmap_pages)
4284 				printk(KERN_DEBUG
4285 				       "  %s zone: %lu pages used for memmap\n",
4286 				       zone_names[j], memmap_pages);
4287 		} else
4288 			printk(KERN_WARNING
4289 				"  %s zone: %lu pages exceeds realsize %lu\n",
4290 				zone_names[j], memmap_pages, realsize);
4291 
4292 		/* Account for reserved pages */
4293 		if (j == 0 && realsize > dma_reserve) {
4294 			realsize -= dma_reserve;
4295 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4296 					zone_names[0], dma_reserve);
4297 		}
4298 
4299 		if (!is_highmem_idx(j))
4300 			nr_kernel_pages += realsize;
4301 		nr_all_pages += realsize;
4302 
4303 		zone->spanned_pages = size;
4304 		zone->present_pages = realsize;
4305 #ifdef CONFIG_NUMA
4306 		zone->node = nid;
4307 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4308 						/ 100;
4309 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4310 #endif
4311 		zone->name = zone_names[j];
4312 		spin_lock_init(&zone->lock);
4313 		spin_lock_init(&zone->lru_lock);
4314 		zone_seqlock_init(zone);
4315 		zone->zone_pgdat = pgdat;
4316 
4317 		zone_pcp_init(zone);
4318 		for_each_lru(lru)
4319 			INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
4320 		zone->reclaim_stat.recent_rotated[0] = 0;
4321 		zone->reclaim_stat.recent_rotated[1] = 0;
4322 		zone->reclaim_stat.recent_scanned[0] = 0;
4323 		zone->reclaim_stat.recent_scanned[1] = 0;
4324 		zap_zone_vm_stats(zone);
4325 		zone->flags = 0;
4326 		if (!size)
4327 			continue;
4328 
4329 		set_pageblock_order(pageblock_default_order());
4330 		setup_usemap(pgdat, zone, size);
4331 		ret = init_currently_empty_zone(zone, zone_start_pfn,
4332 						size, MEMMAP_EARLY);
4333 		BUG_ON(ret);
4334 		memmap_init(size, nid, j, zone_start_pfn);
4335 		zone_start_pfn += size;
4336 	}
4337 }
4338 
4339 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4340 {
4341 	/* Skip empty nodes */
4342 	if (!pgdat->node_spanned_pages)
4343 		return;
4344 
4345 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4346 	/* ia64 gets its own node_mem_map, before this, without bootmem */
4347 	if (!pgdat->node_mem_map) {
4348 		unsigned long size, start, end;
4349 		struct page *map;
4350 
4351 		/*
4352 		 * The zone's endpoints aren't required to be MAX_ORDER
4353 		 * aligned but the node_mem_map endpoints must be in order
4354 		 * for the buddy allocator to function correctly.
4355 		 */
4356 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4357 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4358 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
4359 		size =  (end - start) * sizeof(struct page);
4360 		map = alloc_remap(pgdat->node_id, size);
4361 		if (!map)
4362 			map = alloc_bootmem_node_nopanic(pgdat, size);
4363 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4364 	}
4365 #ifndef CONFIG_NEED_MULTIPLE_NODES
4366 	/*
4367 	 * With no DISCONTIG, the global mem_map is just set as node 0's
4368 	 */
4369 	if (pgdat == NODE_DATA(0)) {
4370 		mem_map = NODE_DATA(0)->node_mem_map;
4371 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4372 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4373 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4374 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4375 	}
4376 #endif
4377 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
4378 }
4379 
4380 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4381 		unsigned long node_start_pfn, unsigned long *zholes_size)
4382 {
4383 	pg_data_t *pgdat = NODE_DATA(nid);
4384 
4385 	pgdat->node_id = nid;
4386 	pgdat->node_start_pfn = node_start_pfn;
4387 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
4388 
4389 	alloc_node_mem_map(pgdat);
4390 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4391 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4392 		nid, (unsigned long)pgdat,
4393 		(unsigned long)pgdat->node_mem_map);
4394 #endif
4395 
4396 	free_area_init_core(pgdat, zones_size, zholes_size);
4397 }
4398 
4399 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4400 
4401 #if MAX_NUMNODES > 1
4402 /*
4403  * Figure out the number of possible node ids.
4404  */
4405 static void __init setup_nr_node_ids(void)
4406 {
4407 	unsigned int node;
4408 	unsigned int highest = 0;
4409 
4410 	for_each_node_mask(node, node_possible_map)
4411 		highest = node;
4412 	nr_node_ids = highest + 1;
4413 }
4414 #else
4415 static inline void setup_nr_node_ids(void)
4416 {
4417 }
4418 #endif
4419 
4420 /**
4421  * node_map_pfn_alignment - determine the maximum internode alignment
4422  *
4423  * This function should be called after node map is populated and sorted.
4424  * It calculates the maximum power of two alignment which can distinguish
4425  * all the nodes.
4426  *
4427  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4428  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
4429  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
4430  * shifted, 1GiB is enough and this function will indicate so.
4431  *
4432  * This is used to test whether pfn -> nid mapping of the chosen memory
4433  * model has fine enough granularity to avoid incorrect mapping for the
4434  * populated node map.
4435  *
4436  * Returns the determined alignment in pfn's.  0 if there is no alignment
4437  * requirement (single node).
4438  */
4439 unsigned long __init node_map_pfn_alignment(void)
4440 {
4441 	unsigned long accl_mask = 0, last_end = 0;
4442 	unsigned long start, end, mask;
4443 	int last_nid = -1;
4444 	int i, nid;
4445 
4446 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
4447 		if (!start || last_nid < 0 || last_nid == nid) {
4448 			last_nid = nid;
4449 			last_end = end;
4450 			continue;
4451 		}
4452 
4453 		/*
4454 		 * Start with a mask granular enough to pin-point to the
4455 		 * start pfn and tick off bits one-by-one until it becomes
4456 		 * too coarse to separate the current node from the last.
4457 		 */
4458 		mask = ~((1 << __ffs(start)) - 1);
4459 		while (mask && last_end <= (start & (mask << 1)))
4460 			mask <<= 1;
4461 
4462 		/* accumulate all internode masks */
4463 		accl_mask |= mask;
4464 	}
4465 
4466 	/* convert mask to number of pages */
4467 	return ~accl_mask + 1;
4468 }
4469 
4470 /* Find the lowest pfn for a node */
4471 static unsigned long __init find_min_pfn_for_node(int nid)
4472 {
4473 	unsigned long min_pfn = ULONG_MAX;
4474 	unsigned long start_pfn;
4475 	int i;
4476 
4477 	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4478 		min_pfn = min(min_pfn, start_pfn);
4479 
4480 	if (min_pfn == ULONG_MAX) {
4481 		printk(KERN_WARNING
4482 			"Could not find start_pfn for node %d\n", nid);
4483 		return 0;
4484 	}
4485 
4486 	return min_pfn;
4487 }
4488 
4489 /**
4490  * find_min_pfn_with_active_regions - Find the minimum PFN registered
4491  *
4492  * It returns the minimum PFN based on information provided via
4493  * add_active_range().
4494  */
4495 unsigned long __init find_min_pfn_with_active_regions(void)
4496 {
4497 	return find_min_pfn_for_node(MAX_NUMNODES);
4498 }
4499 
4500 /*
4501  * early_calculate_totalpages()
4502  * Sum pages in active regions for movable zone.
4503  * Populate N_HIGH_MEMORY for calculating usable_nodes.
4504  */
4505 static unsigned long __init early_calculate_totalpages(void)
4506 {
4507 	unsigned long totalpages = 0;
4508 	unsigned long start_pfn, end_pfn;
4509 	int i, nid;
4510 
4511 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4512 		unsigned long pages = end_pfn - start_pfn;
4513 
4514 		totalpages += pages;
4515 		if (pages)
4516 			node_set_state(nid, N_HIGH_MEMORY);
4517 	}
4518   	return totalpages;
4519 }
4520 
4521 /*
4522  * Find the PFN the Movable zone begins in each node. Kernel memory
4523  * is spread evenly between nodes as long as the nodes have enough
4524  * memory. When they don't, some nodes will have more kernelcore than
4525  * others
4526  */
4527 static void __init find_zone_movable_pfns_for_nodes(void)
4528 {
4529 	int i, nid;
4530 	unsigned long usable_startpfn;
4531 	unsigned long kernelcore_node, kernelcore_remaining;
4532 	/* save the state before borrow the nodemask */
4533 	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4534 	unsigned long totalpages = early_calculate_totalpages();
4535 	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4536 
4537 	/*
4538 	 * If movablecore was specified, calculate what size of
4539 	 * kernelcore that corresponds so that memory usable for
4540 	 * any allocation type is evenly spread. If both kernelcore
4541 	 * and movablecore are specified, then the value of kernelcore
4542 	 * will be used for required_kernelcore if it's greater than
4543 	 * what movablecore would have allowed.
4544 	 */
4545 	if (required_movablecore) {
4546 		unsigned long corepages;
4547 
4548 		/*
4549 		 * Round-up so that ZONE_MOVABLE is at least as large as what
4550 		 * was requested by the user
4551 		 */
4552 		required_movablecore =
4553 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4554 		corepages = totalpages - required_movablecore;
4555 
4556 		required_kernelcore = max(required_kernelcore, corepages);
4557 	}
4558 
4559 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4560 	if (!required_kernelcore)
4561 		goto out;
4562 
4563 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4564 	find_usable_zone_for_movable();
4565 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4566 
4567 restart:
4568 	/* Spread kernelcore memory as evenly as possible throughout nodes */
4569 	kernelcore_node = required_kernelcore / usable_nodes;
4570 	for_each_node_state(nid, N_HIGH_MEMORY) {
4571 		unsigned long start_pfn, end_pfn;
4572 
4573 		/*
4574 		 * Recalculate kernelcore_node if the division per node
4575 		 * now exceeds what is necessary to satisfy the requested
4576 		 * amount of memory for the kernel
4577 		 */
4578 		if (required_kernelcore < kernelcore_node)
4579 			kernelcore_node = required_kernelcore / usable_nodes;
4580 
4581 		/*
4582 		 * As the map is walked, we track how much memory is usable
4583 		 * by the kernel using kernelcore_remaining. When it is
4584 		 * 0, the rest of the node is usable by ZONE_MOVABLE
4585 		 */
4586 		kernelcore_remaining = kernelcore_node;
4587 
4588 		/* Go through each range of PFNs within this node */
4589 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4590 			unsigned long size_pages;
4591 
4592 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
4593 			if (start_pfn >= end_pfn)
4594 				continue;
4595 
4596 			/* Account for what is only usable for kernelcore */
4597 			if (start_pfn < usable_startpfn) {
4598 				unsigned long kernel_pages;
4599 				kernel_pages = min(end_pfn, usable_startpfn)
4600 								- start_pfn;
4601 
4602 				kernelcore_remaining -= min(kernel_pages,
4603 							kernelcore_remaining);
4604 				required_kernelcore -= min(kernel_pages,
4605 							required_kernelcore);
4606 
4607 				/* Continue if range is now fully accounted */
4608 				if (end_pfn <= usable_startpfn) {
4609 
4610 					/*
4611 					 * Push zone_movable_pfn to the end so
4612 					 * that if we have to rebalance
4613 					 * kernelcore across nodes, we will
4614 					 * not double account here
4615 					 */
4616 					zone_movable_pfn[nid] = end_pfn;
4617 					continue;
4618 				}
4619 				start_pfn = usable_startpfn;
4620 			}
4621 
4622 			/*
4623 			 * The usable PFN range for ZONE_MOVABLE is from
4624 			 * start_pfn->end_pfn. Calculate size_pages as the
4625 			 * number of pages used as kernelcore
4626 			 */
4627 			size_pages = end_pfn - start_pfn;
4628 			if (size_pages > kernelcore_remaining)
4629 				size_pages = kernelcore_remaining;
4630 			zone_movable_pfn[nid] = start_pfn + size_pages;
4631 
4632 			/*
4633 			 * Some kernelcore has been met, update counts and
4634 			 * break if the kernelcore for this node has been
4635 			 * satisified
4636 			 */
4637 			required_kernelcore -= min(required_kernelcore,
4638 								size_pages);
4639 			kernelcore_remaining -= size_pages;
4640 			if (!kernelcore_remaining)
4641 				break;
4642 		}
4643 	}
4644 
4645 	/*
4646 	 * If there is still required_kernelcore, we do another pass with one
4647 	 * less node in the count. This will push zone_movable_pfn[nid] further
4648 	 * along on the nodes that still have memory until kernelcore is
4649 	 * satisified
4650 	 */
4651 	usable_nodes--;
4652 	if (usable_nodes && required_kernelcore > usable_nodes)
4653 		goto restart;
4654 
4655 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4656 	for (nid = 0; nid < MAX_NUMNODES; nid++)
4657 		zone_movable_pfn[nid] =
4658 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4659 
4660 out:
4661 	/* restore the node_state */
4662 	node_states[N_HIGH_MEMORY] = saved_node_state;
4663 }
4664 
4665 /* Any regular memory on that node ? */
4666 static void check_for_regular_memory(pg_data_t *pgdat)
4667 {
4668 #ifdef CONFIG_HIGHMEM
4669 	enum zone_type zone_type;
4670 
4671 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4672 		struct zone *zone = &pgdat->node_zones[zone_type];
4673 		if (zone->present_pages) {
4674 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4675 			break;
4676 		}
4677 	}
4678 #endif
4679 }
4680 
4681 /**
4682  * free_area_init_nodes - Initialise all pg_data_t and zone data
4683  * @max_zone_pfn: an array of max PFNs for each zone
4684  *
4685  * This will call free_area_init_node() for each active node in the system.
4686  * Using the page ranges provided by add_active_range(), the size of each
4687  * zone in each node and their holes is calculated. If the maximum PFN
4688  * between two adjacent zones match, it is assumed that the zone is empty.
4689  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4690  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4691  * starts where the previous one ended. For example, ZONE_DMA32 starts
4692  * at arch_max_dma_pfn.
4693  */
4694 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4695 {
4696 	unsigned long start_pfn, end_pfn;
4697 	int i, nid;
4698 
4699 	/* Record where the zone boundaries are */
4700 	memset(arch_zone_lowest_possible_pfn, 0,
4701 				sizeof(arch_zone_lowest_possible_pfn));
4702 	memset(arch_zone_highest_possible_pfn, 0,
4703 				sizeof(arch_zone_highest_possible_pfn));
4704 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4705 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4706 	for (i = 1; i < MAX_NR_ZONES; i++) {
4707 		if (i == ZONE_MOVABLE)
4708 			continue;
4709 		arch_zone_lowest_possible_pfn[i] =
4710 			arch_zone_highest_possible_pfn[i-1];
4711 		arch_zone_highest_possible_pfn[i] =
4712 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4713 	}
4714 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4715 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4716 
4717 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4718 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4719 	find_zone_movable_pfns_for_nodes();
4720 
4721 	/* Print out the zone ranges */
4722 	printk("Zone PFN ranges:\n");
4723 	for (i = 0; i < MAX_NR_ZONES; i++) {
4724 		if (i == ZONE_MOVABLE)
4725 			continue;
4726 		printk("  %-8s ", zone_names[i]);
4727 		if (arch_zone_lowest_possible_pfn[i] ==
4728 				arch_zone_highest_possible_pfn[i])
4729 			printk("empty\n");
4730 		else
4731 			printk("%0#10lx -> %0#10lx\n",
4732 				arch_zone_lowest_possible_pfn[i],
4733 				arch_zone_highest_possible_pfn[i]);
4734 	}
4735 
4736 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4737 	printk("Movable zone start PFN for each node\n");
4738 	for (i = 0; i < MAX_NUMNODES; i++) {
4739 		if (zone_movable_pfn[i])
4740 			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4741 	}
4742 
4743 	/* Print out the early_node_map[] */
4744 	printk("Early memory PFN ranges\n");
4745 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4746 		printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
4747 
4748 	/* Initialise every node */
4749 	mminit_verify_pageflags_layout();
4750 	setup_nr_node_ids();
4751 	for_each_online_node(nid) {
4752 		pg_data_t *pgdat = NODE_DATA(nid);
4753 		free_area_init_node(nid, NULL,
4754 				find_min_pfn_for_node(nid), NULL);
4755 
4756 		/* Any memory on that node */
4757 		if (pgdat->node_present_pages)
4758 			node_set_state(nid, N_HIGH_MEMORY);
4759 		check_for_regular_memory(pgdat);
4760 	}
4761 }
4762 
4763 static int __init cmdline_parse_core(char *p, unsigned long *core)
4764 {
4765 	unsigned long long coremem;
4766 	if (!p)
4767 		return -EINVAL;
4768 
4769 	coremem = memparse(p, &p);
4770 	*core = coremem >> PAGE_SHIFT;
4771 
4772 	/* Paranoid check that UL is enough for the coremem value */
4773 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4774 
4775 	return 0;
4776 }
4777 
4778 /*
4779  * kernelcore=size sets the amount of memory for use for allocations that
4780  * cannot be reclaimed or migrated.
4781  */
4782 static int __init cmdline_parse_kernelcore(char *p)
4783 {
4784 	return cmdline_parse_core(p, &required_kernelcore);
4785 }
4786 
4787 /*
4788  * movablecore=size sets the amount of memory for use for allocations that
4789  * can be reclaimed or migrated.
4790  */
4791 static int __init cmdline_parse_movablecore(char *p)
4792 {
4793 	return cmdline_parse_core(p, &required_movablecore);
4794 }
4795 
4796 early_param("kernelcore", cmdline_parse_kernelcore);
4797 early_param("movablecore", cmdline_parse_movablecore);
4798 
4799 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4800 
4801 /**
4802  * set_dma_reserve - set the specified number of pages reserved in the first zone
4803  * @new_dma_reserve: The number of pages to mark reserved
4804  *
4805  * The per-cpu batchsize and zone watermarks are determined by present_pages.
4806  * In the DMA zone, a significant percentage may be consumed by kernel image
4807  * and other unfreeable allocations which can skew the watermarks badly. This
4808  * function may optionally be used to account for unfreeable pages in the
4809  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4810  * smaller per-cpu batchsize.
4811  */
4812 void __init set_dma_reserve(unsigned long new_dma_reserve)
4813 {
4814 	dma_reserve = new_dma_reserve;
4815 }
4816 
4817 void __init free_area_init(unsigned long *zones_size)
4818 {
4819 	free_area_init_node(0, zones_size,
4820 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4821 }
4822 
4823 static int page_alloc_cpu_notify(struct notifier_block *self,
4824 				 unsigned long action, void *hcpu)
4825 {
4826 	int cpu = (unsigned long)hcpu;
4827 
4828 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4829 		lru_add_drain_cpu(cpu);
4830 		drain_pages(cpu);
4831 
4832 		/*
4833 		 * Spill the event counters of the dead processor
4834 		 * into the current processors event counters.
4835 		 * This artificially elevates the count of the current
4836 		 * processor.
4837 		 */
4838 		vm_events_fold_cpu(cpu);
4839 
4840 		/*
4841 		 * Zero the differential counters of the dead processor
4842 		 * so that the vm statistics are consistent.
4843 		 *
4844 		 * This is only okay since the processor is dead and cannot
4845 		 * race with what we are doing.
4846 		 */
4847 		refresh_cpu_vm_stats(cpu);
4848 	}
4849 	return NOTIFY_OK;
4850 }
4851 
4852 void __init page_alloc_init(void)
4853 {
4854 	hotcpu_notifier(page_alloc_cpu_notify, 0);
4855 }
4856 
4857 /*
4858  * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4859  *	or min_free_kbytes changes.
4860  */
4861 static void calculate_totalreserve_pages(void)
4862 {
4863 	struct pglist_data *pgdat;
4864 	unsigned long reserve_pages = 0;
4865 	enum zone_type i, j;
4866 
4867 	for_each_online_pgdat(pgdat) {
4868 		for (i = 0; i < MAX_NR_ZONES; i++) {
4869 			struct zone *zone = pgdat->node_zones + i;
4870 			unsigned long max = 0;
4871 
4872 			/* Find valid and maximum lowmem_reserve in the zone */
4873 			for (j = i; j < MAX_NR_ZONES; j++) {
4874 				if (zone->lowmem_reserve[j] > max)
4875 					max = zone->lowmem_reserve[j];
4876 			}
4877 
4878 			/* we treat the high watermark as reserved pages. */
4879 			max += high_wmark_pages(zone);
4880 
4881 			if (max > zone->present_pages)
4882 				max = zone->present_pages;
4883 			reserve_pages += max;
4884 			/*
4885 			 * Lowmem reserves are not available to
4886 			 * GFP_HIGHUSER page cache allocations and
4887 			 * kswapd tries to balance zones to their high
4888 			 * watermark.  As a result, neither should be
4889 			 * regarded as dirtyable memory, to prevent a
4890 			 * situation where reclaim has to clean pages
4891 			 * in order to balance the zones.
4892 			 */
4893 			zone->dirty_balance_reserve = max;
4894 		}
4895 	}
4896 	dirty_balance_reserve = reserve_pages;
4897 	totalreserve_pages = reserve_pages;
4898 }
4899 
4900 /*
4901  * setup_per_zone_lowmem_reserve - called whenever
4902  *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4903  *	has a correct pages reserved value, so an adequate number of
4904  *	pages are left in the zone after a successful __alloc_pages().
4905  */
4906 static void setup_per_zone_lowmem_reserve(void)
4907 {
4908 	struct pglist_data *pgdat;
4909 	enum zone_type j, idx;
4910 
4911 	for_each_online_pgdat(pgdat) {
4912 		for (j = 0; j < MAX_NR_ZONES; j++) {
4913 			struct zone *zone = pgdat->node_zones + j;
4914 			unsigned long present_pages = zone->present_pages;
4915 
4916 			zone->lowmem_reserve[j] = 0;
4917 
4918 			idx = j;
4919 			while (idx) {
4920 				struct zone *lower_zone;
4921 
4922 				idx--;
4923 
4924 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4925 					sysctl_lowmem_reserve_ratio[idx] = 1;
4926 
4927 				lower_zone = pgdat->node_zones + idx;
4928 				lower_zone->lowmem_reserve[j] = present_pages /
4929 					sysctl_lowmem_reserve_ratio[idx];
4930 				present_pages += lower_zone->present_pages;
4931 			}
4932 		}
4933 	}
4934 
4935 	/* update totalreserve_pages */
4936 	calculate_totalreserve_pages();
4937 }
4938 
4939 /**
4940  * setup_per_zone_wmarks - called when min_free_kbytes changes
4941  * or when memory is hot-{added|removed}
4942  *
4943  * Ensures that the watermark[min,low,high] values for each zone are set
4944  * correctly with respect to min_free_kbytes.
4945  */
4946 void setup_per_zone_wmarks(void)
4947 {
4948 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4949 	unsigned long lowmem_pages = 0;
4950 	struct zone *zone;
4951 	unsigned long flags;
4952 
4953 	/* Calculate total number of !ZONE_HIGHMEM pages */
4954 	for_each_zone(zone) {
4955 		if (!is_highmem(zone))
4956 			lowmem_pages += zone->present_pages;
4957 	}
4958 
4959 	for_each_zone(zone) {
4960 		u64 tmp;
4961 
4962 		spin_lock_irqsave(&zone->lock, flags);
4963 		tmp = (u64)pages_min * zone->present_pages;
4964 		do_div(tmp, lowmem_pages);
4965 		if (is_highmem(zone)) {
4966 			/*
4967 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4968 			 * need highmem pages, so cap pages_min to a small
4969 			 * value here.
4970 			 *
4971 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4972 			 * deltas controls asynch page reclaim, and so should
4973 			 * not be capped for highmem.
4974 			 */
4975 			int min_pages;
4976 
4977 			min_pages = zone->present_pages / 1024;
4978 			if (min_pages < SWAP_CLUSTER_MAX)
4979 				min_pages = SWAP_CLUSTER_MAX;
4980 			if (min_pages > 128)
4981 				min_pages = 128;
4982 			zone->watermark[WMARK_MIN] = min_pages;
4983 		} else {
4984 			/*
4985 			 * If it's a lowmem zone, reserve a number of pages
4986 			 * proportionate to the zone's size.
4987 			 */
4988 			zone->watermark[WMARK_MIN] = tmp;
4989 		}
4990 
4991 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4992 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4993 		setup_zone_migrate_reserve(zone);
4994 		spin_unlock_irqrestore(&zone->lock, flags);
4995 	}
4996 
4997 	/* update totalreserve_pages */
4998 	calculate_totalreserve_pages();
4999 }
5000 
5001 /*
5002  * The inactive anon list should be small enough that the VM never has to
5003  * do too much work, but large enough that each inactive page has a chance
5004  * to be referenced again before it is swapped out.
5005  *
5006  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5007  * INACTIVE_ANON pages on this zone's LRU, maintained by the
5008  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5009  * the anonymous pages are kept on the inactive list.
5010  *
5011  * total     target    max
5012  * memory    ratio     inactive anon
5013  * -------------------------------------
5014  *   10MB       1         5MB
5015  *  100MB       1        50MB
5016  *    1GB       3       250MB
5017  *   10GB      10       0.9GB
5018  *  100GB      31         3GB
5019  *    1TB     101        10GB
5020  *   10TB     320        32GB
5021  */
5022 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5023 {
5024 	unsigned int gb, ratio;
5025 
5026 	/* Zone size in gigabytes */
5027 	gb = zone->present_pages >> (30 - PAGE_SHIFT);
5028 	if (gb)
5029 		ratio = int_sqrt(10 * gb);
5030 	else
5031 		ratio = 1;
5032 
5033 	zone->inactive_ratio = ratio;
5034 }
5035 
5036 static void __meminit setup_per_zone_inactive_ratio(void)
5037 {
5038 	struct zone *zone;
5039 
5040 	for_each_zone(zone)
5041 		calculate_zone_inactive_ratio(zone);
5042 }
5043 
5044 /*
5045  * Initialise min_free_kbytes.
5046  *
5047  * For small machines we want it small (128k min).  For large machines
5048  * we want it large (64MB max).  But it is not linear, because network
5049  * bandwidth does not increase linearly with machine size.  We use
5050  *
5051  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5052  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
5053  *
5054  * which yields
5055  *
5056  * 16MB:	512k
5057  * 32MB:	724k
5058  * 64MB:	1024k
5059  * 128MB:	1448k
5060  * 256MB:	2048k
5061  * 512MB:	2896k
5062  * 1024MB:	4096k
5063  * 2048MB:	5792k
5064  * 4096MB:	8192k
5065  * 8192MB:	11584k
5066  * 16384MB:	16384k
5067  */
5068 int __meminit init_per_zone_wmark_min(void)
5069 {
5070 	unsigned long lowmem_kbytes;
5071 
5072 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5073 
5074 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5075 	if (min_free_kbytes < 128)
5076 		min_free_kbytes = 128;
5077 	if (min_free_kbytes > 65536)
5078 		min_free_kbytes = 65536;
5079 	setup_per_zone_wmarks();
5080 	refresh_zone_stat_thresholds();
5081 	setup_per_zone_lowmem_reserve();
5082 	setup_per_zone_inactive_ratio();
5083 	return 0;
5084 }
5085 module_init(init_per_zone_wmark_min)
5086 
5087 /*
5088  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5089  *	that we can call two helper functions whenever min_free_kbytes
5090  *	changes.
5091  */
5092 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
5093 	void __user *buffer, size_t *length, loff_t *ppos)
5094 {
5095 	proc_dointvec(table, write, buffer, length, ppos);
5096 	if (write)
5097 		setup_per_zone_wmarks();
5098 	return 0;
5099 }
5100 
5101 #ifdef CONFIG_NUMA
5102 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5103 	void __user *buffer, size_t *length, loff_t *ppos)
5104 {
5105 	struct zone *zone;
5106 	int rc;
5107 
5108 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5109 	if (rc)
5110 		return rc;
5111 
5112 	for_each_zone(zone)
5113 		zone->min_unmapped_pages = (zone->present_pages *
5114 				sysctl_min_unmapped_ratio) / 100;
5115 	return 0;
5116 }
5117 
5118 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5119 	void __user *buffer, size_t *length, loff_t *ppos)
5120 {
5121 	struct zone *zone;
5122 	int rc;
5123 
5124 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5125 	if (rc)
5126 		return rc;
5127 
5128 	for_each_zone(zone)
5129 		zone->min_slab_pages = (zone->present_pages *
5130 				sysctl_min_slab_ratio) / 100;
5131 	return 0;
5132 }
5133 #endif
5134 
5135 /*
5136  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5137  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5138  *	whenever sysctl_lowmem_reserve_ratio changes.
5139  *
5140  * The reserve ratio obviously has absolutely no relation with the
5141  * minimum watermarks. The lowmem reserve ratio can only make sense
5142  * if in function of the boot time zone sizes.
5143  */
5144 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5145 	void __user *buffer, size_t *length, loff_t *ppos)
5146 {
5147 	proc_dointvec_minmax(table, write, buffer, length, ppos);
5148 	setup_per_zone_lowmem_reserve();
5149 	return 0;
5150 }
5151 
5152 /*
5153  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5154  * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
5155  * can have before it gets flushed back to buddy allocator.
5156  */
5157 
5158 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5159 	void __user *buffer, size_t *length, loff_t *ppos)
5160 {
5161 	struct zone *zone;
5162 	unsigned int cpu;
5163 	int ret;
5164 
5165 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5166 	if (!write || (ret == -EINVAL))
5167 		return ret;
5168 	for_each_populated_zone(zone) {
5169 		for_each_possible_cpu(cpu) {
5170 			unsigned long  high;
5171 			high = zone->present_pages / percpu_pagelist_fraction;
5172 			setup_pagelist_highmark(
5173 				per_cpu_ptr(zone->pageset, cpu), high);
5174 		}
5175 	}
5176 	return 0;
5177 }
5178 
5179 int hashdist = HASHDIST_DEFAULT;
5180 
5181 #ifdef CONFIG_NUMA
5182 static int __init set_hashdist(char *str)
5183 {
5184 	if (!str)
5185 		return 0;
5186 	hashdist = simple_strtoul(str, &str, 0);
5187 	return 1;
5188 }
5189 __setup("hashdist=", set_hashdist);
5190 #endif
5191 
5192 /*
5193  * allocate a large system hash table from bootmem
5194  * - it is assumed that the hash table must contain an exact power-of-2
5195  *   quantity of entries
5196  * - limit is the number of hash buckets, not the total allocation size
5197  */
5198 void *__init alloc_large_system_hash(const char *tablename,
5199 				     unsigned long bucketsize,
5200 				     unsigned long numentries,
5201 				     int scale,
5202 				     int flags,
5203 				     unsigned int *_hash_shift,
5204 				     unsigned int *_hash_mask,
5205 				     unsigned long limit)
5206 {
5207 	unsigned long long max = limit;
5208 	unsigned long log2qty, size;
5209 	void *table = NULL;
5210 
5211 	/* allow the kernel cmdline to have a say */
5212 	if (!numentries) {
5213 		/* round applicable memory size up to nearest megabyte */
5214 		numentries = nr_kernel_pages;
5215 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5216 		numentries >>= 20 - PAGE_SHIFT;
5217 		numentries <<= 20 - PAGE_SHIFT;
5218 
5219 		/* limit to 1 bucket per 2^scale bytes of low memory */
5220 		if (scale > PAGE_SHIFT)
5221 			numentries >>= (scale - PAGE_SHIFT);
5222 		else
5223 			numentries <<= (PAGE_SHIFT - scale);
5224 
5225 		/* Make sure we've got at least a 0-order allocation.. */
5226 		if (unlikely(flags & HASH_SMALL)) {
5227 			/* Makes no sense without HASH_EARLY */
5228 			WARN_ON(!(flags & HASH_EARLY));
5229 			if (!(numentries >> *_hash_shift)) {
5230 				numentries = 1UL << *_hash_shift;
5231 				BUG_ON(!numentries);
5232 			}
5233 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5234 			numentries = PAGE_SIZE / bucketsize;
5235 	}
5236 	numentries = roundup_pow_of_two(numentries);
5237 
5238 	/* limit allocation size to 1/16 total memory by default */
5239 	if (max == 0) {
5240 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5241 		do_div(max, bucketsize);
5242 	}
5243 	max = min(max, 0x80000000ULL);
5244 
5245 	if (numentries > max)
5246 		numentries = max;
5247 
5248 	log2qty = ilog2(numentries);
5249 
5250 	do {
5251 		size = bucketsize << log2qty;
5252 		if (flags & HASH_EARLY)
5253 			table = alloc_bootmem_nopanic(size);
5254 		else if (hashdist)
5255 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5256 		else {
5257 			/*
5258 			 * If bucketsize is not a power-of-two, we may free
5259 			 * some pages at the end of hash table which
5260 			 * alloc_pages_exact() automatically does
5261 			 */
5262 			if (get_order(size) < MAX_ORDER) {
5263 				table = alloc_pages_exact(size, GFP_ATOMIC);
5264 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5265 			}
5266 		}
5267 	} while (!table && size > PAGE_SIZE && --log2qty);
5268 
5269 	if (!table)
5270 		panic("Failed to allocate %s hash table\n", tablename);
5271 
5272 	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5273 	       tablename,
5274 	       (1UL << log2qty),
5275 	       ilog2(size) - PAGE_SHIFT,
5276 	       size);
5277 
5278 	if (_hash_shift)
5279 		*_hash_shift = log2qty;
5280 	if (_hash_mask)
5281 		*_hash_mask = (1 << log2qty) - 1;
5282 
5283 	return table;
5284 }
5285 
5286 /* Return a pointer to the bitmap storing bits affecting a block of pages */
5287 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5288 							unsigned long pfn)
5289 {
5290 #ifdef CONFIG_SPARSEMEM
5291 	return __pfn_to_section(pfn)->pageblock_flags;
5292 #else
5293 	return zone->pageblock_flags;
5294 #endif /* CONFIG_SPARSEMEM */
5295 }
5296 
5297 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5298 {
5299 #ifdef CONFIG_SPARSEMEM
5300 	pfn &= (PAGES_PER_SECTION-1);
5301 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5302 #else
5303 	pfn = pfn - zone->zone_start_pfn;
5304 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5305 #endif /* CONFIG_SPARSEMEM */
5306 }
5307 
5308 /**
5309  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5310  * @page: The page within the block of interest
5311  * @start_bitidx: The first bit of interest to retrieve
5312  * @end_bitidx: The last bit of interest
5313  * returns pageblock_bits flags
5314  */
5315 unsigned long get_pageblock_flags_group(struct page *page,
5316 					int start_bitidx, int end_bitidx)
5317 {
5318 	struct zone *zone;
5319 	unsigned long *bitmap;
5320 	unsigned long pfn, bitidx;
5321 	unsigned long flags = 0;
5322 	unsigned long value = 1;
5323 
5324 	zone = page_zone(page);
5325 	pfn = page_to_pfn(page);
5326 	bitmap = get_pageblock_bitmap(zone, pfn);
5327 	bitidx = pfn_to_bitidx(zone, pfn);
5328 
5329 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5330 		if (test_bit(bitidx + start_bitidx, bitmap))
5331 			flags |= value;
5332 
5333 	return flags;
5334 }
5335 
5336 /**
5337  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5338  * @page: The page within the block of interest
5339  * @start_bitidx: The first bit of interest
5340  * @end_bitidx: The last bit of interest
5341  * @flags: The flags to set
5342  */
5343 void set_pageblock_flags_group(struct page *page, unsigned long flags,
5344 					int start_bitidx, int end_bitidx)
5345 {
5346 	struct zone *zone;
5347 	unsigned long *bitmap;
5348 	unsigned long pfn, bitidx;
5349 	unsigned long value = 1;
5350 
5351 	zone = page_zone(page);
5352 	pfn = page_to_pfn(page);
5353 	bitmap = get_pageblock_bitmap(zone, pfn);
5354 	bitidx = pfn_to_bitidx(zone, pfn);
5355 	VM_BUG_ON(pfn < zone->zone_start_pfn);
5356 	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5357 
5358 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5359 		if (flags & value)
5360 			__set_bit(bitidx + start_bitidx, bitmap);
5361 		else
5362 			__clear_bit(bitidx + start_bitidx, bitmap);
5363 }
5364 
5365 /*
5366  * This is designed as sub function...plz see page_isolation.c also.
5367  * set/clear page block's type to be ISOLATE.
5368  * page allocater never alloc memory from ISOLATE block.
5369  */
5370 
5371 static int
5372 __count_immobile_pages(struct zone *zone, struct page *page, int count)
5373 {
5374 	unsigned long pfn, iter, found;
5375 	/*
5376 	 * For avoiding noise data, lru_add_drain_all() should be called
5377 	 * If ZONE_MOVABLE, the zone never contains immobile pages
5378 	 */
5379 	if (zone_idx(zone) == ZONE_MOVABLE)
5380 		return true;
5381 
5382 	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
5383 		return true;
5384 
5385 	pfn = page_to_pfn(page);
5386 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5387 		unsigned long check = pfn + iter;
5388 
5389 		if (!pfn_valid_within(check))
5390 			continue;
5391 
5392 		page = pfn_to_page(check);
5393 		if (!page_count(page)) {
5394 			if (PageBuddy(page))
5395 				iter += (1 << page_order(page)) - 1;
5396 			continue;
5397 		}
5398 		if (!PageLRU(page))
5399 			found++;
5400 		/*
5401 		 * If there are RECLAIMABLE pages, we need to check it.
5402 		 * But now, memory offline itself doesn't call shrink_slab()
5403 		 * and it still to be fixed.
5404 		 */
5405 		/*
5406 		 * If the page is not RAM, page_count()should be 0.
5407 		 * we don't need more check. This is an _used_ not-movable page.
5408 		 *
5409 		 * The problematic thing here is PG_reserved pages. PG_reserved
5410 		 * is set to both of a memory hole page and a _used_ kernel
5411 		 * page at boot.
5412 		 */
5413 		if (found > count)
5414 			return false;
5415 	}
5416 	return true;
5417 }
5418 
5419 bool is_pageblock_removable_nolock(struct page *page)
5420 {
5421 	struct zone *zone;
5422 	unsigned long pfn;
5423 
5424 	/*
5425 	 * We have to be careful here because we are iterating over memory
5426 	 * sections which are not zone aware so we might end up outside of
5427 	 * the zone but still within the section.
5428 	 * We have to take care about the node as well. If the node is offline
5429 	 * its NODE_DATA will be NULL - see page_zone.
5430 	 */
5431 	if (!node_online(page_to_nid(page)))
5432 		return false;
5433 
5434 	zone = page_zone(page);
5435 	pfn = page_to_pfn(page);
5436 	if (zone->zone_start_pfn > pfn ||
5437 			zone->zone_start_pfn + zone->spanned_pages <= pfn)
5438 		return false;
5439 
5440 	return __count_immobile_pages(zone, page, 0);
5441 }
5442 
5443 int set_migratetype_isolate(struct page *page)
5444 {
5445 	struct zone *zone;
5446 	unsigned long flags, pfn;
5447 	struct memory_isolate_notify arg;
5448 	int notifier_ret;
5449 	int ret = -EBUSY;
5450 
5451 	zone = page_zone(page);
5452 
5453 	spin_lock_irqsave(&zone->lock, flags);
5454 
5455 	pfn = page_to_pfn(page);
5456 	arg.start_pfn = pfn;
5457 	arg.nr_pages = pageblock_nr_pages;
5458 	arg.pages_found = 0;
5459 
5460 	/*
5461 	 * It may be possible to isolate a pageblock even if the
5462 	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5463 	 * notifier chain is used by balloon drivers to return the
5464 	 * number of pages in a range that are held by the balloon
5465 	 * driver to shrink memory. If all the pages are accounted for
5466 	 * by balloons, are free, or on the LRU, isolation can continue.
5467 	 * Later, for example, when memory hotplug notifier runs, these
5468 	 * pages reported as "can be isolated" should be isolated(freed)
5469 	 * by the balloon driver through the memory notifier chain.
5470 	 */
5471 	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5472 	notifier_ret = notifier_to_errno(notifier_ret);
5473 	if (notifier_ret)
5474 		goto out;
5475 	/*
5476 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
5477 	 * We just check MOVABLE pages.
5478 	 */
5479 	if (__count_immobile_pages(zone, page, arg.pages_found))
5480 		ret = 0;
5481 
5482 	/*
5483 	 * immobile means "not-on-lru" paes. If immobile is larger than
5484 	 * removable-by-driver pages reported by notifier, we'll fail.
5485 	 */
5486 
5487 out:
5488 	if (!ret) {
5489 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5490 		move_freepages_block(zone, page, MIGRATE_ISOLATE);
5491 	}
5492 
5493 	spin_unlock_irqrestore(&zone->lock, flags);
5494 	if (!ret)
5495 		drain_all_pages();
5496 	return ret;
5497 }
5498 
5499 void unset_migratetype_isolate(struct page *page)
5500 {
5501 	struct zone *zone;
5502 	unsigned long flags;
5503 	zone = page_zone(page);
5504 	spin_lock_irqsave(&zone->lock, flags);
5505 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5506 		goto out;
5507 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5508 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5509 out:
5510 	spin_unlock_irqrestore(&zone->lock, flags);
5511 }
5512 
5513 #ifdef CONFIG_MEMORY_HOTREMOVE
5514 /*
5515  * All pages in the range must be isolated before calling this.
5516  */
5517 void
5518 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5519 {
5520 	struct page *page;
5521 	struct zone *zone;
5522 	int order, i;
5523 	unsigned long pfn;
5524 	unsigned long flags;
5525 	/* find the first valid pfn */
5526 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5527 		if (pfn_valid(pfn))
5528 			break;
5529 	if (pfn == end_pfn)
5530 		return;
5531 	zone = page_zone(pfn_to_page(pfn));
5532 	spin_lock_irqsave(&zone->lock, flags);
5533 	pfn = start_pfn;
5534 	while (pfn < end_pfn) {
5535 		if (!pfn_valid(pfn)) {
5536 			pfn++;
5537 			continue;
5538 		}
5539 		page = pfn_to_page(pfn);
5540 		BUG_ON(page_count(page));
5541 		BUG_ON(!PageBuddy(page));
5542 		order = page_order(page);
5543 #ifdef CONFIG_DEBUG_VM
5544 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5545 		       pfn, 1 << order, end_pfn);
5546 #endif
5547 		list_del(&page->lru);
5548 		rmv_page_order(page);
5549 		zone->free_area[order].nr_free--;
5550 		__mod_zone_page_state(zone, NR_FREE_PAGES,
5551 				      - (1UL << order));
5552 		for (i = 0; i < (1 << order); i++)
5553 			SetPageReserved((page+i));
5554 		pfn += (1 << order);
5555 	}
5556 	spin_unlock_irqrestore(&zone->lock, flags);
5557 }
5558 #endif
5559 
5560 #ifdef CONFIG_MEMORY_FAILURE
5561 bool is_free_buddy_page(struct page *page)
5562 {
5563 	struct zone *zone = page_zone(page);
5564 	unsigned long pfn = page_to_pfn(page);
5565 	unsigned long flags;
5566 	int order;
5567 
5568 	spin_lock_irqsave(&zone->lock, flags);
5569 	for (order = 0; order < MAX_ORDER; order++) {
5570 		struct page *page_head = page - (pfn & ((1 << order) - 1));
5571 
5572 		if (PageBuddy(page_head) && page_order(page_head) >= order)
5573 			break;
5574 	}
5575 	spin_unlock_irqrestore(&zone->lock, flags);
5576 
5577 	return order < MAX_ORDER;
5578 }
5579 #endif
5580 
5581 static struct trace_print_flags pageflag_names[] = {
5582 	{1UL << PG_locked,		"locked"	},
5583 	{1UL << PG_error,		"error"		},
5584 	{1UL << PG_referenced,		"referenced"	},
5585 	{1UL << PG_uptodate,		"uptodate"	},
5586 	{1UL << PG_dirty,		"dirty"		},
5587 	{1UL << PG_lru,			"lru"		},
5588 	{1UL << PG_active,		"active"	},
5589 	{1UL << PG_slab,		"slab"		},
5590 	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
5591 	{1UL << PG_arch_1,		"arch_1"	},
5592 	{1UL << PG_reserved,		"reserved"	},
5593 	{1UL << PG_private,		"private"	},
5594 	{1UL << PG_private_2,		"private_2"	},
5595 	{1UL << PG_writeback,		"writeback"	},
5596 #ifdef CONFIG_PAGEFLAGS_EXTENDED
5597 	{1UL << PG_head,		"head"		},
5598 	{1UL << PG_tail,		"tail"		},
5599 #else
5600 	{1UL << PG_compound,		"compound"	},
5601 #endif
5602 	{1UL << PG_swapcache,		"swapcache"	},
5603 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
5604 	{1UL << PG_reclaim,		"reclaim"	},
5605 	{1UL << PG_swapbacked,		"swapbacked"	},
5606 	{1UL << PG_unevictable,		"unevictable"	},
5607 #ifdef CONFIG_MMU
5608 	{1UL << PG_mlocked,		"mlocked"	},
5609 #endif
5610 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
5611 	{1UL << PG_uncached,		"uncached"	},
5612 #endif
5613 #ifdef CONFIG_MEMORY_FAILURE
5614 	{1UL << PG_hwpoison,		"hwpoison"	},
5615 #endif
5616 	{-1UL,				NULL		},
5617 };
5618 
5619 static void dump_page_flags(unsigned long flags)
5620 {
5621 	const char *delim = "";
5622 	unsigned long mask;
5623 	int i;
5624 
5625 	printk(KERN_ALERT "page flags: %#lx(", flags);
5626 
5627 	/* remove zone id */
5628 	flags &= (1UL << NR_PAGEFLAGS) - 1;
5629 
5630 	for (i = 0; pageflag_names[i].name && flags; i++) {
5631 
5632 		mask = pageflag_names[i].mask;
5633 		if ((flags & mask) != mask)
5634 			continue;
5635 
5636 		flags &= ~mask;
5637 		printk("%s%s", delim, pageflag_names[i].name);
5638 		delim = "|";
5639 	}
5640 
5641 	/* check for left over flags */
5642 	if (flags)
5643 		printk("%s%#lx", delim, flags);
5644 
5645 	printk(")\n");
5646 }
5647 
5648 void dump_page(struct page *page)
5649 {
5650 	printk(KERN_ALERT
5651 	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5652 		page, atomic_read(&page->_count), page_mapcount(page),
5653 		page->mapping, page->index);
5654 	dump_page_flags(page->flags);
5655 	mem_cgroup_print_bad_page(page);
5656 }
5657