xref: /linux/mm/page_alloc.c (revision 615f2e5c531bc57d5a190f321d697988e950ae4d)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list, the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/compiler.h>
26 #include <linux/kernel.h>
27 #include <linux/kmemcheck.h>
28 #include <linux/module.h>
29 #include <linux/suspend.h>
30 #include <linux/pagevec.h>
31 #include <linux/blkdev.h>
32 #include <linux/slab.h>
33 #include <linux/ratelimit.h>
34 #include <linux/oom.h>
35 #include <linux/notifier.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/stop_machine.h>
46 #include <linux/sort.h>
47 #include <linux/pfn.h>
48 #include <linux/backing-dev.h>
49 #include <linux/fault-inject.h>
50 #include <linux/page-isolation.h>
51 #include <linux/page_cgroup.h>
52 #include <linux/debugobjects.h>
53 #include <linux/kmemleak.h>
54 #include <linux/compaction.h>
55 #include <trace/events/kmem.h>
56 #include <linux/ftrace_event.h>
57 #include <linux/memcontrol.h>
58 #include <linux/prefetch.h>
59 #include <linux/migrate.h>
60 #include <linux/page-debug-flags.h>
61 #include <linux/sched/rt.h>
62 
63 #include <asm/tlbflush.h>
64 #include <asm/div64.h>
65 #include "internal.h"
66 
67 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
68 DEFINE_PER_CPU(int, numa_node);
69 EXPORT_PER_CPU_SYMBOL(numa_node);
70 #endif
71 
72 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
73 /*
74  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
75  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
76  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
77  * defined in <linux/topology.h>.
78  */
79 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
80 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
81 #endif
82 
83 /*
84  * Array of node states.
85  */
86 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
87 	[N_POSSIBLE] = NODE_MASK_ALL,
88 	[N_ONLINE] = { { [0] = 1UL } },
89 #ifndef CONFIG_NUMA
90 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
91 #ifdef CONFIG_HIGHMEM
92 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
93 #endif
94 #ifdef CONFIG_MOVABLE_NODE
95 	[N_MEMORY] = { { [0] = 1UL } },
96 #endif
97 	[N_CPU] = { { [0] = 1UL } },
98 #endif	/* NUMA */
99 };
100 EXPORT_SYMBOL(node_states);
101 
102 unsigned long totalram_pages __read_mostly;
103 unsigned long totalreserve_pages __read_mostly;
104 /*
105  * When calculating the number of globally allowed dirty pages, there
106  * is a certain number of per-zone reserves that should not be
107  * considered dirtyable memory.  This is the sum of those reserves
108  * over all existing zones that contribute dirtyable memory.
109  */
110 unsigned long dirty_balance_reserve __read_mostly;
111 
112 int percpu_pagelist_fraction;
113 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
114 
115 #ifdef CONFIG_PM_SLEEP
116 /*
117  * The following functions are used by the suspend/hibernate code to temporarily
118  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
119  * while devices are suspended.  To avoid races with the suspend/hibernate code,
120  * they should always be called with pm_mutex held (gfp_allowed_mask also should
121  * only be modified with pm_mutex held, unless the suspend/hibernate code is
122  * guaranteed not to run in parallel with that modification).
123  */
124 
125 static gfp_t saved_gfp_mask;
126 
127 void pm_restore_gfp_mask(void)
128 {
129 	WARN_ON(!mutex_is_locked(&pm_mutex));
130 	if (saved_gfp_mask) {
131 		gfp_allowed_mask = saved_gfp_mask;
132 		saved_gfp_mask = 0;
133 	}
134 }
135 
136 void pm_restrict_gfp_mask(void)
137 {
138 	WARN_ON(!mutex_is_locked(&pm_mutex));
139 	WARN_ON(saved_gfp_mask);
140 	saved_gfp_mask = gfp_allowed_mask;
141 	gfp_allowed_mask &= ~GFP_IOFS;
142 }
143 
144 bool pm_suspended_storage(void)
145 {
146 	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
147 		return false;
148 	return true;
149 }
150 #endif /* CONFIG_PM_SLEEP */
151 
152 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
153 int pageblock_order __read_mostly;
154 #endif
155 
156 static void __free_pages_ok(struct page *page, unsigned int order);
157 
158 /*
159  * results with 256, 32 in the lowmem_reserve sysctl:
160  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
161  *	1G machine -> (16M dma, 784M normal, 224M high)
162  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
163  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
164  *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
165  *
166  * TBD: should special case ZONE_DMA32 machines here - in those we normally
167  * don't need any ZONE_NORMAL reservation
168  */
169 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
170 #ifdef CONFIG_ZONE_DMA
171 	 256,
172 #endif
173 #ifdef CONFIG_ZONE_DMA32
174 	 256,
175 #endif
176 #ifdef CONFIG_HIGHMEM
177 	 32,
178 #endif
179 	 32,
180 };
181 
182 EXPORT_SYMBOL(totalram_pages);
183 
184 static char * const zone_names[MAX_NR_ZONES] = {
185 #ifdef CONFIG_ZONE_DMA
186 	 "DMA",
187 #endif
188 #ifdef CONFIG_ZONE_DMA32
189 	 "DMA32",
190 #endif
191 	 "Normal",
192 #ifdef CONFIG_HIGHMEM
193 	 "HighMem",
194 #endif
195 	 "Movable",
196 };
197 
198 int min_free_kbytes = 1024;
199 
200 static unsigned long __meminitdata nr_kernel_pages;
201 static unsigned long __meminitdata nr_all_pages;
202 static unsigned long __meminitdata dma_reserve;
203 
204 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
205 /* Movable memory ranges, will also be used by memblock subsystem. */
206 struct movablemem_map movablemem_map = {
207 	.acpi = false,
208 	.nr_map = 0,
209 };
210 
211 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
212 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
213 static unsigned long __initdata required_kernelcore;
214 static unsigned long __initdata required_movablecore;
215 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
216 static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES];
217 
218 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
219 int movable_zone;
220 EXPORT_SYMBOL(movable_zone);
221 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
222 
223 #if MAX_NUMNODES > 1
224 int nr_node_ids __read_mostly = MAX_NUMNODES;
225 int nr_online_nodes __read_mostly = 1;
226 EXPORT_SYMBOL(nr_node_ids);
227 EXPORT_SYMBOL(nr_online_nodes);
228 #endif
229 
230 int page_group_by_mobility_disabled __read_mostly;
231 
232 void set_pageblock_migratetype(struct page *page, int migratetype)
233 {
234 
235 	if (unlikely(page_group_by_mobility_disabled))
236 		migratetype = MIGRATE_UNMOVABLE;
237 
238 	set_pageblock_flags_group(page, (unsigned long)migratetype,
239 					PB_migrate, PB_migrate_end);
240 }
241 
242 bool oom_killer_disabled __read_mostly;
243 
244 #ifdef CONFIG_DEBUG_VM
245 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
246 {
247 	int ret = 0;
248 	unsigned seq;
249 	unsigned long pfn = page_to_pfn(page);
250 	unsigned long sp, start_pfn;
251 
252 	do {
253 		seq = zone_span_seqbegin(zone);
254 		start_pfn = zone->zone_start_pfn;
255 		sp = zone->spanned_pages;
256 		if (!zone_spans_pfn(zone, pfn))
257 			ret = 1;
258 	} while (zone_span_seqretry(zone, seq));
259 
260 	if (ret)
261 		pr_err("page %lu outside zone [ %lu - %lu ]\n",
262 			pfn, start_pfn, start_pfn + sp);
263 
264 	return ret;
265 }
266 
267 static int page_is_consistent(struct zone *zone, struct page *page)
268 {
269 	if (!pfn_valid_within(page_to_pfn(page)))
270 		return 0;
271 	if (zone != page_zone(page))
272 		return 0;
273 
274 	return 1;
275 }
276 /*
277  * Temporary debugging check for pages not lying within a given zone.
278  */
279 static int bad_range(struct zone *zone, struct page *page)
280 {
281 	if (page_outside_zone_boundaries(zone, page))
282 		return 1;
283 	if (!page_is_consistent(zone, page))
284 		return 1;
285 
286 	return 0;
287 }
288 #else
289 static inline int bad_range(struct zone *zone, struct page *page)
290 {
291 	return 0;
292 }
293 #endif
294 
295 static void bad_page(struct page *page)
296 {
297 	static unsigned long resume;
298 	static unsigned long nr_shown;
299 	static unsigned long nr_unshown;
300 
301 	/* Don't complain about poisoned pages */
302 	if (PageHWPoison(page)) {
303 		page_mapcount_reset(page); /* remove PageBuddy */
304 		return;
305 	}
306 
307 	/*
308 	 * Allow a burst of 60 reports, then keep quiet for that minute;
309 	 * or allow a steady drip of one report per second.
310 	 */
311 	if (nr_shown == 60) {
312 		if (time_before(jiffies, resume)) {
313 			nr_unshown++;
314 			goto out;
315 		}
316 		if (nr_unshown) {
317 			printk(KERN_ALERT
318 			      "BUG: Bad page state: %lu messages suppressed\n",
319 				nr_unshown);
320 			nr_unshown = 0;
321 		}
322 		nr_shown = 0;
323 	}
324 	if (nr_shown++ == 0)
325 		resume = jiffies + 60 * HZ;
326 
327 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
328 		current->comm, page_to_pfn(page));
329 	dump_page(page);
330 
331 	print_modules();
332 	dump_stack();
333 out:
334 	/* Leave bad fields for debug, except PageBuddy could make trouble */
335 	page_mapcount_reset(page); /* remove PageBuddy */
336 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
337 }
338 
339 /*
340  * Higher-order pages are called "compound pages".  They are structured thusly:
341  *
342  * The first PAGE_SIZE page is called the "head page".
343  *
344  * The remaining PAGE_SIZE pages are called "tail pages".
345  *
346  * All pages have PG_compound set.  All tail pages have their ->first_page
347  * pointing at the head page.
348  *
349  * The first tail page's ->lru.next holds the address of the compound page's
350  * put_page() function.  Its ->lru.prev holds the order of allocation.
351  * This usage means that zero-order pages may not be compound.
352  */
353 
354 static void free_compound_page(struct page *page)
355 {
356 	__free_pages_ok(page, compound_order(page));
357 }
358 
359 void prep_compound_page(struct page *page, unsigned long order)
360 {
361 	int i;
362 	int nr_pages = 1 << order;
363 
364 	set_compound_page_dtor(page, free_compound_page);
365 	set_compound_order(page, order);
366 	__SetPageHead(page);
367 	for (i = 1; i < nr_pages; i++) {
368 		struct page *p = page + i;
369 		__SetPageTail(p);
370 		set_page_count(p, 0);
371 		p->first_page = page;
372 	}
373 }
374 
375 /* update __split_huge_page_refcount if you change this function */
376 static int destroy_compound_page(struct page *page, unsigned long order)
377 {
378 	int i;
379 	int nr_pages = 1 << order;
380 	int bad = 0;
381 
382 	if (unlikely(compound_order(page) != order)) {
383 		bad_page(page);
384 		bad++;
385 	}
386 
387 	__ClearPageHead(page);
388 
389 	for (i = 1; i < nr_pages; i++) {
390 		struct page *p = page + i;
391 
392 		if (unlikely(!PageTail(p) || (p->first_page != page))) {
393 			bad_page(page);
394 			bad++;
395 		}
396 		__ClearPageTail(p);
397 	}
398 
399 	return bad;
400 }
401 
402 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
403 {
404 	int i;
405 
406 	/*
407 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
408 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
409 	 */
410 	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
411 	for (i = 0; i < (1 << order); i++)
412 		clear_highpage(page + i);
413 }
414 
415 #ifdef CONFIG_DEBUG_PAGEALLOC
416 unsigned int _debug_guardpage_minorder;
417 
418 static int __init debug_guardpage_minorder_setup(char *buf)
419 {
420 	unsigned long res;
421 
422 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
423 		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
424 		return 0;
425 	}
426 	_debug_guardpage_minorder = res;
427 	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
428 	return 0;
429 }
430 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
431 
432 static inline void set_page_guard_flag(struct page *page)
433 {
434 	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
435 }
436 
437 static inline void clear_page_guard_flag(struct page *page)
438 {
439 	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
440 }
441 #else
442 static inline void set_page_guard_flag(struct page *page) { }
443 static inline void clear_page_guard_flag(struct page *page) { }
444 #endif
445 
446 static inline void set_page_order(struct page *page, int order)
447 {
448 	set_page_private(page, order);
449 	__SetPageBuddy(page);
450 }
451 
452 static inline void rmv_page_order(struct page *page)
453 {
454 	__ClearPageBuddy(page);
455 	set_page_private(page, 0);
456 }
457 
458 /*
459  * Locate the struct page for both the matching buddy in our
460  * pair (buddy1) and the combined O(n+1) page they form (page).
461  *
462  * 1) Any buddy B1 will have an order O twin B2 which satisfies
463  * the following equation:
464  *     B2 = B1 ^ (1 << O)
465  * For example, if the starting buddy (buddy2) is #8 its order
466  * 1 buddy is #10:
467  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
468  *
469  * 2) Any buddy B will have an order O+1 parent P which
470  * satisfies the following equation:
471  *     P = B & ~(1 << O)
472  *
473  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
474  */
475 static inline unsigned long
476 __find_buddy_index(unsigned long page_idx, unsigned int order)
477 {
478 	return page_idx ^ (1 << order);
479 }
480 
481 /*
482  * This function checks whether a page is free && is the buddy
483  * we can do coalesce a page and its buddy if
484  * (a) the buddy is not in a hole &&
485  * (b) the buddy is in the buddy system &&
486  * (c) a page and its buddy have the same order &&
487  * (d) a page and its buddy are in the same zone.
488  *
489  * For recording whether a page is in the buddy system, we set ->_mapcount -2.
490  * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
491  *
492  * For recording page's order, we use page_private(page).
493  */
494 static inline int page_is_buddy(struct page *page, struct page *buddy,
495 								int order)
496 {
497 	if (!pfn_valid_within(page_to_pfn(buddy)))
498 		return 0;
499 
500 	if (page_zone_id(page) != page_zone_id(buddy))
501 		return 0;
502 
503 	if (page_is_guard(buddy) && page_order(buddy) == order) {
504 		VM_BUG_ON(page_count(buddy) != 0);
505 		return 1;
506 	}
507 
508 	if (PageBuddy(buddy) && page_order(buddy) == order) {
509 		VM_BUG_ON(page_count(buddy) != 0);
510 		return 1;
511 	}
512 	return 0;
513 }
514 
515 /*
516  * Freeing function for a buddy system allocator.
517  *
518  * The concept of a buddy system is to maintain direct-mapped table
519  * (containing bit values) for memory blocks of various "orders".
520  * The bottom level table contains the map for the smallest allocatable
521  * units of memory (here, pages), and each level above it describes
522  * pairs of units from the levels below, hence, "buddies".
523  * At a high level, all that happens here is marking the table entry
524  * at the bottom level available, and propagating the changes upward
525  * as necessary, plus some accounting needed to play nicely with other
526  * parts of the VM system.
527  * At each level, we keep a list of pages, which are heads of continuous
528  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
529  * order is recorded in page_private(page) field.
530  * So when we are allocating or freeing one, we can derive the state of the
531  * other.  That is, if we allocate a small block, and both were
532  * free, the remainder of the region must be split into blocks.
533  * If a block is freed, and its buddy is also free, then this
534  * triggers coalescing into a block of larger size.
535  *
536  * -- nyc
537  */
538 
539 static inline void __free_one_page(struct page *page,
540 		struct zone *zone, unsigned int order,
541 		int migratetype)
542 {
543 	unsigned long page_idx;
544 	unsigned long combined_idx;
545 	unsigned long uninitialized_var(buddy_idx);
546 	struct page *buddy;
547 
548 	VM_BUG_ON(!zone_is_initialized(zone));
549 
550 	if (unlikely(PageCompound(page)))
551 		if (unlikely(destroy_compound_page(page, order)))
552 			return;
553 
554 	VM_BUG_ON(migratetype == -1);
555 
556 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
557 
558 	VM_BUG_ON(page_idx & ((1 << order) - 1));
559 	VM_BUG_ON(bad_range(zone, page));
560 
561 	while (order < MAX_ORDER-1) {
562 		buddy_idx = __find_buddy_index(page_idx, order);
563 		buddy = page + (buddy_idx - page_idx);
564 		if (!page_is_buddy(page, buddy, order))
565 			break;
566 		/*
567 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
568 		 * merge with it and move up one order.
569 		 */
570 		if (page_is_guard(buddy)) {
571 			clear_page_guard_flag(buddy);
572 			set_page_private(page, 0);
573 			__mod_zone_freepage_state(zone, 1 << order,
574 						  migratetype);
575 		} else {
576 			list_del(&buddy->lru);
577 			zone->free_area[order].nr_free--;
578 			rmv_page_order(buddy);
579 		}
580 		combined_idx = buddy_idx & page_idx;
581 		page = page + (combined_idx - page_idx);
582 		page_idx = combined_idx;
583 		order++;
584 	}
585 	set_page_order(page, order);
586 
587 	/*
588 	 * If this is not the largest possible page, check if the buddy
589 	 * of the next-highest order is free. If it is, it's possible
590 	 * that pages are being freed that will coalesce soon. In case,
591 	 * that is happening, add the free page to the tail of the list
592 	 * so it's less likely to be used soon and more likely to be merged
593 	 * as a higher order page
594 	 */
595 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
596 		struct page *higher_page, *higher_buddy;
597 		combined_idx = buddy_idx & page_idx;
598 		higher_page = page + (combined_idx - page_idx);
599 		buddy_idx = __find_buddy_index(combined_idx, order + 1);
600 		higher_buddy = higher_page + (buddy_idx - combined_idx);
601 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
602 			list_add_tail(&page->lru,
603 				&zone->free_area[order].free_list[migratetype]);
604 			goto out;
605 		}
606 	}
607 
608 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
609 out:
610 	zone->free_area[order].nr_free++;
611 }
612 
613 static inline int free_pages_check(struct page *page)
614 {
615 	if (unlikely(page_mapcount(page) |
616 		(page->mapping != NULL)  |
617 		(atomic_read(&page->_count) != 0) |
618 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
619 		(mem_cgroup_bad_page_check(page)))) {
620 		bad_page(page);
621 		return 1;
622 	}
623 	page_nid_reset_last(page);
624 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
625 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
626 	return 0;
627 }
628 
629 /*
630  * Frees a number of pages from the PCP lists
631  * Assumes all pages on list are in same zone, and of same order.
632  * count is the number of pages to free.
633  *
634  * If the zone was previously in an "all pages pinned" state then look to
635  * see if this freeing clears that state.
636  *
637  * And clear the zone's pages_scanned counter, to hold off the "all pages are
638  * pinned" detection logic.
639  */
640 static void free_pcppages_bulk(struct zone *zone, int count,
641 					struct per_cpu_pages *pcp)
642 {
643 	int migratetype = 0;
644 	int batch_free = 0;
645 	int to_free = count;
646 
647 	spin_lock(&zone->lock);
648 	zone->all_unreclaimable = 0;
649 	zone->pages_scanned = 0;
650 
651 	while (to_free) {
652 		struct page *page;
653 		struct list_head *list;
654 
655 		/*
656 		 * Remove pages from lists in a round-robin fashion. A
657 		 * batch_free count is maintained that is incremented when an
658 		 * empty list is encountered.  This is so more pages are freed
659 		 * off fuller lists instead of spinning excessively around empty
660 		 * lists
661 		 */
662 		do {
663 			batch_free++;
664 			if (++migratetype == MIGRATE_PCPTYPES)
665 				migratetype = 0;
666 			list = &pcp->lists[migratetype];
667 		} while (list_empty(list));
668 
669 		/* This is the only non-empty list. Free them all. */
670 		if (batch_free == MIGRATE_PCPTYPES)
671 			batch_free = to_free;
672 
673 		do {
674 			int mt;	/* migratetype of the to-be-freed page */
675 
676 			page = list_entry(list->prev, struct page, lru);
677 			/* must delete as __free_one_page list manipulates */
678 			list_del(&page->lru);
679 			mt = get_freepage_migratetype(page);
680 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
681 			__free_one_page(page, zone, 0, mt);
682 			trace_mm_page_pcpu_drain(page, 0, mt);
683 			if (likely(!is_migrate_isolate_page(page))) {
684 				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
685 				if (is_migrate_cma(mt))
686 					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
687 			}
688 		} while (--to_free && --batch_free && !list_empty(list));
689 	}
690 	spin_unlock(&zone->lock);
691 }
692 
693 static void free_one_page(struct zone *zone, struct page *page, int order,
694 				int migratetype)
695 {
696 	spin_lock(&zone->lock);
697 	zone->all_unreclaimable = 0;
698 	zone->pages_scanned = 0;
699 
700 	__free_one_page(page, zone, order, migratetype);
701 	if (unlikely(!is_migrate_isolate(migratetype)))
702 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
703 	spin_unlock(&zone->lock);
704 }
705 
706 static bool free_pages_prepare(struct page *page, unsigned int order)
707 {
708 	int i;
709 	int bad = 0;
710 
711 	trace_mm_page_free(page, order);
712 	kmemcheck_free_shadow(page, order);
713 
714 	if (PageAnon(page))
715 		page->mapping = NULL;
716 	for (i = 0; i < (1 << order); i++)
717 		bad += free_pages_check(page + i);
718 	if (bad)
719 		return false;
720 
721 	if (!PageHighMem(page)) {
722 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
723 		debug_check_no_obj_freed(page_address(page),
724 					   PAGE_SIZE << order);
725 	}
726 	arch_free_page(page, order);
727 	kernel_map_pages(page, 1 << order, 0);
728 
729 	return true;
730 }
731 
732 static void __free_pages_ok(struct page *page, unsigned int order)
733 {
734 	unsigned long flags;
735 	int migratetype;
736 
737 	if (!free_pages_prepare(page, order))
738 		return;
739 
740 	local_irq_save(flags);
741 	__count_vm_events(PGFREE, 1 << order);
742 	migratetype = get_pageblock_migratetype(page);
743 	set_freepage_migratetype(page, migratetype);
744 	free_one_page(page_zone(page), page, order, migratetype);
745 	local_irq_restore(flags);
746 }
747 
748 /*
749  * Read access to zone->managed_pages is safe because it's unsigned long,
750  * but we still need to serialize writers. Currently all callers of
751  * __free_pages_bootmem() except put_page_bootmem() should only be used
752  * at boot time. So for shorter boot time, we shift the burden to
753  * put_page_bootmem() to serialize writers.
754  */
755 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
756 {
757 	unsigned int nr_pages = 1 << order;
758 	unsigned int loop;
759 
760 	prefetchw(page);
761 	for (loop = 0; loop < nr_pages; loop++) {
762 		struct page *p = &page[loop];
763 
764 		if (loop + 1 < nr_pages)
765 			prefetchw(p + 1);
766 		__ClearPageReserved(p);
767 		set_page_count(p, 0);
768 	}
769 
770 	page_zone(page)->managed_pages += 1 << order;
771 	set_page_refcounted(page);
772 	__free_pages(page, order);
773 }
774 
775 #ifdef CONFIG_CMA
776 /* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
777 void __init init_cma_reserved_pageblock(struct page *page)
778 {
779 	unsigned i = pageblock_nr_pages;
780 	struct page *p = page;
781 
782 	do {
783 		__ClearPageReserved(p);
784 		set_page_count(p, 0);
785 	} while (++p, --i);
786 
787 	set_page_refcounted(page);
788 	set_pageblock_migratetype(page, MIGRATE_CMA);
789 	__free_pages(page, pageblock_order);
790 	totalram_pages += pageblock_nr_pages;
791 #ifdef CONFIG_HIGHMEM
792 	if (PageHighMem(page))
793 		totalhigh_pages += pageblock_nr_pages;
794 #endif
795 }
796 #endif
797 
798 /*
799  * The order of subdivision here is critical for the IO subsystem.
800  * Please do not alter this order without good reasons and regression
801  * testing. Specifically, as large blocks of memory are subdivided,
802  * the order in which smaller blocks are delivered depends on the order
803  * they're subdivided in this function. This is the primary factor
804  * influencing the order in which pages are delivered to the IO
805  * subsystem according to empirical testing, and this is also justified
806  * by considering the behavior of a buddy system containing a single
807  * large block of memory acted on by a series of small allocations.
808  * This behavior is a critical factor in sglist merging's success.
809  *
810  * -- nyc
811  */
812 static inline void expand(struct zone *zone, struct page *page,
813 	int low, int high, struct free_area *area,
814 	int migratetype)
815 {
816 	unsigned long size = 1 << high;
817 
818 	while (high > low) {
819 		area--;
820 		high--;
821 		size >>= 1;
822 		VM_BUG_ON(bad_range(zone, &page[size]));
823 
824 #ifdef CONFIG_DEBUG_PAGEALLOC
825 		if (high < debug_guardpage_minorder()) {
826 			/*
827 			 * Mark as guard pages (or page), that will allow to
828 			 * merge back to allocator when buddy will be freed.
829 			 * Corresponding page table entries will not be touched,
830 			 * pages will stay not present in virtual address space
831 			 */
832 			INIT_LIST_HEAD(&page[size].lru);
833 			set_page_guard_flag(&page[size]);
834 			set_page_private(&page[size], high);
835 			/* Guard pages are not available for any usage */
836 			__mod_zone_freepage_state(zone, -(1 << high),
837 						  migratetype);
838 			continue;
839 		}
840 #endif
841 		list_add(&page[size].lru, &area->free_list[migratetype]);
842 		area->nr_free++;
843 		set_page_order(&page[size], high);
844 	}
845 }
846 
847 /*
848  * This page is about to be returned from the page allocator
849  */
850 static inline int check_new_page(struct page *page)
851 {
852 	if (unlikely(page_mapcount(page) |
853 		(page->mapping != NULL)  |
854 		(atomic_read(&page->_count) != 0)  |
855 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
856 		(mem_cgroup_bad_page_check(page)))) {
857 		bad_page(page);
858 		return 1;
859 	}
860 	return 0;
861 }
862 
863 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
864 {
865 	int i;
866 
867 	for (i = 0; i < (1 << order); i++) {
868 		struct page *p = page + i;
869 		if (unlikely(check_new_page(p)))
870 			return 1;
871 	}
872 
873 	set_page_private(page, 0);
874 	set_page_refcounted(page);
875 
876 	arch_alloc_page(page, order);
877 	kernel_map_pages(page, 1 << order, 1);
878 
879 	if (gfp_flags & __GFP_ZERO)
880 		prep_zero_page(page, order, gfp_flags);
881 
882 	if (order && (gfp_flags & __GFP_COMP))
883 		prep_compound_page(page, order);
884 
885 	return 0;
886 }
887 
888 /*
889  * Go through the free lists for the given migratetype and remove
890  * the smallest available page from the freelists
891  */
892 static inline
893 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
894 						int migratetype)
895 {
896 	unsigned int current_order;
897 	struct free_area * area;
898 	struct page *page;
899 
900 	/* Find a page of the appropriate size in the preferred list */
901 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
902 		area = &(zone->free_area[current_order]);
903 		if (list_empty(&area->free_list[migratetype]))
904 			continue;
905 
906 		page = list_entry(area->free_list[migratetype].next,
907 							struct page, lru);
908 		list_del(&page->lru);
909 		rmv_page_order(page);
910 		area->nr_free--;
911 		expand(zone, page, order, current_order, area, migratetype);
912 		return page;
913 	}
914 
915 	return NULL;
916 }
917 
918 
919 /*
920  * This array describes the order lists are fallen back to when
921  * the free lists for the desirable migrate type are depleted
922  */
923 static int fallbacks[MIGRATE_TYPES][4] = {
924 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
925 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
926 #ifdef CONFIG_CMA
927 	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
928 	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
929 #else
930 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
931 #endif
932 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
933 #ifdef CONFIG_MEMORY_ISOLATION
934 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
935 #endif
936 };
937 
938 /*
939  * Move the free pages in a range to the free lists of the requested type.
940  * Note that start_page and end_pages are not aligned on a pageblock
941  * boundary. If alignment is required, use move_freepages_block()
942  */
943 int move_freepages(struct zone *zone,
944 			  struct page *start_page, struct page *end_page,
945 			  int migratetype)
946 {
947 	struct page *page;
948 	unsigned long order;
949 	int pages_moved = 0;
950 
951 #ifndef CONFIG_HOLES_IN_ZONE
952 	/*
953 	 * page_zone is not safe to call in this context when
954 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
955 	 * anyway as we check zone boundaries in move_freepages_block().
956 	 * Remove at a later date when no bug reports exist related to
957 	 * grouping pages by mobility
958 	 */
959 	BUG_ON(page_zone(start_page) != page_zone(end_page));
960 #endif
961 
962 	for (page = start_page; page <= end_page;) {
963 		/* Make sure we are not inadvertently changing nodes */
964 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
965 
966 		if (!pfn_valid_within(page_to_pfn(page))) {
967 			page++;
968 			continue;
969 		}
970 
971 		if (!PageBuddy(page)) {
972 			page++;
973 			continue;
974 		}
975 
976 		order = page_order(page);
977 		list_move(&page->lru,
978 			  &zone->free_area[order].free_list[migratetype]);
979 		set_freepage_migratetype(page, migratetype);
980 		page += 1 << order;
981 		pages_moved += 1 << order;
982 	}
983 
984 	return pages_moved;
985 }
986 
987 int move_freepages_block(struct zone *zone, struct page *page,
988 				int migratetype)
989 {
990 	unsigned long start_pfn, end_pfn;
991 	struct page *start_page, *end_page;
992 
993 	start_pfn = page_to_pfn(page);
994 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
995 	start_page = pfn_to_page(start_pfn);
996 	end_page = start_page + pageblock_nr_pages - 1;
997 	end_pfn = start_pfn + pageblock_nr_pages - 1;
998 
999 	/* Do not cross zone boundaries */
1000 	if (!zone_spans_pfn(zone, start_pfn))
1001 		start_page = page;
1002 	if (!zone_spans_pfn(zone, end_pfn))
1003 		return 0;
1004 
1005 	return move_freepages(zone, start_page, end_page, migratetype);
1006 }
1007 
1008 static void change_pageblock_range(struct page *pageblock_page,
1009 					int start_order, int migratetype)
1010 {
1011 	int nr_pageblocks = 1 << (start_order - pageblock_order);
1012 
1013 	while (nr_pageblocks--) {
1014 		set_pageblock_migratetype(pageblock_page, migratetype);
1015 		pageblock_page += pageblock_nr_pages;
1016 	}
1017 }
1018 
1019 /* Remove an element from the buddy allocator from the fallback list */
1020 static inline struct page *
1021 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
1022 {
1023 	struct free_area * area;
1024 	int current_order;
1025 	struct page *page;
1026 	int migratetype, i;
1027 
1028 	/* Find the largest possible block of pages in the other list */
1029 	for (current_order = MAX_ORDER-1; current_order >= order;
1030 						--current_order) {
1031 		for (i = 0;; i++) {
1032 			migratetype = fallbacks[start_migratetype][i];
1033 
1034 			/* MIGRATE_RESERVE handled later if necessary */
1035 			if (migratetype == MIGRATE_RESERVE)
1036 				break;
1037 
1038 			area = &(zone->free_area[current_order]);
1039 			if (list_empty(&area->free_list[migratetype]))
1040 				continue;
1041 
1042 			page = list_entry(area->free_list[migratetype].next,
1043 					struct page, lru);
1044 			area->nr_free--;
1045 
1046 			/*
1047 			 * If breaking a large block of pages, move all free
1048 			 * pages to the preferred allocation list. If falling
1049 			 * back for a reclaimable kernel allocation, be more
1050 			 * aggressive about taking ownership of free pages
1051 			 *
1052 			 * On the other hand, never change migration
1053 			 * type of MIGRATE_CMA pageblocks nor move CMA
1054 			 * pages on different free lists. We don't
1055 			 * want unmovable pages to be allocated from
1056 			 * MIGRATE_CMA areas.
1057 			 */
1058 			if (!is_migrate_cma(migratetype) &&
1059 			    (unlikely(current_order >= pageblock_order / 2) ||
1060 			     start_migratetype == MIGRATE_RECLAIMABLE ||
1061 			     page_group_by_mobility_disabled)) {
1062 				int pages;
1063 				pages = move_freepages_block(zone, page,
1064 								start_migratetype);
1065 
1066 				/* Claim the whole block if over half of it is free */
1067 				if (pages >= (1 << (pageblock_order-1)) ||
1068 						page_group_by_mobility_disabled)
1069 					set_pageblock_migratetype(page,
1070 								start_migratetype);
1071 
1072 				migratetype = start_migratetype;
1073 			}
1074 
1075 			/* Remove the page from the freelists */
1076 			list_del(&page->lru);
1077 			rmv_page_order(page);
1078 
1079 			/* Take ownership for orders >= pageblock_order */
1080 			if (current_order >= pageblock_order &&
1081 			    !is_migrate_cma(migratetype))
1082 				change_pageblock_range(page, current_order,
1083 							start_migratetype);
1084 
1085 			expand(zone, page, order, current_order, area,
1086 			       is_migrate_cma(migratetype)
1087 			     ? migratetype : start_migratetype);
1088 
1089 			trace_mm_page_alloc_extfrag(page, order, current_order,
1090 				start_migratetype, migratetype);
1091 
1092 			return page;
1093 		}
1094 	}
1095 
1096 	return NULL;
1097 }
1098 
1099 /*
1100  * Do the hard work of removing an element from the buddy allocator.
1101  * Call me with the zone->lock already held.
1102  */
1103 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1104 						int migratetype)
1105 {
1106 	struct page *page;
1107 
1108 retry_reserve:
1109 	page = __rmqueue_smallest(zone, order, migratetype);
1110 
1111 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1112 		page = __rmqueue_fallback(zone, order, migratetype);
1113 
1114 		/*
1115 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1116 		 * is used because __rmqueue_smallest is an inline function
1117 		 * and we want just one call site
1118 		 */
1119 		if (!page) {
1120 			migratetype = MIGRATE_RESERVE;
1121 			goto retry_reserve;
1122 		}
1123 	}
1124 
1125 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
1126 	return page;
1127 }
1128 
1129 /*
1130  * Obtain a specified number of elements from the buddy allocator, all under
1131  * a single hold of the lock, for efficiency.  Add them to the supplied list.
1132  * Returns the number of new pages which were placed at *list.
1133  */
1134 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1135 			unsigned long count, struct list_head *list,
1136 			int migratetype, int cold)
1137 {
1138 	int mt = migratetype, i;
1139 
1140 	spin_lock(&zone->lock);
1141 	for (i = 0; i < count; ++i) {
1142 		struct page *page = __rmqueue(zone, order, migratetype);
1143 		if (unlikely(page == NULL))
1144 			break;
1145 
1146 		/*
1147 		 * Split buddy pages returned by expand() are received here
1148 		 * in physical page order. The page is added to the callers and
1149 		 * list and the list head then moves forward. From the callers
1150 		 * perspective, the linked list is ordered by page number in
1151 		 * some conditions. This is useful for IO devices that can
1152 		 * merge IO requests if the physical pages are ordered
1153 		 * properly.
1154 		 */
1155 		if (likely(cold == 0))
1156 			list_add(&page->lru, list);
1157 		else
1158 			list_add_tail(&page->lru, list);
1159 		if (IS_ENABLED(CONFIG_CMA)) {
1160 			mt = get_pageblock_migratetype(page);
1161 			if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
1162 				mt = migratetype;
1163 		}
1164 		set_freepage_migratetype(page, mt);
1165 		list = &page->lru;
1166 		if (is_migrate_cma(mt))
1167 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1168 					      -(1 << order));
1169 	}
1170 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1171 	spin_unlock(&zone->lock);
1172 	return i;
1173 }
1174 
1175 #ifdef CONFIG_NUMA
1176 /*
1177  * Called from the vmstat counter updater to drain pagesets of this
1178  * currently executing processor on remote nodes after they have
1179  * expired.
1180  *
1181  * Note that this function must be called with the thread pinned to
1182  * a single processor.
1183  */
1184 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1185 {
1186 	unsigned long flags;
1187 	int to_drain;
1188 
1189 	local_irq_save(flags);
1190 	if (pcp->count >= pcp->batch)
1191 		to_drain = pcp->batch;
1192 	else
1193 		to_drain = pcp->count;
1194 	if (to_drain > 0) {
1195 		free_pcppages_bulk(zone, to_drain, pcp);
1196 		pcp->count -= to_drain;
1197 	}
1198 	local_irq_restore(flags);
1199 }
1200 #endif
1201 
1202 /*
1203  * Drain pages of the indicated processor.
1204  *
1205  * The processor must either be the current processor and the
1206  * thread pinned to the current processor or a processor that
1207  * is not online.
1208  */
1209 static void drain_pages(unsigned int cpu)
1210 {
1211 	unsigned long flags;
1212 	struct zone *zone;
1213 
1214 	for_each_populated_zone(zone) {
1215 		struct per_cpu_pageset *pset;
1216 		struct per_cpu_pages *pcp;
1217 
1218 		local_irq_save(flags);
1219 		pset = per_cpu_ptr(zone->pageset, cpu);
1220 
1221 		pcp = &pset->pcp;
1222 		if (pcp->count) {
1223 			free_pcppages_bulk(zone, pcp->count, pcp);
1224 			pcp->count = 0;
1225 		}
1226 		local_irq_restore(flags);
1227 	}
1228 }
1229 
1230 /*
1231  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1232  */
1233 void drain_local_pages(void *arg)
1234 {
1235 	drain_pages(smp_processor_id());
1236 }
1237 
1238 /*
1239  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1240  *
1241  * Note that this code is protected against sending an IPI to an offline
1242  * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1243  * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1244  * nothing keeps CPUs from showing up after we populated the cpumask and
1245  * before the call to on_each_cpu_mask().
1246  */
1247 void drain_all_pages(void)
1248 {
1249 	int cpu;
1250 	struct per_cpu_pageset *pcp;
1251 	struct zone *zone;
1252 
1253 	/*
1254 	 * Allocate in the BSS so we wont require allocation in
1255 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1256 	 */
1257 	static cpumask_t cpus_with_pcps;
1258 
1259 	/*
1260 	 * We don't care about racing with CPU hotplug event
1261 	 * as offline notification will cause the notified
1262 	 * cpu to drain that CPU pcps and on_each_cpu_mask
1263 	 * disables preemption as part of its processing
1264 	 */
1265 	for_each_online_cpu(cpu) {
1266 		bool has_pcps = false;
1267 		for_each_populated_zone(zone) {
1268 			pcp = per_cpu_ptr(zone->pageset, cpu);
1269 			if (pcp->pcp.count) {
1270 				has_pcps = true;
1271 				break;
1272 			}
1273 		}
1274 		if (has_pcps)
1275 			cpumask_set_cpu(cpu, &cpus_with_pcps);
1276 		else
1277 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
1278 	}
1279 	on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
1280 }
1281 
1282 #ifdef CONFIG_HIBERNATION
1283 
1284 void mark_free_pages(struct zone *zone)
1285 {
1286 	unsigned long pfn, max_zone_pfn;
1287 	unsigned long flags;
1288 	int order, t;
1289 	struct list_head *curr;
1290 
1291 	if (!zone->spanned_pages)
1292 		return;
1293 
1294 	spin_lock_irqsave(&zone->lock, flags);
1295 
1296 	max_zone_pfn = zone_end_pfn(zone);
1297 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1298 		if (pfn_valid(pfn)) {
1299 			struct page *page = pfn_to_page(pfn);
1300 
1301 			if (!swsusp_page_is_forbidden(page))
1302 				swsusp_unset_page_free(page);
1303 		}
1304 
1305 	for_each_migratetype_order(order, t) {
1306 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1307 			unsigned long i;
1308 
1309 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1310 			for (i = 0; i < (1UL << order); i++)
1311 				swsusp_set_page_free(pfn_to_page(pfn + i));
1312 		}
1313 	}
1314 	spin_unlock_irqrestore(&zone->lock, flags);
1315 }
1316 #endif /* CONFIG_PM */
1317 
1318 /*
1319  * Free a 0-order page
1320  * cold == 1 ? free a cold page : free a hot page
1321  */
1322 void free_hot_cold_page(struct page *page, int cold)
1323 {
1324 	struct zone *zone = page_zone(page);
1325 	struct per_cpu_pages *pcp;
1326 	unsigned long flags;
1327 	int migratetype;
1328 
1329 	if (!free_pages_prepare(page, 0))
1330 		return;
1331 
1332 	migratetype = get_pageblock_migratetype(page);
1333 	set_freepage_migratetype(page, migratetype);
1334 	local_irq_save(flags);
1335 	__count_vm_event(PGFREE);
1336 
1337 	/*
1338 	 * We only track unmovable, reclaimable and movable on pcp lists.
1339 	 * Free ISOLATE pages back to the allocator because they are being
1340 	 * offlined but treat RESERVE as movable pages so we can get those
1341 	 * areas back if necessary. Otherwise, we may have to free
1342 	 * excessively into the page allocator
1343 	 */
1344 	if (migratetype >= MIGRATE_PCPTYPES) {
1345 		if (unlikely(is_migrate_isolate(migratetype))) {
1346 			free_one_page(zone, page, 0, migratetype);
1347 			goto out;
1348 		}
1349 		migratetype = MIGRATE_MOVABLE;
1350 	}
1351 
1352 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1353 	if (cold)
1354 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1355 	else
1356 		list_add(&page->lru, &pcp->lists[migratetype]);
1357 	pcp->count++;
1358 	if (pcp->count >= pcp->high) {
1359 		free_pcppages_bulk(zone, pcp->batch, pcp);
1360 		pcp->count -= pcp->batch;
1361 	}
1362 
1363 out:
1364 	local_irq_restore(flags);
1365 }
1366 
1367 /*
1368  * Free a list of 0-order pages
1369  */
1370 void free_hot_cold_page_list(struct list_head *list, int cold)
1371 {
1372 	struct page *page, *next;
1373 
1374 	list_for_each_entry_safe(page, next, list, lru) {
1375 		trace_mm_page_free_batched(page, cold);
1376 		free_hot_cold_page(page, cold);
1377 	}
1378 }
1379 
1380 /*
1381  * split_page takes a non-compound higher-order page, and splits it into
1382  * n (1<<order) sub-pages: page[0..n]
1383  * Each sub-page must be freed individually.
1384  *
1385  * Note: this is probably too low level an operation for use in drivers.
1386  * Please consult with lkml before using this in your driver.
1387  */
1388 void split_page(struct page *page, unsigned int order)
1389 {
1390 	int i;
1391 
1392 	VM_BUG_ON(PageCompound(page));
1393 	VM_BUG_ON(!page_count(page));
1394 
1395 #ifdef CONFIG_KMEMCHECK
1396 	/*
1397 	 * Split shadow pages too, because free(page[0]) would
1398 	 * otherwise free the whole shadow.
1399 	 */
1400 	if (kmemcheck_page_is_tracked(page))
1401 		split_page(virt_to_page(page[0].shadow), order);
1402 #endif
1403 
1404 	for (i = 1; i < (1 << order); i++)
1405 		set_page_refcounted(page + i);
1406 }
1407 
1408 static int __isolate_free_page(struct page *page, unsigned int order)
1409 {
1410 	unsigned long watermark;
1411 	struct zone *zone;
1412 	int mt;
1413 
1414 	BUG_ON(!PageBuddy(page));
1415 
1416 	zone = page_zone(page);
1417 	mt = get_pageblock_migratetype(page);
1418 
1419 	if (!is_migrate_isolate(mt)) {
1420 		/* Obey watermarks as if the page was being allocated */
1421 		watermark = low_wmark_pages(zone) + (1 << order);
1422 		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1423 			return 0;
1424 
1425 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
1426 	}
1427 
1428 	/* Remove page from free list */
1429 	list_del(&page->lru);
1430 	zone->free_area[order].nr_free--;
1431 	rmv_page_order(page);
1432 
1433 	/* Set the pageblock if the isolated page is at least a pageblock */
1434 	if (order >= pageblock_order - 1) {
1435 		struct page *endpage = page + (1 << order) - 1;
1436 		for (; page < endpage; page += pageblock_nr_pages) {
1437 			int mt = get_pageblock_migratetype(page);
1438 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
1439 				set_pageblock_migratetype(page,
1440 							  MIGRATE_MOVABLE);
1441 		}
1442 	}
1443 
1444 	return 1UL << order;
1445 }
1446 
1447 /*
1448  * Similar to split_page except the page is already free. As this is only
1449  * being used for migration, the migratetype of the block also changes.
1450  * As this is called with interrupts disabled, the caller is responsible
1451  * for calling arch_alloc_page() and kernel_map_page() after interrupts
1452  * are enabled.
1453  *
1454  * Note: this is probably too low level an operation for use in drivers.
1455  * Please consult with lkml before using this in your driver.
1456  */
1457 int split_free_page(struct page *page)
1458 {
1459 	unsigned int order;
1460 	int nr_pages;
1461 
1462 	order = page_order(page);
1463 
1464 	nr_pages = __isolate_free_page(page, order);
1465 	if (!nr_pages)
1466 		return 0;
1467 
1468 	/* Split into individual pages */
1469 	set_page_refcounted(page);
1470 	split_page(page, order);
1471 	return nr_pages;
1472 }
1473 
1474 /*
1475  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1476  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1477  * or two.
1478  */
1479 static inline
1480 struct page *buffered_rmqueue(struct zone *preferred_zone,
1481 			struct zone *zone, int order, gfp_t gfp_flags,
1482 			int migratetype)
1483 {
1484 	unsigned long flags;
1485 	struct page *page;
1486 	int cold = !!(gfp_flags & __GFP_COLD);
1487 
1488 again:
1489 	if (likely(order == 0)) {
1490 		struct per_cpu_pages *pcp;
1491 		struct list_head *list;
1492 
1493 		local_irq_save(flags);
1494 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
1495 		list = &pcp->lists[migratetype];
1496 		if (list_empty(list)) {
1497 			pcp->count += rmqueue_bulk(zone, 0,
1498 					pcp->batch, list,
1499 					migratetype, cold);
1500 			if (unlikely(list_empty(list)))
1501 				goto failed;
1502 		}
1503 
1504 		if (cold)
1505 			page = list_entry(list->prev, struct page, lru);
1506 		else
1507 			page = list_entry(list->next, struct page, lru);
1508 
1509 		list_del(&page->lru);
1510 		pcp->count--;
1511 	} else {
1512 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1513 			/*
1514 			 * __GFP_NOFAIL is not to be used in new code.
1515 			 *
1516 			 * All __GFP_NOFAIL callers should be fixed so that they
1517 			 * properly detect and handle allocation failures.
1518 			 *
1519 			 * We most definitely don't want callers attempting to
1520 			 * allocate greater than order-1 page units with
1521 			 * __GFP_NOFAIL.
1522 			 */
1523 			WARN_ON_ONCE(order > 1);
1524 		}
1525 		spin_lock_irqsave(&zone->lock, flags);
1526 		page = __rmqueue(zone, order, migratetype);
1527 		spin_unlock(&zone->lock);
1528 		if (!page)
1529 			goto failed;
1530 		__mod_zone_freepage_state(zone, -(1 << order),
1531 					  get_pageblock_migratetype(page));
1532 	}
1533 
1534 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1535 	zone_statistics(preferred_zone, zone, gfp_flags);
1536 	local_irq_restore(flags);
1537 
1538 	VM_BUG_ON(bad_range(zone, page));
1539 	if (prep_new_page(page, order, gfp_flags))
1540 		goto again;
1541 	return page;
1542 
1543 failed:
1544 	local_irq_restore(flags);
1545 	return NULL;
1546 }
1547 
1548 #ifdef CONFIG_FAIL_PAGE_ALLOC
1549 
1550 static struct {
1551 	struct fault_attr attr;
1552 
1553 	u32 ignore_gfp_highmem;
1554 	u32 ignore_gfp_wait;
1555 	u32 min_order;
1556 } fail_page_alloc = {
1557 	.attr = FAULT_ATTR_INITIALIZER,
1558 	.ignore_gfp_wait = 1,
1559 	.ignore_gfp_highmem = 1,
1560 	.min_order = 1,
1561 };
1562 
1563 static int __init setup_fail_page_alloc(char *str)
1564 {
1565 	return setup_fault_attr(&fail_page_alloc.attr, str);
1566 }
1567 __setup("fail_page_alloc=", setup_fail_page_alloc);
1568 
1569 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1570 {
1571 	if (order < fail_page_alloc.min_order)
1572 		return false;
1573 	if (gfp_mask & __GFP_NOFAIL)
1574 		return false;
1575 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1576 		return false;
1577 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1578 		return false;
1579 
1580 	return should_fail(&fail_page_alloc.attr, 1 << order);
1581 }
1582 
1583 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1584 
1585 static int __init fail_page_alloc_debugfs(void)
1586 {
1587 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1588 	struct dentry *dir;
1589 
1590 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1591 					&fail_page_alloc.attr);
1592 	if (IS_ERR(dir))
1593 		return PTR_ERR(dir);
1594 
1595 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1596 				&fail_page_alloc.ignore_gfp_wait))
1597 		goto fail;
1598 	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1599 				&fail_page_alloc.ignore_gfp_highmem))
1600 		goto fail;
1601 	if (!debugfs_create_u32("min-order", mode, dir,
1602 				&fail_page_alloc.min_order))
1603 		goto fail;
1604 
1605 	return 0;
1606 fail:
1607 	debugfs_remove_recursive(dir);
1608 
1609 	return -ENOMEM;
1610 }
1611 
1612 late_initcall(fail_page_alloc_debugfs);
1613 
1614 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1615 
1616 #else /* CONFIG_FAIL_PAGE_ALLOC */
1617 
1618 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1619 {
1620 	return false;
1621 }
1622 
1623 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1624 
1625 /*
1626  * Return true if free pages are above 'mark'. This takes into account the order
1627  * of the allocation.
1628  */
1629 static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1630 		      int classzone_idx, int alloc_flags, long free_pages)
1631 {
1632 	/* free_pages my go negative - that's OK */
1633 	long min = mark;
1634 	long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1635 	int o;
1636 
1637 	free_pages -= (1 << order) - 1;
1638 	if (alloc_flags & ALLOC_HIGH)
1639 		min -= min / 2;
1640 	if (alloc_flags & ALLOC_HARDER)
1641 		min -= min / 4;
1642 #ifdef CONFIG_CMA
1643 	/* If allocation can't use CMA areas don't use free CMA pages */
1644 	if (!(alloc_flags & ALLOC_CMA))
1645 		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
1646 #endif
1647 	if (free_pages <= min + lowmem_reserve)
1648 		return false;
1649 	for (o = 0; o < order; o++) {
1650 		/* At the next order, this order's pages become unavailable */
1651 		free_pages -= z->free_area[o].nr_free << o;
1652 
1653 		/* Require fewer higher order pages to be free */
1654 		min >>= 1;
1655 
1656 		if (free_pages <= min)
1657 			return false;
1658 	}
1659 	return true;
1660 }
1661 
1662 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1663 		      int classzone_idx, int alloc_flags)
1664 {
1665 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1666 					zone_page_state(z, NR_FREE_PAGES));
1667 }
1668 
1669 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1670 		      int classzone_idx, int alloc_flags)
1671 {
1672 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
1673 
1674 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1675 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1676 
1677 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1678 								free_pages);
1679 }
1680 
1681 #ifdef CONFIG_NUMA
1682 /*
1683  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1684  * skip over zones that are not allowed by the cpuset, or that have
1685  * been recently (in last second) found to be nearly full.  See further
1686  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1687  * that have to skip over a lot of full or unallowed zones.
1688  *
1689  * If the zonelist cache is present in the passed in zonelist, then
1690  * returns a pointer to the allowed node mask (either the current
1691  * tasks mems_allowed, or node_states[N_MEMORY].)
1692  *
1693  * If the zonelist cache is not available for this zonelist, does
1694  * nothing and returns NULL.
1695  *
1696  * If the fullzones BITMAP in the zonelist cache is stale (more than
1697  * a second since last zap'd) then we zap it out (clear its bits.)
1698  *
1699  * We hold off even calling zlc_setup, until after we've checked the
1700  * first zone in the zonelist, on the theory that most allocations will
1701  * be satisfied from that first zone, so best to examine that zone as
1702  * quickly as we can.
1703  */
1704 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1705 {
1706 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1707 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1708 
1709 	zlc = zonelist->zlcache_ptr;
1710 	if (!zlc)
1711 		return NULL;
1712 
1713 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1714 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1715 		zlc->last_full_zap = jiffies;
1716 	}
1717 
1718 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1719 					&cpuset_current_mems_allowed :
1720 					&node_states[N_MEMORY];
1721 	return allowednodes;
1722 }
1723 
1724 /*
1725  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1726  * if it is worth looking at further for free memory:
1727  *  1) Check that the zone isn't thought to be full (doesn't have its
1728  *     bit set in the zonelist_cache fullzones BITMAP).
1729  *  2) Check that the zones node (obtained from the zonelist_cache
1730  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1731  * Return true (non-zero) if zone is worth looking at further, or
1732  * else return false (zero) if it is not.
1733  *
1734  * This check -ignores- the distinction between various watermarks,
1735  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1736  * found to be full for any variation of these watermarks, it will
1737  * be considered full for up to one second by all requests, unless
1738  * we are so low on memory on all allowed nodes that we are forced
1739  * into the second scan of the zonelist.
1740  *
1741  * In the second scan we ignore this zonelist cache and exactly
1742  * apply the watermarks to all zones, even it is slower to do so.
1743  * We are low on memory in the second scan, and should leave no stone
1744  * unturned looking for a free page.
1745  */
1746 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1747 						nodemask_t *allowednodes)
1748 {
1749 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1750 	int i;				/* index of *z in zonelist zones */
1751 	int n;				/* node that zone *z is on */
1752 
1753 	zlc = zonelist->zlcache_ptr;
1754 	if (!zlc)
1755 		return 1;
1756 
1757 	i = z - zonelist->_zonerefs;
1758 	n = zlc->z_to_n[i];
1759 
1760 	/* This zone is worth trying if it is allowed but not full */
1761 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1762 }
1763 
1764 /*
1765  * Given 'z' scanning a zonelist, set the corresponding bit in
1766  * zlc->fullzones, so that subsequent attempts to allocate a page
1767  * from that zone don't waste time re-examining it.
1768  */
1769 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1770 {
1771 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1772 	int i;				/* index of *z in zonelist zones */
1773 
1774 	zlc = zonelist->zlcache_ptr;
1775 	if (!zlc)
1776 		return;
1777 
1778 	i = z - zonelist->_zonerefs;
1779 
1780 	set_bit(i, zlc->fullzones);
1781 }
1782 
1783 /*
1784  * clear all zones full, called after direct reclaim makes progress so that
1785  * a zone that was recently full is not skipped over for up to a second
1786  */
1787 static void zlc_clear_zones_full(struct zonelist *zonelist)
1788 {
1789 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1790 
1791 	zlc = zonelist->zlcache_ptr;
1792 	if (!zlc)
1793 		return;
1794 
1795 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1796 }
1797 
1798 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1799 {
1800 	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
1801 }
1802 
1803 static void __paginginit init_zone_allows_reclaim(int nid)
1804 {
1805 	int i;
1806 
1807 	for_each_online_node(i)
1808 		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
1809 			node_set(i, NODE_DATA(nid)->reclaim_nodes);
1810 		else
1811 			zone_reclaim_mode = 1;
1812 }
1813 
1814 #else	/* CONFIG_NUMA */
1815 
1816 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1817 {
1818 	return NULL;
1819 }
1820 
1821 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1822 				nodemask_t *allowednodes)
1823 {
1824 	return 1;
1825 }
1826 
1827 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1828 {
1829 }
1830 
1831 static void zlc_clear_zones_full(struct zonelist *zonelist)
1832 {
1833 }
1834 
1835 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1836 {
1837 	return true;
1838 }
1839 
1840 static inline void init_zone_allows_reclaim(int nid)
1841 {
1842 }
1843 #endif	/* CONFIG_NUMA */
1844 
1845 /*
1846  * get_page_from_freelist goes through the zonelist trying to allocate
1847  * a page.
1848  */
1849 static struct page *
1850 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1851 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1852 		struct zone *preferred_zone, int migratetype)
1853 {
1854 	struct zoneref *z;
1855 	struct page *page = NULL;
1856 	int classzone_idx;
1857 	struct zone *zone;
1858 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1859 	int zlc_active = 0;		/* set if using zonelist_cache */
1860 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1861 
1862 	classzone_idx = zone_idx(preferred_zone);
1863 zonelist_scan:
1864 	/*
1865 	 * Scan zonelist, looking for a zone with enough free.
1866 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1867 	 */
1868 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1869 						high_zoneidx, nodemask) {
1870 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
1871 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1872 				continue;
1873 		if ((alloc_flags & ALLOC_CPUSET) &&
1874 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1875 				continue;
1876 		/*
1877 		 * When allocating a page cache page for writing, we
1878 		 * want to get it from a zone that is within its dirty
1879 		 * limit, such that no single zone holds more than its
1880 		 * proportional share of globally allowed dirty pages.
1881 		 * The dirty limits take into account the zone's
1882 		 * lowmem reserves and high watermark so that kswapd
1883 		 * should be able to balance it without having to
1884 		 * write pages from its LRU list.
1885 		 *
1886 		 * This may look like it could increase pressure on
1887 		 * lower zones by failing allocations in higher zones
1888 		 * before they are full.  But the pages that do spill
1889 		 * over are limited as the lower zones are protected
1890 		 * by this very same mechanism.  It should not become
1891 		 * a practical burden to them.
1892 		 *
1893 		 * XXX: For now, allow allocations to potentially
1894 		 * exceed the per-zone dirty limit in the slowpath
1895 		 * (ALLOC_WMARK_LOW unset) before going into reclaim,
1896 		 * which is important when on a NUMA setup the allowed
1897 		 * zones are together not big enough to reach the
1898 		 * global limit.  The proper fix for these situations
1899 		 * will require awareness of zones in the
1900 		 * dirty-throttling and the flusher threads.
1901 		 */
1902 		if ((alloc_flags & ALLOC_WMARK_LOW) &&
1903 		    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1904 			goto this_zone_full;
1905 
1906 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1907 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1908 			unsigned long mark;
1909 			int ret;
1910 
1911 			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1912 			if (zone_watermark_ok(zone, order, mark,
1913 				    classzone_idx, alloc_flags))
1914 				goto try_this_zone;
1915 
1916 			if (IS_ENABLED(CONFIG_NUMA) &&
1917 					!did_zlc_setup && nr_online_nodes > 1) {
1918 				/*
1919 				 * we do zlc_setup if there are multiple nodes
1920 				 * and before considering the first zone allowed
1921 				 * by the cpuset.
1922 				 */
1923 				allowednodes = zlc_setup(zonelist, alloc_flags);
1924 				zlc_active = 1;
1925 				did_zlc_setup = 1;
1926 			}
1927 
1928 			if (zone_reclaim_mode == 0 ||
1929 			    !zone_allows_reclaim(preferred_zone, zone))
1930 				goto this_zone_full;
1931 
1932 			/*
1933 			 * As we may have just activated ZLC, check if the first
1934 			 * eligible zone has failed zone_reclaim recently.
1935 			 */
1936 			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
1937 				!zlc_zone_worth_trying(zonelist, z, allowednodes))
1938 				continue;
1939 
1940 			ret = zone_reclaim(zone, gfp_mask, order);
1941 			switch (ret) {
1942 			case ZONE_RECLAIM_NOSCAN:
1943 				/* did not scan */
1944 				continue;
1945 			case ZONE_RECLAIM_FULL:
1946 				/* scanned but unreclaimable */
1947 				continue;
1948 			default:
1949 				/* did we reclaim enough */
1950 				if (!zone_watermark_ok(zone, order, mark,
1951 						classzone_idx, alloc_flags))
1952 					goto this_zone_full;
1953 			}
1954 		}
1955 
1956 try_this_zone:
1957 		page = buffered_rmqueue(preferred_zone, zone, order,
1958 						gfp_mask, migratetype);
1959 		if (page)
1960 			break;
1961 this_zone_full:
1962 		if (IS_ENABLED(CONFIG_NUMA))
1963 			zlc_mark_zone_full(zonelist, z);
1964 	}
1965 
1966 	if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
1967 		/* Disable zlc cache for second zonelist scan */
1968 		zlc_active = 0;
1969 		goto zonelist_scan;
1970 	}
1971 
1972 	if (page)
1973 		/*
1974 		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
1975 		 * necessary to allocate the page. The expectation is
1976 		 * that the caller is taking steps that will free more
1977 		 * memory. The caller should avoid the page being used
1978 		 * for !PFMEMALLOC purposes.
1979 		 */
1980 		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
1981 
1982 	return page;
1983 }
1984 
1985 /*
1986  * Large machines with many possible nodes should not always dump per-node
1987  * meminfo in irq context.
1988  */
1989 static inline bool should_suppress_show_mem(void)
1990 {
1991 	bool ret = false;
1992 
1993 #if NODES_SHIFT > 8
1994 	ret = in_interrupt();
1995 #endif
1996 	return ret;
1997 }
1998 
1999 static DEFINE_RATELIMIT_STATE(nopage_rs,
2000 		DEFAULT_RATELIMIT_INTERVAL,
2001 		DEFAULT_RATELIMIT_BURST);
2002 
2003 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
2004 {
2005 	unsigned int filter = SHOW_MEM_FILTER_NODES;
2006 
2007 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2008 	    debug_guardpage_minorder() > 0)
2009 		return;
2010 
2011 	/*
2012 	 * This documents exceptions given to allocations in certain
2013 	 * contexts that are allowed to allocate outside current's set
2014 	 * of allowed nodes.
2015 	 */
2016 	if (!(gfp_mask & __GFP_NOMEMALLOC))
2017 		if (test_thread_flag(TIF_MEMDIE) ||
2018 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
2019 			filter &= ~SHOW_MEM_FILTER_NODES;
2020 	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2021 		filter &= ~SHOW_MEM_FILTER_NODES;
2022 
2023 	if (fmt) {
2024 		struct va_format vaf;
2025 		va_list args;
2026 
2027 		va_start(args, fmt);
2028 
2029 		vaf.fmt = fmt;
2030 		vaf.va = &args;
2031 
2032 		pr_warn("%pV", &vaf);
2033 
2034 		va_end(args);
2035 	}
2036 
2037 	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2038 		current->comm, order, gfp_mask);
2039 
2040 	dump_stack();
2041 	if (!should_suppress_show_mem())
2042 		show_mem(filter);
2043 }
2044 
2045 static inline int
2046 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
2047 				unsigned long did_some_progress,
2048 				unsigned long pages_reclaimed)
2049 {
2050 	/* Do not loop if specifically requested */
2051 	if (gfp_mask & __GFP_NORETRY)
2052 		return 0;
2053 
2054 	/* Always retry if specifically requested */
2055 	if (gfp_mask & __GFP_NOFAIL)
2056 		return 1;
2057 
2058 	/*
2059 	 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
2060 	 * making forward progress without invoking OOM. Suspend also disables
2061 	 * storage devices so kswapd will not help. Bail if we are suspending.
2062 	 */
2063 	if (!did_some_progress && pm_suspended_storage())
2064 		return 0;
2065 
2066 	/*
2067 	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
2068 	 * means __GFP_NOFAIL, but that may not be true in other
2069 	 * implementations.
2070 	 */
2071 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
2072 		return 1;
2073 
2074 	/*
2075 	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
2076 	 * specified, then we retry until we no longer reclaim any pages
2077 	 * (above), or we've reclaimed an order of pages at least as
2078 	 * large as the allocation's order. In both cases, if the
2079 	 * allocation still fails, we stop retrying.
2080 	 */
2081 	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
2082 		return 1;
2083 
2084 	return 0;
2085 }
2086 
2087 static inline struct page *
2088 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2089 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2090 	nodemask_t *nodemask, struct zone *preferred_zone,
2091 	int migratetype)
2092 {
2093 	struct page *page;
2094 
2095 	/* Acquire the OOM killer lock for the zones in zonelist */
2096 	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
2097 		schedule_timeout_uninterruptible(1);
2098 		return NULL;
2099 	}
2100 
2101 	/*
2102 	 * Go through the zonelist yet one more time, keep very high watermark
2103 	 * here, this is only to catch a parallel oom killing, we must fail if
2104 	 * we're still under heavy pressure.
2105 	 */
2106 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2107 		order, zonelist, high_zoneidx,
2108 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
2109 		preferred_zone, migratetype);
2110 	if (page)
2111 		goto out;
2112 
2113 	if (!(gfp_mask & __GFP_NOFAIL)) {
2114 		/* The OOM killer will not help higher order allocs */
2115 		if (order > PAGE_ALLOC_COSTLY_ORDER)
2116 			goto out;
2117 		/* The OOM killer does not needlessly kill tasks for lowmem */
2118 		if (high_zoneidx < ZONE_NORMAL)
2119 			goto out;
2120 		/*
2121 		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2122 		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
2123 		 * The caller should handle page allocation failure by itself if
2124 		 * it specifies __GFP_THISNODE.
2125 		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
2126 		 */
2127 		if (gfp_mask & __GFP_THISNODE)
2128 			goto out;
2129 	}
2130 	/* Exhausted what can be done so it's blamo time */
2131 	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2132 
2133 out:
2134 	clear_zonelist_oom(zonelist, gfp_mask);
2135 	return page;
2136 }
2137 
2138 #ifdef CONFIG_COMPACTION
2139 /* Try memory compaction for high-order allocations before reclaim */
2140 static struct page *
2141 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2142 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2143 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2144 	int migratetype, bool sync_migration,
2145 	bool *contended_compaction, bool *deferred_compaction,
2146 	unsigned long *did_some_progress)
2147 {
2148 	if (!order)
2149 		return NULL;
2150 
2151 	if (compaction_deferred(preferred_zone, order)) {
2152 		*deferred_compaction = true;
2153 		return NULL;
2154 	}
2155 
2156 	current->flags |= PF_MEMALLOC;
2157 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2158 						nodemask, sync_migration,
2159 						contended_compaction);
2160 	current->flags &= ~PF_MEMALLOC;
2161 
2162 	if (*did_some_progress != COMPACT_SKIPPED) {
2163 		struct page *page;
2164 
2165 		/* Page migration frees to the PCP lists but we want merging */
2166 		drain_pages(get_cpu());
2167 		put_cpu();
2168 
2169 		page = get_page_from_freelist(gfp_mask, nodemask,
2170 				order, zonelist, high_zoneidx,
2171 				alloc_flags & ~ALLOC_NO_WATERMARKS,
2172 				preferred_zone, migratetype);
2173 		if (page) {
2174 			preferred_zone->compact_blockskip_flush = false;
2175 			preferred_zone->compact_considered = 0;
2176 			preferred_zone->compact_defer_shift = 0;
2177 			if (order >= preferred_zone->compact_order_failed)
2178 				preferred_zone->compact_order_failed = order + 1;
2179 			count_vm_event(COMPACTSUCCESS);
2180 			return page;
2181 		}
2182 
2183 		/*
2184 		 * It's bad if compaction run occurs and fails.
2185 		 * The most likely reason is that pages exist,
2186 		 * but not enough to satisfy watermarks.
2187 		 */
2188 		count_vm_event(COMPACTFAIL);
2189 
2190 		/*
2191 		 * As async compaction considers a subset of pageblocks, only
2192 		 * defer if the failure was a sync compaction failure.
2193 		 */
2194 		if (sync_migration)
2195 			defer_compaction(preferred_zone, order);
2196 
2197 		cond_resched();
2198 	}
2199 
2200 	return NULL;
2201 }
2202 #else
2203 static inline struct page *
2204 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2205 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2206 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2207 	int migratetype, bool sync_migration,
2208 	bool *contended_compaction, bool *deferred_compaction,
2209 	unsigned long *did_some_progress)
2210 {
2211 	return NULL;
2212 }
2213 #endif /* CONFIG_COMPACTION */
2214 
2215 /* Perform direct synchronous page reclaim */
2216 static int
2217 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2218 		  nodemask_t *nodemask)
2219 {
2220 	struct reclaim_state reclaim_state;
2221 	int progress;
2222 
2223 	cond_resched();
2224 
2225 	/* We now go into synchronous reclaim */
2226 	cpuset_memory_pressure_bump();
2227 	current->flags |= PF_MEMALLOC;
2228 	lockdep_set_current_reclaim_state(gfp_mask);
2229 	reclaim_state.reclaimed_slab = 0;
2230 	current->reclaim_state = &reclaim_state;
2231 
2232 	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2233 
2234 	current->reclaim_state = NULL;
2235 	lockdep_clear_current_reclaim_state();
2236 	current->flags &= ~PF_MEMALLOC;
2237 
2238 	cond_resched();
2239 
2240 	return progress;
2241 }
2242 
2243 /* The really slow allocator path where we enter direct reclaim */
2244 static inline struct page *
2245 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2246 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2247 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2248 	int migratetype, unsigned long *did_some_progress)
2249 {
2250 	struct page *page = NULL;
2251 	bool drained = false;
2252 
2253 	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2254 					       nodemask);
2255 	if (unlikely(!(*did_some_progress)))
2256 		return NULL;
2257 
2258 	/* After successful reclaim, reconsider all zones for allocation */
2259 	if (IS_ENABLED(CONFIG_NUMA))
2260 		zlc_clear_zones_full(zonelist);
2261 
2262 retry:
2263 	page = get_page_from_freelist(gfp_mask, nodemask, order,
2264 					zonelist, high_zoneidx,
2265 					alloc_flags & ~ALLOC_NO_WATERMARKS,
2266 					preferred_zone, migratetype);
2267 
2268 	/*
2269 	 * If an allocation failed after direct reclaim, it could be because
2270 	 * pages are pinned on the per-cpu lists. Drain them and try again
2271 	 */
2272 	if (!page && !drained) {
2273 		drain_all_pages();
2274 		drained = true;
2275 		goto retry;
2276 	}
2277 
2278 	return page;
2279 }
2280 
2281 /*
2282  * This is called in the allocator slow-path if the allocation request is of
2283  * sufficient urgency to ignore watermarks and take other desperate measures
2284  */
2285 static inline struct page *
2286 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2287 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2288 	nodemask_t *nodemask, struct zone *preferred_zone,
2289 	int migratetype)
2290 {
2291 	struct page *page;
2292 
2293 	do {
2294 		page = get_page_from_freelist(gfp_mask, nodemask, order,
2295 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2296 			preferred_zone, migratetype);
2297 
2298 		if (!page && gfp_mask & __GFP_NOFAIL)
2299 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2300 	} while (!page && (gfp_mask & __GFP_NOFAIL));
2301 
2302 	return page;
2303 }
2304 
2305 static inline
2306 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2307 						enum zone_type high_zoneidx,
2308 						enum zone_type classzone_idx)
2309 {
2310 	struct zoneref *z;
2311 	struct zone *zone;
2312 
2313 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2314 		wakeup_kswapd(zone, order, classzone_idx);
2315 }
2316 
2317 static inline int
2318 gfp_to_alloc_flags(gfp_t gfp_mask)
2319 {
2320 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2321 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2322 
2323 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2324 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2325 
2326 	/*
2327 	 * The caller may dip into page reserves a bit more if the caller
2328 	 * cannot run direct reclaim, or if the caller has realtime scheduling
2329 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2330 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2331 	 */
2332 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2333 
2334 	if (!wait) {
2335 		/*
2336 		 * Not worth trying to allocate harder for
2337 		 * __GFP_NOMEMALLOC even if it can't schedule.
2338 		 */
2339 		if  (!(gfp_mask & __GFP_NOMEMALLOC))
2340 			alloc_flags |= ALLOC_HARDER;
2341 		/*
2342 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2343 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2344 		 */
2345 		alloc_flags &= ~ALLOC_CPUSET;
2346 	} else if (unlikely(rt_task(current)) && !in_interrupt())
2347 		alloc_flags |= ALLOC_HARDER;
2348 
2349 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2350 		if (gfp_mask & __GFP_MEMALLOC)
2351 			alloc_flags |= ALLOC_NO_WATERMARKS;
2352 		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2353 			alloc_flags |= ALLOC_NO_WATERMARKS;
2354 		else if (!in_interrupt() &&
2355 				((current->flags & PF_MEMALLOC) ||
2356 				 unlikely(test_thread_flag(TIF_MEMDIE))))
2357 			alloc_flags |= ALLOC_NO_WATERMARKS;
2358 	}
2359 #ifdef CONFIG_CMA
2360 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2361 		alloc_flags |= ALLOC_CMA;
2362 #endif
2363 	return alloc_flags;
2364 }
2365 
2366 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2367 {
2368 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2369 }
2370 
2371 static inline struct page *
2372 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2373 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2374 	nodemask_t *nodemask, struct zone *preferred_zone,
2375 	int migratetype)
2376 {
2377 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2378 	struct page *page = NULL;
2379 	int alloc_flags;
2380 	unsigned long pages_reclaimed = 0;
2381 	unsigned long did_some_progress;
2382 	bool sync_migration = false;
2383 	bool deferred_compaction = false;
2384 	bool contended_compaction = false;
2385 
2386 	/*
2387 	 * In the slowpath, we sanity check order to avoid ever trying to
2388 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2389 	 * be using allocators in order of preference for an area that is
2390 	 * too large.
2391 	 */
2392 	if (order >= MAX_ORDER) {
2393 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2394 		return NULL;
2395 	}
2396 
2397 	/*
2398 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2399 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2400 	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2401 	 * using a larger set of nodes after it has established that the
2402 	 * allowed per node queues are empty and that nodes are
2403 	 * over allocated.
2404 	 */
2405 	if (IS_ENABLED(CONFIG_NUMA) &&
2406 			(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2407 		goto nopage;
2408 
2409 restart:
2410 	if (!(gfp_mask & __GFP_NO_KSWAPD))
2411 		wake_all_kswapd(order, zonelist, high_zoneidx,
2412 						zone_idx(preferred_zone));
2413 
2414 	/*
2415 	 * OK, we're below the kswapd watermark and have kicked background
2416 	 * reclaim. Now things get more complex, so set up alloc_flags according
2417 	 * to how we want to proceed.
2418 	 */
2419 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
2420 
2421 	/*
2422 	 * Find the true preferred zone if the allocation is unconstrained by
2423 	 * cpusets.
2424 	 */
2425 	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2426 		first_zones_zonelist(zonelist, high_zoneidx, NULL,
2427 					&preferred_zone);
2428 
2429 rebalance:
2430 	/* This is the last chance, in general, before the goto nopage. */
2431 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2432 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2433 			preferred_zone, migratetype);
2434 	if (page)
2435 		goto got_pg;
2436 
2437 	/* Allocate without watermarks if the context allows */
2438 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
2439 		/*
2440 		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2441 		 * the allocation is high priority and these type of
2442 		 * allocations are system rather than user orientated
2443 		 */
2444 		zonelist = node_zonelist(numa_node_id(), gfp_mask);
2445 
2446 		page = __alloc_pages_high_priority(gfp_mask, order,
2447 				zonelist, high_zoneidx, nodemask,
2448 				preferred_zone, migratetype);
2449 		if (page) {
2450 			goto got_pg;
2451 		}
2452 	}
2453 
2454 	/* Atomic allocations - we can't balance anything */
2455 	if (!wait)
2456 		goto nopage;
2457 
2458 	/* Avoid recursion of direct reclaim */
2459 	if (current->flags & PF_MEMALLOC)
2460 		goto nopage;
2461 
2462 	/* Avoid allocations with no watermarks from looping endlessly */
2463 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2464 		goto nopage;
2465 
2466 	/*
2467 	 * Try direct compaction. The first pass is asynchronous. Subsequent
2468 	 * attempts after direct reclaim are synchronous
2469 	 */
2470 	page = __alloc_pages_direct_compact(gfp_mask, order,
2471 					zonelist, high_zoneidx,
2472 					nodemask,
2473 					alloc_flags, preferred_zone,
2474 					migratetype, sync_migration,
2475 					&contended_compaction,
2476 					&deferred_compaction,
2477 					&did_some_progress);
2478 	if (page)
2479 		goto got_pg;
2480 	sync_migration = true;
2481 
2482 	/*
2483 	 * If compaction is deferred for high-order allocations, it is because
2484 	 * sync compaction recently failed. In this is the case and the caller
2485 	 * requested a movable allocation that does not heavily disrupt the
2486 	 * system then fail the allocation instead of entering direct reclaim.
2487 	 */
2488 	if ((deferred_compaction || contended_compaction) &&
2489 						(gfp_mask & __GFP_NO_KSWAPD))
2490 		goto nopage;
2491 
2492 	/* Try direct reclaim and then allocating */
2493 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
2494 					zonelist, high_zoneidx,
2495 					nodemask,
2496 					alloc_flags, preferred_zone,
2497 					migratetype, &did_some_progress);
2498 	if (page)
2499 		goto got_pg;
2500 
2501 	/*
2502 	 * If we failed to make any progress reclaiming, then we are
2503 	 * running out of options and have to consider going OOM
2504 	 */
2505 	if (!did_some_progress) {
2506 		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2507 			if (oom_killer_disabled)
2508 				goto nopage;
2509 			/* Coredumps can quickly deplete all memory reserves */
2510 			if ((current->flags & PF_DUMPCORE) &&
2511 			    !(gfp_mask & __GFP_NOFAIL))
2512 				goto nopage;
2513 			page = __alloc_pages_may_oom(gfp_mask, order,
2514 					zonelist, high_zoneidx,
2515 					nodemask, preferred_zone,
2516 					migratetype);
2517 			if (page)
2518 				goto got_pg;
2519 
2520 			if (!(gfp_mask & __GFP_NOFAIL)) {
2521 				/*
2522 				 * The oom killer is not called for high-order
2523 				 * allocations that may fail, so if no progress
2524 				 * is being made, there are no other options and
2525 				 * retrying is unlikely to help.
2526 				 */
2527 				if (order > PAGE_ALLOC_COSTLY_ORDER)
2528 					goto nopage;
2529 				/*
2530 				 * The oom killer is not called for lowmem
2531 				 * allocations to prevent needlessly killing
2532 				 * innocent tasks.
2533 				 */
2534 				if (high_zoneidx < ZONE_NORMAL)
2535 					goto nopage;
2536 			}
2537 
2538 			goto restart;
2539 		}
2540 	}
2541 
2542 	/* Check if we should retry the allocation */
2543 	pages_reclaimed += did_some_progress;
2544 	if (should_alloc_retry(gfp_mask, order, did_some_progress,
2545 						pages_reclaimed)) {
2546 		/* Wait for some write requests to complete then retry */
2547 		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2548 		goto rebalance;
2549 	} else {
2550 		/*
2551 		 * High-order allocations do not necessarily loop after
2552 		 * direct reclaim and reclaim/compaction depends on compaction
2553 		 * being called after reclaim so call directly if necessary
2554 		 */
2555 		page = __alloc_pages_direct_compact(gfp_mask, order,
2556 					zonelist, high_zoneidx,
2557 					nodemask,
2558 					alloc_flags, preferred_zone,
2559 					migratetype, sync_migration,
2560 					&contended_compaction,
2561 					&deferred_compaction,
2562 					&did_some_progress);
2563 		if (page)
2564 			goto got_pg;
2565 	}
2566 
2567 nopage:
2568 	warn_alloc_failed(gfp_mask, order, NULL);
2569 	return page;
2570 got_pg:
2571 	if (kmemcheck_enabled)
2572 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2573 
2574 	return page;
2575 }
2576 
2577 /*
2578  * This is the 'heart' of the zoned buddy allocator.
2579  */
2580 struct page *
2581 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2582 			struct zonelist *zonelist, nodemask_t *nodemask)
2583 {
2584 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2585 	struct zone *preferred_zone;
2586 	struct page *page = NULL;
2587 	int migratetype = allocflags_to_migratetype(gfp_mask);
2588 	unsigned int cpuset_mems_cookie;
2589 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
2590 	struct mem_cgroup *memcg = NULL;
2591 
2592 	gfp_mask &= gfp_allowed_mask;
2593 
2594 	lockdep_trace_alloc(gfp_mask);
2595 
2596 	might_sleep_if(gfp_mask & __GFP_WAIT);
2597 
2598 	if (should_fail_alloc_page(gfp_mask, order))
2599 		return NULL;
2600 
2601 	/*
2602 	 * Check the zones suitable for the gfp_mask contain at least one
2603 	 * valid zone. It's possible to have an empty zonelist as a result
2604 	 * of GFP_THISNODE and a memoryless node
2605 	 */
2606 	if (unlikely(!zonelist->_zonerefs->zone))
2607 		return NULL;
2608 
2609 	/*
2610 	 * Will only have any effect when __GFP_KMEMCG is set.  This is
2611 	 * verified in the (always inline) callee
2612 	 */
2613 	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2614 		return NULL;
2615 
2616 retry_cpuset:
2617 	cpuset_mems_cookie = get_mems_allowed();
2618 
2619 	/* The preferred zone is used for statistics later */
2620 	first_zones_zonelist(zonelist, high_zoneidx,
2621 				nodemask ? : &cpuset_current_mems_allowed,
2622 				&preferred_zone);
2623 	if (!preferred_zone)
2624 		goto out;
2625 
2626 #ifdef CONFIG_CMA
2627 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2628 		alloc_flags |= ALLOC_CMA;
2629 #endif
2630 	/* First allocation attempt */
2631 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2632 			zonelist, high_zoneidx, alloc_flags,
2633 			preferred_zone, migratetype);
2634 	if (unlikely(!page)) {
2635 		/*
2636 		 * Runtime PM, block IO and its error handling path
2637 		 * can deadlock because I/O on the device might not
2638 		 * complete.
2639 		 */
2640 		gfp_mask = memalloc_noio_flags(gfp_mask);
2641 		page = __alloc_pages_slowpath(gfp_mask, order,
2642 				zonelist, high_zoneidx, nodemask,
2643 				preferred_zone, migratetype);
2644 	}
2645 
2646 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2647 
2648 out:
2649 	/*
2650 	 * When updating a task's mems_allowed, it is possible to race with
2651 	 * parallel threads in such a way that an allocation can fail while
2652 	 * the mask is being updated. If a page allocation is about to fail,
2653 	 * check if the cpuset changed during allocation and if so, retry.
2654 	 */
2655 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2656 		goto retry_cpuset;
2657 
2658 	memcg_kmem_commit_charge(page, memcg, order);
2659 
2660 	return page;
2661 }
2662 EXPORT_SYMBOL(__alloc_pages_nodemask);
2663 
2664 /*
2665  * Common helper functions.
2666  */
2667 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2668 {
2669 	struct page *page;
2670 
2671 	/*
2672 	 * __get_free_pages() returns a 32-bit address, which cannot represent
2673 	 * a highmem page
2674 	 */
2675 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2676 
2677 	page = alloc_pages(gfp_mask, order);
2678 	if (!page)
2679 		return 0;
2680 	return (unsigned long) page_address(page);
2681 }
2682 EXPORT_SYMBOL(__get_free_pages);
2683 
2684 unsigned long get_zeroed_page(gfp_t gfp_mask)
2685 {
2686 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2687 }
2688 EXPORT_SYMBOL(get_zeroed_page);
2689 
2690 void __free_pages(struct page *page, unsigned int order)
2691 {
2692 	if (put_page_testzero(page)) {
2693 		if (order == 0)
2694 			free_hot_cold_page(page, 0);
2695 		else
2696 			__free_pages_ok(page, order);
2697 	}
2698 }
2699 
2700 EXPORT_SYMBOL(__free_pages);
2701 
2702 void free_pages(unsigned long addr, unsigned int order)
2703 {
2704 	if (addr != 0) {
2705 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2706 		__free_pages(virt_to_page((void *)addr), order);
2707 	}
2708 }
2709 
2710 EXPORT_SYMBOL(free_pages);
2711 
2712 /*
2713  * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
2714  * pages allocated with __GFP_KMEMCG.
2715  *
2716  * Those pages are accounted to a particular memcg, embedded in the
2717  * corresponding page_cgroup. To avoid adding a hit in the allocator to search
2718  * for that information only to find out that it is NULL for users who have no
2719  * interest in that whatsoever, we provide these functions.
2720  *
2721  * The caller knows better which flags it relies on.
2722  */
2723 void __free_memcg_kmem_pages(struct page *page, unsigned int order)
2724 {
2725 	memcg_kmem_uncharge_pages(page, order);
2726 	__free_pages(page, order);
2727 }
2728 
2729 void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
2730 {
2731 	if (addr != 0) {
2732 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2733 		__free_memcg_kmem_pages(virt_to_page((void *)addr), order);
2734 	}
2735 }
2736 
2737 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2738 {
2739 	if (addr) {
2740 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2741 		unsigned long used = addr + PAGE_ALIGN(size);
2742 
2743 		split_page(virt_to_page((void *)addr), order);
2744 		while (used < alloc_end) {
2745 			free_page(used);
2746 			used += PAGE_SIZE;
2747 		}
2748 	}
2749 	return (void *)addr;
2750 }
2751 
2752 /**
2753  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2754  * @size: the number of bytes to allocate
2755  * @gfp_mask: GFP flags for the allocation
2756  *
2757  * This function is similar to alloc_pages(), except that it allocates the
2758  * minimum number of pages to satisfy the request.  alloc_pages() can only
2759  * allocate memory in power-of-two pages.
2760  *
2761  * This function is also limited by MAX_ORDER.
2762  *
2763  * Memory allocated by this function must be released by free_pages_exact().
2764  */
2765 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2766 {
2767 	unsigned int order = get_order(size);
2768 	unsigned long addr;
2769 
2770 	addr = __get_free_pages(gfp_mask, order);
2771 	return make_alloc_exact(addr, order, size);
2772 }
2773 EXPORT_SYMBOL(alloc_pages_exact);
2774 
2775 /**
2776  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2777  *			   pages on a node.
2778  * @nid: the preferred node ID where memory should be allocated
2779  * @size: the number of bytes to allocate
2780  * @gfp_mask: GFP flags for the allocation
2781  *
2782  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2783  * back.
2784  * Note this is not alloc_pages_exact_node() which allocates on a specific node,
2785  * but is not exact.
2786  */
2787 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2788 {
2789 	unsigned order = get_order(size);
2790 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
2791 	if (!p)
2792 		return NULL;
2793 	return make_alloc_exact((unsigned long)page_address(p), order, size);
2794 }
2795 EXPORT_SYMBOL(alloc_pages_exact_nid);
2796 
2797 /**
2798  * free_pages_exact - release memory allocated via alloc_pages_exact()
2799  * @virt: the value returned by alloc_pages_exact.
2800  * @size: size of allocation, same value as passed to alloc_pages_exact().
2801  *
2802  * Release the memory allocated by a previous call to alloc_pages_exact.
2803  */
2804 void free_pages_exact(void *virt, size_t size)
2805 {
2806 	unsigned long addr = (unsigned long)virt;
2807 	unsigned long end = addr + PAGE_ALIGN(size);
2808 
2809 	while (addr < end) {
2810 		free_page(addr);
2811 		addr += PAGE_SIZE;
2812 	}
2813 }
2814 EXPORT_SYMBOL(free_pages_exact);
2815 
2816 /**
2817  * nr_free_zone_pages - count number of pages beyond high watermark
2818  * @offset: The zone index of the highest zone
2819  *
2820  * nr_free_zone_pages() counts the number of counts pages which are beyond the
2821  * high watermark within all zones at or below a given zone index.  For each
2822  * zone, the number of pages is calculated as:
2823  *     present_pages - high_pages
2824  */
2825 static unsigned long nr_free_zone_pages(int offset)
2826 {
2827 	struct zoneref *z;
2828 	struct zone *zone;
2829 
2830 	/* Just pick one node, since fallback list is circular */
2831 	unsigned long sum = 0;
2832 
2833 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2834 
2835 	for_each_zone_zonelist(zone, z, zonelist, offset) {
2836 		unsigned long size = zone->managed_pages;
2837 		unsigned long high = high_wmark_pages(zone);
2838 		if (size > high)
2839 			sum += size - high;
2840 	}
2841 
2842 	return sum;
2843 }
2844 
2845 /**
2846  * nr_free_buffer_pages - count number of pages beyond high watermark
2847  *
2848  * nr_free_buffer_pages() counts the number of pages which are beyond the high
2849  * watermark within ZONE_DMA and ZONE_NORMAL.
2850  */
2851 unsigned long nr_free_buffer_pages(void)
2852 {
2853 	return nr_free_zone_pages(gfp_zone(GFP_USER));
2854 }
2855 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2856 
2857 /**
2858  * nr_free_pagecache_pages - count number of pages beyond high watermark
2859  *
2860  * nr_free_pagecache_pages() counts the number of pages which are beyond the
2861  * high watermark within all zones.
2862  */
2863 unsigned long nr_free_pagecache_pages(void)
2864 {
2865 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2866 }
2867 
2868 static inline void show_node(struct zone *zone)
2869 {
2870 	if (IS_ENABLED(CONFIG_NUMA))
2871 		printk("Node %d ", zone_to_nid(zone));
2872 }
2873 
2874 void si_meminfo(struct sysinfo *val)
2875 {
2876 	val->totalram = totalram_pages;
2877 	val->sharedram = 0;
2878 	val->freeram = global_page_state(NR_FREE_PAGES);
2879 	val->bufferram = nr_blockdev_pages();
2880 	val->totalhigh = totalhigh_pages;
2881 	val->freehigh = nr_free_highpages();
2882 	val->mem_unit = PAGE_SIZE;
2883 }
2884 
2885 EXPORT_SYMBOL(si_meminfo);
2886 
2887 #ifdef CONFIG_NUMA
2888 void si_meminfo_node(struct sysinfo *val, int nid)
2889 {
2890 	pg_data_t *pgdat = NODE_DATA(nid);
2891 
2892 	val->totalram = pgdat->node_present_pages;
2893 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2894 #ifdef CONFIG_HIGHMEM
2895 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
2896 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2897 			NR_FREE_PAGES);
2898 #else
2899 	val->totalhigh = 0;
2900 	val->freehigh = 0;
2901 #endif
2902 	val->mem_unit = PAGE_SIZE;
2903 }
2904 #endif
2905 
2906 /*
2907  * Determine whether the node should be displayed or not, depending on whether
2908  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2909  */
2910 bool skip_free_areas_node(unsigned int flags, int nid)
2911 {
2912 	bool ret = false;
2913 	unsigned int cpuset_mems_cookie;
2914 
2915 	if (!(flags & SHOW_MEM_FILTER_NODES))
2916 		goto out;
2917 
2918 	do {
2919 		cpuset_mems_cookie = get_mems_allowed();
2920 		ret = !node_isset(nid, cpuset_current_mems_allowed);
2921 	} while (!put_mems_allowed(cpuset_mems_cookie));
2922 out:
2923 	return ret;
2924 }
2925 
2926 #define K(x) ((x) << (PAGE_SHIFT-10))
2927 
2928 static void show_migration_types(unsigned char type)
2929 {
2930 	static const char types[MIGRATE_TYPES] = {
2931 		[MIGRATE_UNMOVABLE]	= 'U',
2932 		[MIGRATE_RECLAIMABLE]	= 'E',
2933 		[MIGRATE_MOVABLE]	= 'M',
2934 		[MIGRATE_RESERVE]	= 'R',
2935 #ifdef CONFIG_CMA
2936 		[MIGRATE_CMA]		= 'C',
2937 #endif
2938 #ifdef CONFIG_MEMORY_ISOLATION
2939 		[MIGRATE_ISOLATE]	= 'I',
2940 #endif
2941 	};
2942 	char tmp[MIGRATE_TYPES + 1];
2943 	char *p = tmp;
2944 	int i;
2945 
2946 	for (i = 0; i < MIGRATE_TYPES; i++) {
2947 		if (type & (1 << i))
2948 			*p++ = types[i];
2949 	}
2950 
2951 	*p = '\0';
2952 	printk("(%s) ", tmp);
2953 }
2954 
2955 /*
2956  * Show free area list (used inside shift_scroll-lock stuff)
2957  * We also calculate the percentage fragmentation. We do this by counting the
2958  * memory on each free list with the exception of the first item on the list.
2959  * Suppresses nodes that are not allowed by current's cpuset if
2960  * SHOW_MEM_FILTER_NODES is passed.
2961  */
2962 void show_free_areas(unsigned int filter)
2963 {
2964 	int cpu;
2965 	struct zone *zone;
2966 
2967 	for_each_populated_zone(zone) {
2968 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2969 			continue;
2970 		show_node(zone);
2971 		printk("%s per-cpu:\n", zone->name);
2972 
2973 		for_each_online_cpu(cpu) {
2974 			struct per_cpu_pageset *pageset;
2975 
2976 			pageset = per_cpu_ptr(zone->pageset, cpu);
2977 
2978 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2979 			       cpu, pageset->pcp.high,
2980 			       pageset->pcp.batch, pageset->pcp.count);
2981 		}
2982 	}
2983 
2984 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2985 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2986 		" unevictable:%lu"
2987 		" dirty:%lu writeback:%lu unstable:%lu\n"
2988 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2989 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
2990 		" free_cma:%lu\n",
2991 		global_page_state(NR_ACTIVE_ANON),
2992 		global_page_state(NR_INACTIVE_ANON),
2993 		global_page_state(NR_ISOLATED_ANON),
2994 		global_page_state(NR_ACTIVE_FILE),
2995 		global_page_state(NR_INACTIVE_FILE),
2996 		global_page_state(NR_ISOLATED_FILE),
2997 		global_page_state(NR_UNEVICTABLE),
2998 		global_page_state(NR_FILE_DIRTY),
2999 		global_page_state(NR_WRITEBACK),
3000 		global_page_state(NR_UNSTABLE_NFS),
3001 		global_page_state(NR_FREE_PAGES),
3002 		global_page_state(NR_SLAB_RECLAIMABLE),
3003 		global_page_state(NR_SLAB_UNRECLAIMABLE),
3004 		global_page_state(NR_FILE_MAPPED),
3005 		global_page_state(NR_SHMEM),
3006 		global_page_state(NR_PAGETABLE),
3007 		global_page_state(NR_BOUNCE),
3008 		global_page_state(NR_FREE_CMA_PAGES));
3009 
3010 	for_each_populated_zone(zone) {
3011 		int i;
3012 
3013 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3014 			continue;
3015 		show_node(zone);
3016 		printk("%s"
3017 			" free:%lukB"
3018 			" min:%lukB"
3019 			" low:%lukB"
3020 			" high:%lukB"
3021 			" active_anon:%lukB"
3022 			" inactive_anon:%lukB"
3023 			" active_file:%lukB"
3024 			" inactive_file:%lukB"
3025 			" unevictable:%lukB"
3026 			" isolated(anon):%lukB"
3027 			" isolated(file):%lukB"
3028 			" present:%lukB"
3029 			" managed:%lukB"
3030 			" mlocked:%lukB"
3031 			" dirty:%lukB"
3032 			" writeback:%lukB"
3033 			" mapped:%lukB"
3034 			" shmem:%lukB"
3035 			" slab_reclaimable:%lukB"
3036 			" slab_unreclaimable:%lukB"
3037 			" kernel_stack:%lukB"
3038 			" pagetables:%lukB"
3039 			" unstable:%lukB"
3040 			" bounce:%lukB"
3041 			" free_cma:%lukB"
3042 			" writeback_tmp:%lukB"
3043 			" pages_scanned:%lu"
3044 			" all_unreclaimable? %s"
3045 			"\n",
3046 			zone->name,
3047 			K(zone_page_state(zone, NR_FREE_PAGES)),
3048 			K(min_wmark_pages(zone)),
3049 			K(low_wmark_pages(zone)),
3050 			K(high_wmark_pages(zone)),
3051 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
3052 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
3053 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
3054 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
3055 			K(zone_page_state(zone, NR_UNEVICTABLE)),
3056 			K(zone_page_state(zone, NR_ISOLATED_ANON)),
3057 			K(zone_page_state(zone, NR_ISOLATED_FILE)),
3058 			K(zone->present_pages),
3059 			K(zone->managed_pages),
3060 			K(zone_page_state(zone, NR_MLOCK)),
3061 			K(zone_page_state(zone, NR_FILE_DIRTY)),
3062 			K(zone_page_state(zone, NR_WRITEBACK)),
3063 			K(zone_page_state(zone, NR_FILE_MAPPED)),
3064 			K(zone_page_state(zone, NR_SHMEM)),
3065 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3066 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3067 			zone_page_state(zone, NR_KERNEL_STACK) *
3068 				THREAD_SIZE / 1024,
3069 			K(zone_page_state(zone, NR_PAGETABLE)),
3070 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3071 			K(zone_page_state(zone, NR_BOUNCE)),
3072 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3073 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3074 			zone->pages_scanned,
3075 			(zone->all_unreclaimable ? "yes" : "no")
3076 			);
3077 		printk("lowmem_reserve[]:");
3078 		for (i = 0; i < MAX_NR_ZONES; i++)
3079 			printk(" %lu", zone->lowmem_reserve[i]);
3080 		printk("\n");
3081 	}
3082 
3083 	for_each_populated_zone(zone) {
3084  		unsigned long nr[MAX_ORDER], flags, order, total = 0;
3085 		unsigned char types[MAX_ORDER];
3086 
3087 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3088 			continue;
3089 		show_node(zone);
3090 		printk("%s: ", zone->name);
3091 
3092 		spin_lock_irqsave(&zone->lock, flags);
3093 		for (order = 0; order < MAX_ORDER; order++) {
3094 			struct free_area *area = &zone->free_area[order];
3095 			int type;
3096 
3097 			nr[order] = area->nr_free;
3098 			total += nr[order] << order;
3099 
3100 			types[order] = 0;
3101 			for (type = 0; type < MIGRATE_TYPES; type++) {
3102 				if (!list_empty(&area->free_list[type]))
3103 					types[order] |= 1 << type;
3104 			}
3105 		}
3106 		spin_unlock_irqrestore(&zone->lock, flags);
3107 		for (order = 0; order < MAX_ORDER; order++) {
3108 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
3109 			if (nr[order])
3110 				show_migration_types(types[order]);
3111 		}
3112 		printk("= %lukB\n", K(total));
3113 	}
3114 
3115 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3116 
3117 	show_swap_cache_info();
3118 }
3119 
3120 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3121 {
3122 	zoneref->zone = zone;
3123 	zoneref->zone_idx = zone_idx(zone);
3124 }
3125 
3126 /*
3127  * Builds allocation fallback zone lists.
3128  *
3129  * Add all populated zones of a node to the zonelist.
3130  */
3131 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3132 				int nr_zones, enum zone_type zone_type)
3133 {
3134 	struct zone *zone;
3135 
3136 	BUG_ON(zone_type >= MAX_NR_ZONES);
3137 	zone_type++;
3138 
3139 	do {
3140 		zone_type--;
3141 		zone = pgdat->node_zones + zone_type;
3142 		if (populated_zone(zone)) {
3143 			zoneref_set_zone(zone,
3144 				&zonelist->_zonerefs[nr_zones++]);
3145 			check_highest_zone(zone_type);
3146 		}
3147 
3148 	} while (zone_type);
3149 	return nr_zones;
3150 }
3151 
3152 
3153 /*
3154  *  zonelist_order:
3155  *  0 = automatic detection of better ordering.
3156  *  1 = order by ([node] distance, -zonetype)
3157  *  2 = order by (-zonetype, [node] distance)
3158  *
3159  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3160  *  the same zonelist. So only NUMA can configure this param.
3161  */
3162 #define ZONELIST_ORDER_DEFAULT  0
3163 #define ZONELIST_ORDER_NODE     1
3164 #define ZONELIST_ORDER_ZONE     2
3165 
3166 /* zonelist order in the kernel.
3167  * set_zonelist_order() will set this to NODE or ZONE.
3168  */
3169 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3170 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3171 
3172 
3173 #ifdef CONFIG_NUMA
3174 /* The value user specified ....changed by config */
3175 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3176 /* string for sysctl */
3177 #define NUMA_ZONELIST_ORDER_LEN	16
3178 char numa_zonelist_order[16] = "default";
3179 
3180 /*
3181  * interface for configure zonelist ordering.
3182  * command line option "numa_zonelist_order"
3183  *	= "[dD]efault	- default, automatic configuration.
3184  *	= "[nN]ode 	- order by node locality, then by zone within node
3185  *	= "[zZ]one      - order by zone, then by locality within zone
3186  */
3187 
3188 static int __parse_numa_zonelist_order(char *s)
3189 {
3190 	if (*s == 'd' || *s == 'D') {
3191 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3192 	} else if (*s == 'n' || *s == 'N') {
3193 		user_zonelist_order = ZONELIST_ORDER_NODE;
3194 	} else if (*s == 'z' || *s == 'Z') {
3195 		user_zonelist_order = ZONELIST_ORDER_ZONE;
3196 	} else {
3197 		printk(KERN_WARNING
3198 			"Ignoring invalid numa_zonelist_order value:  "
3199 			"%s\n", s);
3200 		return -EINVAL;
3201 	}
3202 	return 0;
3203 }
3204 
3205 static __init int setup_numa_zonelist_order(char *s)
3206 {
3207 	int ret;
3208 
3209 	if (!s)
3210 		return 0;
3211 
3212 	ret = __parse_numa_zonelist_order(s);
3213 	if (ret == 0)
3214 		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3215 
3216 	return ret;
3217 }
3218 early_param("numa_zonelist_order", setup_numa_zonelist_order);
3219 
3220 /*
3221  * sysctl handler for numa_zonelist_order
3222  */
3223 int numa_zonelist_order_handler(ctl_table *table, int write,
3224 		void __user *buffer, size_t *length,
3225 		loff_t *ppos)
3226 {
3227 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
3228 	int ret;
3229 	static DEFINE_MUTEX(zl_order_mutex);
3230 
3231 	mutex_lock(&zl_order_mutex);
3232 	if (write)
3233 		strcpy(saved_string, (char*)table->data);
3234 	ret = proc_dostring(table, write, buffer, length, ppos);
3235 	if (ret)
3236 		goto out;
3237 	if (write) {
3238 		int oldval = user_zonelist_order;
3239 		if (__parse_numa_zonelist_order((char*)table->data)) {
3240 			/*
3241 			 * bogus value.  restore saved string
3242 			 */
3243 			strncpy((char*)table->data, saved_string,
3244 				NUMA_ZONELIST_ORDER_LEN);
3245 			user_zonelist_order = oldval;
3246 		} else if (oldval != user_zonelist_order) {
3247 			mutex_lock(&zonelists_mutex);
3248 			build_all_zonelists(NULL, NULL);
3249 			mutex_unlock(&zonelists_mutex);
3250 		}
3251 	}
3252 out:
3253 	mutex_unlock(&zl_order_mutex);
3254 	return ret;
3255 }
3256 
3257 
3258 #define MAX_NODE_LOAD (nr_online_nodes)
3259 static int node_load[MAX_NUMNODES];
3260 
3261 /**
3262  * find_next_best_node - find the next node that should appear in a given node's fallback list
3263  * @node: node whose fallback list we're appending
3264  * @used_node_mask: nodemask_t of already used nodes
3265  *
3266  * We use a number of factors to determine which is the next node that should
3267  * appear on a given node's fallback list.  The node should not have appeared
3268  * already in @node's fallback list, and it should be the next closest node
3269  * according to the distance array (which contains arbitrary distance values
3270  * from each node to each node in the system), and should also prefer nodes
3271  * with no CPUs, since presumably they'll have very little allocation pressure
3272  * on them otherwise.
3273  * It returns -1 if no node is found.
3274  */
3275 static int find_next_best_node(int node, nodemask_t *used_node_mask)
3276 {
3277 	int n, val;
3278 	int min_val = INT_MAX;
3279 	int best_node = NUMA_NO_NODE;
3280 	const struct cpumask *tmp = cpumask_of_node(0);
3281 
3282 	/* Use the local node if we haven't already */
3283 	if (!node_isset(node, *used_node_mask)) {
3284 		node_set(node, *used_node_mask);
3285 		return node;
3286 	}
3287 
3288 	for_each_node_state(n, N_MEMORY) {
3289 
3290 		/* Don't want a node to appear more than once */
3291 		if (node_isset(n, *used_node_mask))
3292 			continue;
3293 
3294 		/* Use the distance array to find the distance */
3295 		val = node_distance(node, n);
3296 
3297 		/* Penalize nodes under us ("prefer the next node") */
3298 		val += (n < node);
3299 
3300 		/* Give preference to headless and unused nodes */
3301 		tmp = cpumask_of_node(n);
3302 		if (!cpumask_empty(tmp))
3303 			val += PENALTY_FOR_NODE_WITH_CPUS;
3304 
3305 		/* Slight preference for less loaded node */
3306 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3307 		val += node_load[n];
3308 
3309 		if (val < min_val) {
3310 			min_val = val;
3311 			best_node = n;
3312 		}
3313 	}
3314 
3315 	if (best_node >= 0)
3316 		node_set(best_node, *used_node_mask);
3317 
3318 	return best_node;
3319 }
3320 
3321 
3322 /*
3323  * Build zonelists ordered by node and zones within node.
3324  * This results in maximum locality--normal zone overflows into local
3325  * DMA zone, if any--but risks exhausting DMA zone.
3326  */
3327 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3328 {
3329 	int j;
3330 	struct zonelist *zonelist;
3331 
3332 	zonelist = &pgdat->node_zonelists[0];
3333 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3334 		;
3335 	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3336 							MAX_NR_ZONES - 1);
3337 	zonelist->_zonerefs[j].zone = NULL;
3338 	zonelist->_zonerefs[j].zone_idx = 0;
3339 }
3340 
3341 /*
3342  * Build gfp_thisnode zonelists
3343  */
3344 static void build_thisnode_zonelists(pg_data_t *pgdat)
3345 {
3346 	int j;
3347 	struct zonelist *zonelist;
3348 
3349 	zonelist = &pgdat->node_zonelists[1];
3350 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3351 	zonelist->_zonerefs[j].zone = NULL;
3352 	zonelist->_zonerefs[j].zone_idx = 0;
3353 }
3354 
3355 /*
3356  * Build zonelists ordered by zone and nodes within zones.
3357  * This results in conserving DMA zone[s] until all Normal memory is
3358  * exhausted, but results in overflowing to remote node while memory
3359  * may still exist in local DMA zone.
3360  */
3361 static int node_order[MAX_NUMNODES];
3362 
3363 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3364 {
3365 	int pos, j, node;
3366 	int zone_type;		/* needs to be signed */
3367 	struct zone *z;
3368 	struct zonelist *zonelist;
3369 
3370 	zonelist = &pgdat->node_zonelists[0];
3371 	pos = 0;
3372 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3373 		for (j = 0; j < nr_nodes; j++) {
3374 			node = node_order[j];
3375 			z = &NODE_DATA(node)->node_zones[zone_type];
3376 			if (populated_zone(z)) {
3377 				zoneref_set_zone(z,
3378 					&zonelist->_zonerefs[pos++]);
3379 				check_highest_zone(zone_type);
3380 			}
3381 		}
3382 	}
3383 	zonelist->_zonerefs[pos].zone = NULL;
3384 	zonelist->_zonerefs[pos].zone_idx = 0;
3385 }
3386 
3387 static int default_zonelist_order(void)
3388 {
3389 	int nid, zone_type;
3390 	unsigned long low_kmem_size,total_size;
3391 	struct zone *z;
3392 	int average_size;
3393 	/*
3394          * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
3395 	 * If they are really small and used heavily, the system can fall
3396 	 * into OOM very easily.
3397 	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
3398 	 */
3399 	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
3400 	low_kmem_size = 0;
3401 	total_size = 0;
3402 	for_each_online_node(nid) {
3403 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3404 			z = &NODE_DATA(nid)->node_zones[zone_type];
3405 			if (populated_zone(z)) {
3406 				if (zone_type < ZONE_NORMAL)
3407 					low_kmem_size += z->present_pages;
3408 				total_size += z->present_pages;
3409 			} else if (zone_type == ZONE_NORMAL) {
3410 				/*
3411 				 * If any node has only lowmem, then node order
3412 				 * is preferred to allow kernel allocations
3413 				 * locally; otherwise, they can easily infringe
3414 				 * on other nodes when there is an abundance of
3415 				 * lowmem available to allocate from.
3416 				 */
3417 				return ZONELIST_ORDER_NODE;
3418 			}
3419 		}
3420 	}
3421 	if (!low_kmem_size ||  /* there are no DMA area. */
3422 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
3423 		return ZONELIST_ORDER_NODE;
3424 	/*
3425 	 * look into each node's config.
3426   	 * If there is a node whose DMA/DMA32 memory is very big area on
3427  	 * local memory, NODE_ORDER may be suitable.
3428          */
3429 	average_size = total_size /
3430 				(nodes_weight(node_states[N_MEMORY]) + 1);
3431 	for_each_online_node(nid) {
3432 		low_kmem_size = 0;
3433 		total_size = 0;
3434 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3435 			z = &NODE_DATA(nid)->node_zones[zone_type];
3436 			if (populated_zone(z)) {
3437 				if (zone_type < ZONE_NORMAL)
3438 					low_kmem_size += z->present_pages;
3439 				total_size += z->present_pages;
3440 			}
3441 		}
3442 		if (low_kmem_size &&
3443 		    total_size > average_size && /* ignore small node */
3444 		    low_kmem_size > total_size * 70/100)
3445 			return ZONELIST_ORDER_NODE;
3446 	}
3447 	return ZONELIST_ORDER_ZONE;
3448 }
3449 
3450 static void set_zonelist_order(void)
3451 {
3452 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3453 		current_zonelist_order = default_zonelist_order();
3454 	else
3455 		current_zonelist_order = user_zonelist_order;
3456 }
3457 
3458 static void build_zonelists(pg_data_t *pgdat)
3459 {
3460 	int j, node, load;
3461 	enum zone_type i;
3462 	nodemask_t used_mask;
3463 	int local_node, prev_node;
3464 	struct zonelist *zonelist;
3465 	int order = current_zonelist_order;
3466 
3467 	/* initialize zonelists */
3468 	for (i = 0; i < MAX_ZONELISTS; i++) {
3469 		zonelist = pgdat->node_zonelists + i;
3470 		zonelist->_zonerefs[0].zone = NULL;
3471 		zonelist->_zonerefs[0].zone_idx = 0;
3472 	}
3473 
3474 	/* NUMA-aware ordering of nodes */
3475 	local_node = pgdat->node_id;
3476 	load = nr_online_nodes;
3477 	prev_node = local_node;
3478 	nodes_clear(used_mask);
3479 
3480 	memset(node_order, 0, sizeof(node_order));
3481 	j = 0;
3482 
3483 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3484 		/*
3485 		 * We don't want to pressure a particular node.
3486 		 * So adding penalty to the first node in same
3487 		 * distance group to make it round-robin.
3488 		 */
3489 		if (node_distance(local_node, node) !=
3490 		    node_distance(local_node, prev_node))
3491 			node_load[node] = load;
3492 
3493 		prev_node = node;
3494 		load--;
3495 		if (order == ZONELIST_ORDER_NODE)
3496 			build_zonelists_in_node_order(pgdat, node);
3497 		else
3498 			node_order[j++] = node;	/* remember order */
3499 	}
3500 
3501 	if (order == ZONELIST_ORDER_ZONE) {
3502 		/* calculate node order -- i.e., DMA last! */
3503 		build_zonelists_in_zone_order(pgdat, j);
3504 	}
3505 
3506 	build_thisnode_zonelists(pgdat);
3507 }
3508 
3509 /* Construct the zonelist performance cache - see further mmzone.h */
3510 static void build_zonelist_cache(pg_data_t *pgdat)
3511 {
3512 	struct zonelist *zonelist;
3513 	struct zonelist_cache *zlc;
3514 	struct zoneref *z;
3515 
3516 	zonelist = &pgdat->node_zonelists[0];
3517 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3518 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3519 	for (z = zonelist->_zonerefs; z->zone; z++)
3520 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3521 }
3522 
3523 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3524 /*
3525  * Return node id of node used for "local" allocations.
3526  * I.e., first node id of first zone in arg node's generic zonelist.
3527  * Used for initializing percpu 'numa_mem', which is used primarily
3528  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3529  */
3530 int local_memory_node(int node)
3531 {
3532 	struct zone *zone;
3533 
3534 	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3535 				   gfp_zone(GFP_KERNEL),
3536 				   NULL,
3537 				   &zone);
3538 	return zone->node;
3539 }
3540 #endif
3541 
3542 #else	/* CONFIG_NUMA */
3543 
3544 static void set_zonelist_order(void)
3545 {
3546 	current_zonelist_order = ZONELIST_ORDER_ZONE;
3547 }
3548 
3549 static void build_zonelists(pg_data_t *pgdat)
3550 {
3551 	int node, local_node;
3552 	enum zone_type j;
3553 	struct zonelist *zonelist;
3554 
3555 	local_node = pgdat->node_id;
3556 
3557 	zonelist = &pgdat->node_zonelists[0];
3558 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3559 
3560 	/*
3561 	 * Now we build the zonelist so that it contains the zones
3562 	 * of all the other nodes.
3563 	 * We don't want to pressure a particular node, so when
3564 	 * building the zones for node N, we make sure that the
3565 	 * zones coming right after the local ones are those from
3566 	 * node N+1 (modulo N)
3567 	 */
3568 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3569 		if (!node_online(node))
3570 			continue;
3571 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3572 							MAX_NR_ZONES - 1);
3573 	}
3574 	for (node = 0; node < local_node; node++) {
3575 		if (!node_online(node))
3576 			continue;
3577 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3578 							MAX_NR_ZONES - 1);
3579 	}
3580 
3581 	zonelist->_zonerefs[j].zone = NULL;
3582 	zonelist->_zonerefs[j].zone_idx = 0;
3583 }
3584 
3585 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3586 static void build_zonelist_cache(pg_data_t *pgdat)
3587 {
3588 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
3589 }
3590 
3591 #endif	/* CONFIG_NUMA */
3592 
3593 /*
3594  * Boot pageset table. One per cpu which is going to be used for all
3595  * zones and all nodes. The parameters will be set in such a way
3596  * that an item put on a list will immediately be handed over to
3597  * the buddy list. This is safe since pageset manipulation is done
3598  * with interrupts disabled.
3599  *
3600  * The boot_pagesets must be kept even after bootup is complete for
3601  * unused processors and/or zones. They do play a role for bootstrapping
3602  * hotplugged processors.
3603  *
3604  * zoneinfo_show() and maybe other functions do
3605  * not check if the processor is online before following the pageset pointer.
3606  * Other parts of the kernel may not check if the zone is available.
3607  */
3608 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3609 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3610 static void setup_zone_pageset(struct zone *zone);
3611 
3612 /*
3613  * Global mutex to protect against size modification of zonelists
3614  * as well as to serialize pageset setup for the new populated zone.
3615  */
3616 DEFINE_MUTEX(zonelists_mutex);
3617 
3618 /* return values int ....just for stop_machine() */
3619 static int __build_all_zonelists(void *data)
3620 {
3621 	int nid;
3622 	int cpu;
3623 	pg_data_t *self = data;
3624 
3625 #ifdef CONFIG_NUMA
3626 	memset(node_load, 0, sizeof(node_load));
3627 #endif
3628 
3629 	if (self && !node_online(self->node_id)) {
3630 		build_zonelists(self);
3631 		build_zonelist_cache(self);
3632 	}
3633 
3634 	for_each_online_node(nid) {
3635 		pg_data_t *pgdat = NODE_DATA(nid);
3636 
3637 		build_zonelists(pgdat);
3638 		build_zonelist_cache(pgdat);
3639 	}
3640 
3641 	/*
3642 	 * Initialize the boot_pagesets that are going to be used
3643 	 * for bootstrapping processors. The real pagesets for
3644 	 * each zone will be allocated later when the per cpu
3645 	 * allocator is available.
3646 	 *
3647 	 * boot_pagesets are used also for bootstrapping offline
3648 	 * cpus if the system is already booted because the pagesets
3649 	 * are needed to initialize allocators on a specific cpu too.
3650 	 * F.e. the percpu allocator needs the page allocator which
3651 	 * needs the percpu allocator in order to allocate its pagesets
3652 	 * (a chicken-egg dilemma).
3653 	 */
3654 	for_each_possible_cpu(cpu) {
3655 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3656 
3657 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3658 		/*
3659 		 * We now know the "local memory node" for each node--
3660 		 * i.e., the node of the first zone in the generic zonelist.
3661 		 * Set up numa_mem percpu variable for on-line cpus.  During
3662 		 * boot, only the boot cpu should be on-line;  we'll init the
3663 		 * secondary cpus' numa_mem as they come on-line.  During
3664 		 * node/memory hotplug, we'll fixup all on-line cpus.
3665 		 */
3666 		if (cpu_online(cpu))
3667 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3668 #endif
3669 	}
3670 
3671 	return 0;
3672 }
3673 
3674 /*
3675  * Called with zonelists_mutex held always
3676  * unless system_state == SYSTEM_BOOTING.
3677  */
3678 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
3679 {
3680 	set_zonelist_order();
3681 
3682 	if (system_state == SYSTEM_BOOTING) {
3683 		__build_all_zonelists(NULL);
3684 		mminit_verify_zonelist();
3685 		cpuset_init_current_mems_allowed();
3686 	} else {
3687 		/* we have to stop all cpus to guarantee there is no user
3688 		   of zonelist */
3689 #ifdef CONFIG_MEMORY_HOTPLUG
3690 		if (zone)
3691 			setup_zone_pageset(zone);
3692 #endif
3693 		stop_machine(__build_all_zonelists, pgdat, NULL);
3694 		/* cpuset refresh routine should be here */
3695 	}
3696 	vm_total_pages = nr_free_pagecache_pages();
3697 	/*
3698 	 * Disable grouping by mobility if the number of pages in the
3699 	 * system is too low to allow the mechanism to work. It would be
3700 	 * more accurate, but expensive to check per-zone. This check is
3701 	 * made on memory-hotadd so a system can start with mobility
3702 	 * disabled and enable it later
3703 	 */
3704 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3705 		page_group_by_mobility_disabled = 1;
3706 	else
3707 		page_group_by_mobility_disabled = 0;
3708 
3709 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
3710 		"Total pages: %ld\n",
3711 			nr_online_nodes,
3712 			zonelist_order_name[current_zonelist_order],
3713 			page_group_by_mobility_disabled ? "off" : "on",
3714 			vm_total_pages);
3715 #ifdef CONFIG_NUMA
3716 	printk("Policy zone: %s\n", zone_names[policy_zone]);
3717 #endif
3718 }
3719 
3720 /*
3721  * Helper functions to size the waitqueue hash table.
3722  * Essentially these want to choose hash table sizes sufficiently
3723  * large so that collisions trying to wait on pages are rare.
3724  * But in fact, the number of active page waitqueues on typical
3725  * systems is ridiculously low, less than 200. So this is even
3726  * conservative, even though it seems large.
3727  *
3728  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3729  * waitqueues, i.e. the size of the waitq table given the number of pages.
3730  */
3731 #define PAGES_PER_WAITQUEUE	256
3732 
3733 #ifndef CONFIG_MEMORY_HOTPLUG
3734 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3735 {
3736 	unsigned long size = 1;
3737 
3738 	pages /= PAGES_PER_WAITQUEUE;
3739 
3740 	while (size < pages)
3741 		size <<= 1;
3742 
3743 	/*
3744 	 * Once we have dozens or even hundreds of threads sleeping
3745 	 * on IO we've got bigger problems than wait queue collision.
3746 	 * Limit the size of the wait table to a reasonable size.
3747 	 */
3748 	size = min(size, 4096UL);
3749 
3750 	return max(size, 4UL);
3751 }
3752 #else
3753 /*
3754  * A zone's size might be changed by hot-add, so it is not possible to determine
3755  * a suitable size for its wait_table.  So we use the maximum size now.
3756  *
3757  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
3758  *
3759  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3760  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3761  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3762  *
3763  * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3764  * or more by the traditional way. (See above).  It equals:
3765  *
3766  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3767  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3768  *    powerpc (64K page size)             : =  (32G +16M)byte.
3769  */
3770 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3771 {
3772 	return 4096UL;
3773 }
3774 #endif
3775 
3776 /*
3777  * This is an integer logarithm so that shifts can be used later
3778  * to extract the more random high bits from the multiplicative
3779  * hash function before the remainder is taken.
3780  */
3781 static inline unsigned long wait_table_bits(unsigned long size)
3782 {
3783 	return ffz(~size);
3784 }
3785 
3786 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3787 
3788 /*
3789  * Check if a pageblock contains reserved pages
3790  */
3791 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3792 {
3793 	unsigned long pfn;
3794 
3795 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3796 		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3797 			return 1;
3798 	}
3799 	return 0;
3800 }
3801 
3802 /*
3803  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3804  * of blocks reserved is based on min_wmark_pages(zone). The memory within
3805  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3806  * higher will lead to a bigger reserve which will get freed as contiguous
3807  * blocks as reclaim kicks in
3808  */
3809 static void setup_zone_migrate_reserve(struct zone *zone)
3810 {
3811 	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3812 	struct page *page;
3813 	unsigned long block_migratetype;
3814 	int reserve;
3815 
3816 	/*
3817 	 * Get the start pfn, end pfn and the number of blocks to reserve
3818 	 * We have to be careful to be aligned to pageblock_nr_pages to
3819 	 * make sure that we always check pfn_valid for the first page in
3820 	 * the block.
3821 	 */
3822 	start_pfn = zone->zone_start_pfn;
3823 	end_pfn = zone_end_pfn(zone);
3824 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
3825 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3826 							pageblock_order;
3827 
3828 	/*
3829 	 * Reserve blocks are generally in place to help high-order atomic
3830 	 * allocations that are short-lived. A min_free_kbytes value that
3831 	 * would result in more than 2 reserve blocks for atomic allocations
3832 	 * is assumed to be in place to help anti-fragmentation for the
3833 	 * future allocation of hugepages at runtime.
3834 	 */
3835 	reserve = min(2, reserve);
3836 
3837 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3838 		if (!pfn_valid(pfn))
3839 			continue;
3840 		page = pfn_to_page(pfn);
3841 
3842 		/* Watch out for overlapping nodes */
3843 		if (page_to_nid(page) != zone_to_nid(zone))
3844 			continue;
3845 
3846 		block_migratetype = get_pageblock_migratetype(page);
3847 
3848 		/* Only test what is necessary when the reserves are not met */
3849 		if (reserve > 0) {
3850 			/*
3851 			 * Blocks with reserved pages will never free, skip
3852 			 * them.
3853 			 */
3854 			block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3855 			if (pageblock_is_reserved(pfn, block_end_pfn))
3856 				continue;
3857 
3858 			/* If this block is reserved, account for it */
3859 			if (block_migratetype == MIGRATE_RESERVE) {
3860 				reserve--;
3861 				continue;
3862 			}
3863 
3864 			/* Suitable for reserving if this block is movable */
3865 			if (block_migratetype == MIGRATE_MOVABLE) {
3866 				set_pageblock_migratetype(page,
3867 							MIGRATE_RESERVE);
3868 				move_freepages_block(zone, page,
3869 							MIGRATE_RESERVE);
3870 				reserve--;
3871 				continue;
3872 			}
3873 		}
3874 
3875 		/*
3876 		 * If the reserve is met and this is a previous reserved block,
3877 		 * take it back
3878 		 */
3879 		if (block_migratetype == MIGRATE_RESERVE) {
3880 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3881 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
3882 		}
3883 	}
3884 }
3885 
3886 /*
3887  * Initially all pages are reserved - free ones are freed
3888  * up by free_all_bootmem() once the early boot process is
3889  * done. Non-atomic initialization, single-pass.
3890  */
3891 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3892 		unsigned long start_pfn, enum memmap_context context)
3893 {
3894 	struct page *page;
3895 	unsigned long end_pfn = start_pfn + size;
3896 	unsigned long pfn;
3897 	struct zone *z;
3898 
3899 	if (highest_memmap_pfn < end_pfn - 1)
3900 		highest_memmap_pfn = end_pfn - 1;
3901 
3902 	z = &NODE_DATA(nid)->node_zones[zone];
3903 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3904 		/*
3905 		 * There can be holes in boot-time mem_map[]s
3906 		 * handed to this function.  They do not
3907 		 * exist on hotplugged memory.
3908 		 */
3909 		if (context == MEMMAP_EARLY) {
3910 			if (!early_pfn_valid(pfn))
3911 				continue;
3912 			if (!early_pfn_in_nid(pfn, nid))
3913 				continue;
3914 		}
3915 		page = pfn_to_page(pfn);
3916 		set_page_links(page, zone, nid, pfn);
3917 		mminit_verify_page_links(page, zone, nid, pfn);
3918 		init_page_count(page);
3919 		page_mapcount_reset(page);
3920 		page_nid_reset_last(page);
3921 		SetPageReserved(page);
3922 		/*
3923 		 * Mark the block movable so that blocks are reserved for
3924 		 * movable at startup. This will force kernel allocations
3925 		 * to reserve their blocks rather than leaking throughout
3926 		 * the address space during boot when many long-lived
3927 		 * kernel allocations are made. Later some blocks near
3928 		 * the start are marked MIGRATE_RESERVE by
3929 		 * setup_zone_migrate_reserve()
3930 		 *
3931 		 * bitmap is created for zone's valid pfn range. but memmap
3932 		 * can be created for invalid pages (for alignment)
3933 		 * check here not to call set_pageblock_migratetype() against
3934 		 * pfn out of zone.
3935 		 */
3936 		if ((z->zone_start_pfn <= pfn)
3937 		    && (pfn < zone_end_pfn(z))
3938 		    && !(pfn & (pageblock_nr_pages - 1)))
3939 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3940 
3941 		INIT_LIST_HEAD(&page->lru);
3942 #ifdef WANT_PAGE_VIRTUAL
3943 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
3944 		if (!is_highmem_idx(zone))
3945 			set_page_address(page, __va(pfn << PAGE_SHIFT));
3946 #endif
3947 	}
3948 }
3949 
3950 static void __meminit zone_init_free_lists(struct zone *zone)
3951 {
3952 	int order, t;
3953 	for_each_migratetype_order(order, t) {
3954 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3955 		zone->free_area[order].nr_free = 0;
3956 	}
3957 }
3958 
3959 #ifndef __HAVE_ARCH_MEMMAP_INIT
3960 #define memmap_init(size, nid, zone, start_pfn) \
3961 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3962 #endif
3963 
3964 static int __meminit zone_batchsize(struct zone *zone)
3965 {
3966 #ifdef CONFIG_MMU
3967 	int batch;
3968 
3969 	/*
3970 	 * The per-cpu-pages pools are set to around 1000th of the
3971 	 * size of the zone.  But no more than 1/2 of a meg.
3972 	 *
3973 	 * OK, so we don't know how big the cache is.  So guess.
3974 	 */
3975 	batch = zone->managed_pages / 1024;
3976 	if (batch * PAGE_SIZE > 512 * 1024)
3977 		batch = (512 * 1024) / PAGE_SIZE;
3978 	batch /= 4;		/* We effectively *= 4 below */
3979 	if (batch < 1)
3980 		batch = 1;
3981 
3982 	/*
3983 	 * Clamp the batch to a 2^n - 1 value. Having a power
3984 	 * of 2 value was found to be more likely to have
3985 	 * suboptimal cache aliasing properties in some cases.
3986 	 *
3987 	 * For example if 2 tasks are alternately allocating
3988 	 * batches of pages, one task can end up with a lot
3989 	 * of pages of one half of the possible page colors
3990 	 * and the other with pages of the other colors.
3991 	 */
3992 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3993 
3994 	return batch;
3995 
3996 #else
3997 	/* The deferral and batching of frees should be suppressed under NOMMU
3998 	 * conditions.
3999 	 *
4000 	 * The problem is that NOMMU needs to be able to allocate large chunks
4001 	 * of contiguous memory as there's no hardware page translation to
4002 	 * assemble apparent contiguous memory from discontiguous pages.
4003 	 *
4004 	 * Queueing large contiguous runs of pages for batching, however,
4005 	 * causes the pages to actually be freed in smaller chunks.  As there
4006 	 * can be a significant delay between the individual batches being
4007 	 * recycled, this leads to the once large chunks of space being
4008 	 * fragmented and becoming unavailable for high-order allocations.
4009 	 */
4010 	return 0;
4011 #endif
4012 }
4013 
4014 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4015 {
4016 	struct per_cpu_pages *pcp;
4017 	int migratetype;
4018 
4019 	memset(p, 0, sizeof(*p));
4020 
4021 	pcp = &p->pcp;
4022 	pcp->count = 0;
4023 	pcp->high = 6 * batch;
4024 	pcp->batch = max(1UL, 1 * batch);
4025 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4026 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
4027 }
4028 
4029 /*
4030  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
4031  * to the value high for the pageset p.
4032  */
4033 
4034 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
4035 				unsigned long high)
4036 {
4037 	struct per_cpu_pages *pcp;
4038 
4039 	pcp = &p->pcp;
4040 	pcp->high = high;
4041 	pcp->batch = max(1UL, high/4);
4042 	if ((high/4) > (PAGE_SHIFT * 8))
4043 		pcp->batch = PAGE_SHIFT * 8;
4044 }
4045 
4046 static void __meminit setup_zone_pageset(struct zone *zone)
4047 {
4048 	int cpu;
4049 
4050 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
4051 
4052 	for_each_possible_cpu(cpu) {
4053 		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4054 
4055 		setup_pageset(pcp, zone_batchsize(zone));
4056 
4057 		if (percpu_pagelist_fraction)
4058 			setup_pagelist_highmark(pcp,
4059 				(zone->managed_pages /
4060 					percpu_pagelist_fraction));
4061 	}
4062 }
4063 
4064 /*
4065  * Allocate per cpu pagesets and initialize them.
4066  * Before this call only boot pagesets were available.
4067  */
4068 void __init setup_per_cpu_pageset(void)
4069 {
4070 	struct zone *zone;
4071 
4072 	for_each_populated_zone(zone)
4073 		setup_zone_pageset(zone);
4074 }
4075 
4076 static noinline __init_refok
4077 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
4078 {
4079 	int i;
4080 	struct pglist_data *pgdat = zone->zone_pgdat;
4081 	size_t alloc_size;
4082 
4083 	/*
4084 	 * The per-page waitqueue mechanism uses hashed waitqueues
4085 	 * per zone.
4086 	 */
4087 	zone->wait_table_hash_nr_entries =
4088 		 wait_table_hash_nr_entries(zone_size_pages);
4089 	zone->wait_table_bits =
4090 		wait_table_bits(zone->wait_table_hash_nr_entries);
4091 	alloc_size = zone->wait_table_hash_nr_entries
4092 					* sizeof(wait_queue_head_t);
4093 
4094 	if (!slab_is_available()) {
4095 		zone->wait_table = (wait_queue_head_t *)
4096 			alloc_bootmem_node_nopanic(pgdat, alloc_size);
4097 	} else {
4098 		/*
4099 		 * This case means that a zone whose size was 0 gets new memory
4100 		 * via memory hot-add.
4101 		 * But it may be the case that a new node was hot-added.  In
4102 		 * this case vmalloc() will not be able to use this new node's
4103 		 * memory - this wait_table must be initialized to use this new
4104 		 * node itself as well.
4105 		 * To use this new node's memory, further consideration will be
4106 		 * necessary.
4107 		 */
4108 		zone->wait_table = vmalloc(alloc_size);
4109 	}
4110 	if (!zone->wait_table)
4111 		return -ENOMEM;
4112 
4113 	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4114 		init_waitqueue_head(zone->wait_table + i);
4115 
4116 	return 0;
4117 }
4118 
4119 static __meminit void zone_pcp_init(struct zone *zone)
4120 {
4121 	/*
4122 	 * per cpu subsystem is not up at this point. The following code
4123 	 * relies on the ability of the linker to provide the
4124 	 * offset of a (static) per cpu variable into the per cpu area.
4125 	 */
4126 	zone->pageset = &boot_pageset;
4127 
4128 	if (zone->present_pages)
4129 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
4130 			zone->name, zone->present_pages,
4131 					 zone_batchsize(zone));
4132 }
4133 
4134 int __meminit init_currently_empty_zone(struct zone *zone,
4135 					unsigned long zone_start_pfn,
4136 					unsigned long size,
4137 					enum memmap_context context)
4138 {
4139 	struct pglist_data *pgdat = zone->zone_pgdat;
4140 	int ret;
4141 	ret = zone_wait_table_init(zone, size);
4142 	if (ret)
4143 		return ret;
4144 	pgdat->nr_zones = zone_idx(zone) + 1;
4145 
4146 	zone->zone_start_pfn = zone_start_pfn;
4147 
4148 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
4149 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
4150 			pgdat->node_id,
4151 			(unsigned long)zone_idx(zone),
4152 			zone_start_pfn, (zone_start_pfn + size));
4153 
4154 	zone_init_free_lists(zone);
4155 
4156 	return 0;
4157 }
4158 
4159 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4160 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4161 /*
4162  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4163  * Architectures may implement their own version but if add_active_range()
4164  * was used and there are no special requirements, this is a convenient
4165  * alternative
4166  */
4167 int __meminit __early_pfn_to_nid(unsigned long pfn)
4168 {
4169 	unsigned long start_pfn, end_pfn;
4170 	int i, nid;
4171 
4172 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4173 		if (start_pfn <= pfn && pfn < end_pfn)
4174 			return nid;
4175 	/* This is a memory hole */
4176 	return -1;
4177 }
4178 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4179 
4180 int __meminit early_pfn_to_nid(unsigned long pfn)
4181 {
4182 	int nid;
4183 
4184 	nid = __early_pfn_to_nid(pfn);
4185 	if (nid >= 0)
4186 		return nid;
4187 	/* just returns 0 */
4188 	return 0;
4189 }
4190 
4191 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
4192 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4193 {
4194 	int nid;
4195 
4196 	nid = __early_pfn_to_nid(pfn);
4197 	if (nid >= 0 && nid != node)
4198 		return false;
4199 	return true;
4200 }
4201 #endif
4202 
4203 /**
4204  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
4205  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4206  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
4207  *
4208  * If an architecture guarantees that all ranges registered with
4209  * add_active_ranges() contain no holes and may be freed, this
4210  * this function may be used instead of calling free_bootmem() manually.
4211  */
4212 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4213 {
4214 	unsigned long start_pfn, end_pfn;
4215 	int i, this_nid;
4216 
4217 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4218 		start_pfn = min(start_pfn, max_low_pfn);
4219 		end_pfn = min(end_pfn, max_low_pfn);
4220 
4221 		if (start_pfn < end_pfn)
4222 			free_bootmem_node(NODE_DATA(this_nid),
4223 					  PFN_PHYS(start_pfn),
4224 					  (end_pfn - start_pfn) << PAGE_SHIFT);
4225 	}
4226 }
4227 
4228 /**
4229  * sparse_memory_present_with_active_regions - Call memory_present for each active range
4230  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4231  *
4232  * If an architecture guarantees that all ranges registered with
4233  * add_active_ranges() contain no holes and may be freed, this
4234  * function may be used instead of calling memory_present() manually.
4235  */
4236 void __init sparse_memory_present_with_active_regions(int nid)
4237 {
4238 	unsigned long start_pfn, end_pfn;
4239 	int i, this_nid;
4240 
4241 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4242 		memory_present(this_nid, start_pfn, end_pfn);
4243 }
4244 
4245 /**
4246  * get_pfn_range_for_nid - Return the start and end page frames for a node
4247  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4248  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4249  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4250  *
4251  * It returns the start and end page frame of a node based on information
4252  * provided by an arch calling add_active_range(). If called for a node
4253  * with no available memory, a warning is printed and the start and end
4254  * PFNs will be 0.
4255  */
4256 void __meminit get_pfn_range_for_nid(unsigned int nid,
4257 			unsigned long *start_pfn, unsigned long *end_pfn)
4258 {
4259 	unsigned long this_start_pfn, this_end_pfn;
4260 	int i;
4261 
4262 	*start_pfn = -1UL;
4263 	*end_pfn = 0;
4264 
4265 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4266 		*start_pfn = min(*start_pfn, this_start_pfn);
4267 		*end_pfn = max(*end_pfn, this_end_pfn);
4268 	}
4269 
4270 	if (*start_pfn == -1UL)
4271 		*start_pfn = 0;
4272 }
4273 
4274 /*
4275  * This finds a zone that can be used for ZONE_MOVABLE pages. The
4276  * assumption is made that zones within a node are ordered in monotonic
4277  * increasing memory addresses so that the "highest" populated zone is used
4278  */
4279 static void __init find_usable_zone_for_movable(void)
4280 {
4281 	int zone_index;
4282 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4283 		if (zone_index == ZONE_MOVABLE)
4284 			continue;
4285 
4286 		if (arch_zone_highest_possible_pfn[zone_index] >
4287 				arch_zone_lowest_possible_pfn[zone_index])
4288 			break;
4289 	}
4290 
4291 	VM_BUG_ON(zone_index == -1);
4292 	movable_zone = zone_index;
4293 }
4294 
4295 /*
4296  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4297  * because it is sized independent of architecture. Unlike the other zones,
4298  * the starting point for ZONE_MOVABLE is not fixed. It may be different
4299  * in each node depending on the size of each node and how evenly kernelcore
4300  * is distributed. This helper function adjusts the zone ranges
4301  * provided by the architecture for a given node by using the end of the
4302  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4303  * zones within a node are in order of monotonic increases memory addresses
4304  */
4305 static void __meminit adjust_zone_range_for_zone_movable(int nid,
4306 					unsigned long zone_type,
4307 					unsigned long node_start_pfn,
4308 					unsigned long node_end_pfn,
4309 					unsigned long *zone_start_pfn,
4310 					unsigned long *zone_end_pfn)
4311 {
4312 	/* Only adjust if ZONE_MOVABLE is on this node */
4313 	if (zone_movable_pfn[nid]) {
4314 		/* Size ZONE_MOVABLE */
4315 		if (zone_type == ZONE_MOVABLE) {
4316 			*zone_start_pfn = zone_movable_pfn[nid];
4317 			*zone_end_pfn = min(node_end_pfn,
4318 				arch_zone_highest_possible_pfn[movable_zone]);
4319 
4320 		/* Adjust for ZONE_MOVABLE starting within this range */
4321 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4322 				*zone_end_pfn > zone_movable_pfn[nid]) {
4323 			*zone_end_pfn = zone_movable_pfn[nid];
4324 
4325 		/* Check if this whole range is within ZONE_MOVABLE */
4326 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
4327 			*zone_start_pfn = *zone_end_pfn;
4328 	}
4329 }
4330 
4331 /*
4332  * Return the number of pages a zone spans in a node, including holes
4333  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4334  */
4335 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4336 					unsigned long zone_type,
4337 					unsigned long *ignored)
4338 {
4339 	unsigned long node_start_pfn, node_end_pfn;
4340 	unsigned long zone_start_pfn, zone_end_pfn;
4341 
4342 	/* Get the start and end of the node and zone */
4343 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4344 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4345 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4346 	adjust_zone_range_for_zone_movable(nid, zone_type,
4347 				node_start_pfn, node_end_pfn,
4348 				&zone_start_pfn, &zone_end_pfn);
4349 
4350 	/* Check that this node has pages within the zone's required range */
4351 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4352 		return 0;
4353 
4354 	/* Move the zone boundaries inside the node if necessary */
4355 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4356 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4357 
4358 	/* Return the spanned pages */
4359 	return zone_end_pfn - zone_start_pfn;
4360 }
4361 
4362 /*
4363  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4364  * then all holes in the requested range will be accounted for.
4365  */
4366 unsigned long __meminit __absent_pages_in_range(int nid,
4367 				unsigned long range_start_pfn,
4368 				unsigned long range_end_pfn)
4369 {
4370 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
4371 	unsigned long start_pfn, end_pfn;
4372 	int i;
4373 
4374 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4375 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4376 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4377 		nr_absent -= end_pfn - start_pfn;
4378 	}
4379 	return nr_absent;
4380 }
4381 
4382 /**
4383  * absent_pages_in_range - Return number of page frames in holes within a range
4384  * @start_pfn: The start PFN to start searching for holes
4385  * @end_pfn: The end PFN to stop searching for holes
4386  *
4387  * It returns the number of pages frames in memory holes within a range.
4388  */
4389 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4390 							unsigned long end_pfn)
4391 {
4392 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4393 }
4394 
4395 /* Return the number of page frames in holes in a zone on a node */
4396 static unsigned long __meminit zone_absent_pages_in_node(int nid,
4397 					unsigned long zone_type,
4398 					unsigned long *ignored)
4399 {
4400 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4401 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4402 	unsigned long node_start_pfn, node_end_pfn;
4403 	unsigned long zone_start_pfn, zone_end_pfn;
4404 
4405 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4406 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4407 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4408 
4409 	adjust_zone_range_for_zone_movable(nid, zone_type,
4410 			node_start_pfn, node_end_pfn,
4411 			&zone_start_pfn, &zone_end_pfn);
4412 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4413 }
4414 
4415 /**
4416  * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array.
4417  *
4418  * zone_movable_limit is initialized as 0. This function will try to get
4419  * the first ZONE_MOVABLE pfn of each node from movablemem_map, and
4420  * assigne them to zone_movable_limit.
4421  * zone_movable_limit[nid] == 0 means no limit for the node.
4422  *
4423  * Note: Each range is represented as [start_pfn, end_pfn)
4424  */
4425 static void __meminit sanitize_zone_movable_limit(void)
4426 {
4427 	int map_pos = 0, i, nid;
4428 	unsigned long start_pfn, end_pfn;
4429 
4430 	if (!movablemem_map.nr_map)
4431 		return;
4432 
4433 	/* Iterate all ranges from minimum to maximum */
4434 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4435 		/*
4436 		 * If we have found lowest pfn of ZONE_MOVABLE of the node
4437 		 * specified by user, just go on to check next range.
4438 		 */
4439 		if (zone_movable_limit[nid])
4440 			continue;
4441 
4442 #ifdef CONFIG_ZONE_DMA
4443 		/* Skip DMA memory. */
4444 		if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA])
4445 			start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA];
4446 #endif
4447 
4448 #ifdef CONFIG_ZONE_DMA32
4449 		/* Skip DMA32 memory. */
4450 		if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32])
4451 			start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32];
4452 #endif
4453 
4454 #ifdef CONFIG_HIGHMEM
4455 		/* Skip lowmem if ZONE_MOVABLE is highmem. */
4456 		if (zone_movable_is_highmem() &&
4457 		    start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])
4458 			start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
4459 #endif
4460 
4461 		if (start_pfn >= end_pfn)
4462 			continue;
4463 
4464 		while (map_pos < movablemem_map.nr_map) {
4465 			if (end_pfn <= movablemem_map.map[map_pos].start_pfn)
4466 				break;
4467 
4468 			if (start_pfn >= movablemem_map.map[map_pos].end_pfn) {
4469 				map_pos++;
4470 				continue;
4471 			}
4472 
4473 			/*
4474 			 * The start_pfn of ZONE_MOVABLE is either the minimum
4475 			 * pfn specified by movablemem_map, or 0, which means
4476 			 * the node has no ZONE_MOVABLE.
4477 			 */
4478 			zone_movable_limit[nid] = max(start_pfn,
4479 					movablemem_map.map[map_pos].start_pfn);
4480 
4481 			break;
4482 		}
4483 	}
4484 }
4485 
4486 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4487 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4488 					unsigned long zone_type,
4489 					unsigned long *zones_size)
4490 {
4491 	return zones_size[zone_type];
4492 }
4493 
4494 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4495 						unsigned long zone_type,
4496 						unsigned long *zholes_size)
4497 {
4498 	if (!zholes_size)
4499 		return 0;
4500 
4501 	return zholes_size[zone_type];
4502 }
4503 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4504 
4505 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4506 		unsigned long *zones_size, unsigned long *zholes_size)
4507 {
4508 	unsigned long realtotalpages, totalpages = 0;
4509 	enum zone_type i;
4510 
4511 	for (i = 0; i < MAX_NR_ZONES; i++)
4512 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4513 								zones_size);
4514 	pgdat->node_spanned_pages = totalpages;
4515 
4516 	realtotalpages = totalpages;
4517 	for (i = 0; i < MAX_NR_ZONES; i++)
4518 		realtotalpages -=
4519 			zone_absent_pages_in_node(pgdat->node_id, i,
4520 								zholes_size);
4521 	pgdat->node_present_pages = realtotalpages;
4522 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4523 							realtotalpages);
4524 }
4525 
4526 #ifndef CONFIG_SPARSEMEM
4527 /*
4528  * Calculate the size of the zone->blockflags rounded to an unsigned long
4529  * Start by making sure zonesize is a multiple of pageblock_order by rounding
4530  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
4531  * round what is now in bits to nearest long in bits, then return it in
4532  * bytes.
4533  */
4534 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
4535 {
4536 	unsigned long usemapsize;
4537 
4538 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
4539 	usemapsize = roundup(zonesize, pageblock_nr_pages);
4540 	usemapsize = usemapsize >> pageblock_order;
4541 	usemapsize *= NR_PAGEBLOCK_BITS;
4542 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4543 
4544 	return usemapsize / 8;
4545 }
4546 
4547 static void __init setup_usemap(struct pglist_data *pgdat,
4548 				struct zone *zone,
4549 				unsigned long zone_start_pfn,
4550 				unsigned long zonesize)
4551 {
4552 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
4553 	zone->pageblock_flags = NULL;
4554 	if (usemapsize)
4555 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4556 								   usemapsize);
4557 }
4558 #else
4559 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4560 				unsigned long zone_start_pfn, unsigned long zonesize) {}
4561 #endif /* CONFIG_SPARSEMEM */
4562 
4563 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4564 
4565 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4566 void __init set_pageblock_order(void)
4567 {
4568 	unsigned int order;
4569 
4570 	/* Check that pageblock_nr_pages has not already been setup */
4571 	if (pageblock_order)
4572 		return;
4573 
4574 	if (HPAGE_SHIFT > PAGE_SHIFT)
4575 		order = HUGETLB_PAGE_ORDER;
4576 	else
4577 		order = MAX_ORDER - 1;
4578 
4579 	/*
4580 	 * Assume the largest contiguous order of interest is a huge page.
4581 	 * This value may be variable depending on boot parameters on IA64 and
4582 	 * powerpc.
4583 	 */
4584 	pageblock_order = order;
4585 }
4586 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4587 
4588 /*
4589  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4590  * is unused as pageblock_order is set at compile-time. See
4591  * include/linux/pageblock-flags.h for the values of pageblock_order based on
4592  * the kernel config
4593  */
4594 void __init set_pageblock_order(void)
4595 {
4596 }
4597 
4598 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4599 
4600 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
4601 						   unsigned long present_pages)
4602 {
4603 	unsigned long pages = spanned_pages;
4604 
4605 	/*
4606 	 * Provide a more accurate estimation if there are holes within
4607 	 * the zone and SPARSEMEM is in use. If there are holes within the
4608 	 * zone, each populated memory region may cost us one or two extra
4609 	 * memmap pages due to alignment because memmap pages for each
4610 	 * populated regions may not naturally algined on page boundary.
4611 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
4612 	 */
4613 	if (spanned_pages > present_pages + (present_pages >> 4) &&
4614 	    IS_ENABLED(CONFIG_SPARSEMEM))
4615 		pages = present_pages;
4616 
4617 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
4618 }
4619 
4620 /*
4621  * Set up the zone data structures:
4622  *   - mark all pages reserved
4623  *   - mark all memory queues empty
4624  *   - clear the memory bitmaps
4625  *
4626  * NOTE: pgdat should get zeroed by caller.
4627  */
4628 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4629 		unsigned long *zones_size, unsigned long *zholes_size)
4630 {
4631 	enum zone_type j;
4632 	int nid = pgdat->node_id;
4633 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
4634 	int ret;
4635 
4636 	pgdat_resize_init(pgdat);
4637 #ifdef CONFIG_NUMA_BALANCING
4638 	spin_lock_init(&pgdat->numabalancing_migrate_lock);
4639 	pgdat->numabalancing_migrate_nr_pages = 0;
4640 	pgdat->numabalancing_migrate_next_window = jiffies;
4641 #endif
4642 	init_waitqueue_head(&pgdat->kswapd_wait);
4643 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
4644 	pgdat_page_cgroup_init(pgdat);
4645 
4646 	for (j = 0; j < MAX_NR_ZONES; j++) {
4647 		struct zone *zone = pgdat->node_zones + j;
4648 		unsigned long size, realsize, freesize, memmap_pages;
4649 
4650 		size = zone_spanned_pages_in_node(nid, j, zones_size);
4651 		realsize = freesize = size - zone_absent_pages_in_node(nid, j,
4652 								zholes_size);
4653 
4654 		/*
4655 		 * Adjust freesize so that it accounts for how much memory
4656 		 * is used by this zone for memmap. This affects the watermark
4657 		 * and per-cpu initialisations
4658 		 */
4659 		memmap_pages = calc_memmap_size(size, realsize);
4660 		if (freesize >= memmap_pages) {
4661 			freesize -= memmap_pages;
4662 			if (memmap_pages)
4663 				printk(KERN_DEBUG
4664 				       "  %s zone: %lu pages used for memmap\n",
4665 				       zone_names[j], memmap_pages);
4666 		} else
4667 			printk(KERN_WARNING
4668 				"  %s zone: %lu pages exceeds freesize %lu\n",
4669 				zone_names[j], memmap_pages, freesize);
4670 
4671 		/* Account for reserved pages */
4672 		if (j == 0 && freesize > dma_reserve) {
4673 			freesize -= dma_reserve;
4674 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4675 					zone_names[0], dma_reserve);
4676 		}
4677 
4678 		if (!is_highmem_idx(j))
4679 			nr_kernel_pages += freesize;
4680 		/* Charge for highmem memmap if there are enough kernel pages */
4681 		else if (nr_kernel_pages > memmap_pages * 2)
4682 			nr_kernel_pages -= memmap_pages;
4683 		nr_all_pages += freesize;
4684 
4685 		zone->spanned_pages = size;
4686 		zone->present_pages = realsize;
4687 		/*
4688 		 * Set an approximate value for lowmem here, it will be adjusted
4689 		 * when the bootmem allocator frees pages into the buddy system.
4690 		 * And all highmem pages will be managed by the buddy system.
4691 		 */
4692 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
4693 #ifdef CONFIG_NUMA
4694 		zone->node = nid;
4695 		zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
4696 						/ 100;
4697 		zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
4698 #endif
4699 		zone->name = zone_names[j];
4700 		spin_lock_init(&zone->lock);
4701 		spin_lock_init(&zone->lru_lock);
4702 		zone_seqlock_init(zone);
4703 		zone->zone_pgdat = pgdat;
4704 
4705 		zone_pcp_init(zone);
4706 		lruvec_init(&zone->lruvec);
4707 		if (!size)
4708 			continue;
4709 
4710 		set_pageblock_order();
4711 		setup_usemap(pgdat, zone, zone_start_pfn, size);
4712 		ret = init_currently_empty_zone(zone, zone_start_pfn,
4713 						size, MEMMAP_EARLY);
4714 		BUG_ON(ret);
4715 		memmap_init(size, nid, j, zone_start_pfn);
4716 		zone_start_pfn += size;
4717 	}
4718 }
4719 
4720 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4721 {
4722 	/* Skip empty nodes */
4723 	if (!pgdat->node_spanned_pages)
4724 		return;
4725 
4726 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4727 	/* ia64 gets its own node_mem_map, before this, without bootmem */
4728 	if (!pgdat->node_mem_map) {
4729 		unsigned long size, start, end;
4730 		struct page *map;
4731 
4732 		/*
4733 		 * The zone's endpoints aren't required to be MAX_ORDER
4734 		 * aligned but the node_mem_map endpoints must be in order
4735 		 * for the buddy allocator to function correctly.
4736 		 */
4737 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4738 		end = pgdat_end_pfn(pgdat);
4739 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
4740 		size =  (end - start) * sizeof(struct page);
4741 		map = alloc_remap(pgdat->node_id, size);
4742 		if (!map)
4743 			map = alloc_bootmem_node_nopanic(pgdat, size);
4744 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4745 	}
4746 #ifndef CONFIG_NEED_MULTIPLE_NODES
4747 	/*
4748 	 * With no DISCONTIG, the global mem_map is just set as node 0's
4749 	 */
4750 	if (pgdat == NODE_DATA(0)) {
4751 		mem_map = NODE_DATA(0)->node_mem_map;
4752 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4753 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4754 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4755 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4756 	}
4757 #endif
4758 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
4759 }
4760 
4761 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4762 		unsigned long node_start_pfn, unsigned long *zholes_size)
4763 {
4764 	pg_data_t *pgdat = NODE_DATA(nid);
4765 
4766 	/* pg_data_t should be reset to zero when it's allocated */
4767 	WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
4768 
4769 	pgdat->node_id = nid;
4770 	pgdat->node_start_pfn = node_start_pfn;
4771 	init_zone_allows_reclaim(nid);
4772 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
4773 
4774 	alloc_node_mem_map(pgdat);
4775 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4776 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4777 		nid, (unsigned long)pgdat,
4778 		(unsigned long)pgdat->node_mem_map);
4779 #endif
4780 
4781 	free_area_init_core(pgdat, zones_size, zholes_size);
4782 }
4783 
4784 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4785 
4786 #if MAX_NUMNODES > 1
4787 /*
4788  * Figure out the number of possible node ids.
4789  */
4790 static void __init setup_nr_node_ids(void)
4791 {
4792 	unsigned int node;
4793 	unsigned int highest = 0;
4794 
4795 	for_each_node_mask(node, node_possible_map)
4796 		highest = node;
4797 	nr_node_ids = highest + 1;
4798 }
4799 #else
4800 static inline void setup_nr_node_ids(void)
4801 {
4802 }
4803 #endif
4804 
4805 /**
4806  * node_map_pfn_alignment - determine the maximum internode alignment
4807  *
4808  * This function should be called after node map is populated and sorted.
4809  * It calculates the maximum power of two alignment which can distinguish
4810  * all the nodes.
4811  *
4812  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4813  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
4814  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
4815  * shifted, 1GiB is enough and this function will indicate so.
4816  *
4817  * This is used to test whether pfn -> nid mapping of the chosen memory
4818  * model has fine enough granularity to avoid incorrect mapping for the
4819  * populated node map.
4820  *
4821  * Returns the determined alignment in pfn's.  0 if there is no alignment
4822  * requirement (single node).
4823  */
4824 unsigned long __init node_map_pfn_alignment(void)
4825 {
4826 	unsigned long accl_mask = 0, last_end = 0;
4827 	unsigned long start, end, mask;
4828 	int last_nid = -1;
4829 	int i, nid;
4830 
4831 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
4832 		if (!start || last_nid < 0 || last_nid == nid) {
4833 			last_nid = nid;
4834 			last_end = end;
4835 			continue;
4836 		}
4837 
4838 		/*
4839 		 * Start with a mask granular enough to pin-point to the
4840 		 * start pfn and tick off bits one-by-one until it becomes
4841 		 * too coarse to separate the current node from the last.
4842 		 */
4843 		mask = ~((1 << __ffs(start)) - 1);
4844 		while (mask && last_end <= (start & (mask << 1)))
4845 			mask <<= 1;
4846 
4847 		/* accumulate all internode masks */
4848 		accl_mask |= mask;
4849 	}
4850 
4851 	/* convert mask to number of pages */
4852 	return ~accl_mask + 1;
4853 }
4854 
4855 /* Find the lowest pfn for a node */
4856 static unsigned long __init find_min_pfn_for_node(int nid)
4857 {
4858 	unsigned long min_pfn = ULONG_MAX;
4859 	unsigned long start_pfn;
4860 	int i;
4861 
4862 	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4863 		min_pfn = min(min_pfn, start_pfn);
4864 
4865 	if (min_pfn == ULONG_MAX) {
4866 		printk(KERN_WARNING
4867 			"Could not find start_pfn for node %d\n", nid);
4868 		return 0;
4869 	}
4870 
4871 	return min_pfn;
4872 }
4873 
4874 /**
4875  * find_min_pfn_with_active_regions - Find the minimum PFN registered
4876  *
4877  * It returns the minimum PFN based on information provided via
4878  * add_active_range().
4879  */
4880 unsigned long __init find_min_pfn_with_active_regions(void)
4881 {
4882 	return find_min_pfn_for_node(MAX_NUMNODES);
4883 }
4884 
4885 /*
4886  * early_calculate_totalpages()
4887  * Sum pages in active regions for movable zone.
4888  * Populate N_MEMORY for calculating usable_nodes.
4889  */
4890 static unsigned long __init early_calculate_totalpages(void)
4891 {
4892 	unsigned long totalpages = 0;
4893 	unsigned long start_pfn, end_pfn;
4894 	int i, nid;
4895 
4896 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4897 		unsigned long pages = end_pfn - start_pfn;
4898 
4899 		totalpages += pages;
4900 		if (pages)
4901 			node_set_state(nid, N_MEMORY);
4902 	}
4903   	return totalpages;
4904 }
4905 
4906 /*
4907  * Find the PFN the Movable zone begins in each node. Kernel memory
4908  * is spread evenly between nodes as long as the nodes have enough
4909  * memory. When they don't, some nodes will have more kernelcore than
4910  * others
4911  */
4912 static void __init find_zone_movable_pfns_for_nodes(void)
4913 {
4914 	int i, nid;
4915 	unsigned long usable_startpfn;
4916 	unsigned long kernelcore_node, kernelcore_remaining;
4917 	/* save the state before borrow the nodemask */
4918 	nodemask_t saved_node_state = node_states[N_MEMORY];
4919 	unsigned long totalpages = early_calculate_totalpages();
4920 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
4921 
4922 	/*
4923 	 * If movablecore was specified, calculate what size of
4924 	 * kernelcore that corresponds so that memory usable for
4925 	 * any allocation type is evenly spread. If both kernelcore
4926 	 * and movablecore are specified, then the value of kernelcore
4927 	 * will be used for required_kernelcore if it's greater than
4928 	 * what movablecore would have allowed.
4929 	 */
4930 	if (required_movablecore) {
4931 		unsigned long corepages;
4932 
4933 		/*
4934 		 * Round-up so that ZONE_MOVABLE is at least as large as what
4935 		 * was requested by the user
4936 		 */
4937 		required_movablecore =
4938 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4939 		corepages = totalpages - required_movablecore;
4940 
4941 		required_kernelcore = max(required_kernelcore, corepages);
4942 	}
4943 
4944 	/*
4945 	 * If neither kernelcore/movablecore nor movablemem_map is specified,
4946 	 * there is no ZONE_MOVABLE. But if movablemem_map is specified, the
4947 	 * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
4948 	 */
4949 	if (!required_kernelcore) {
4950 		if (movablemem_map.nr_map)
4951 			memcpy(zone_movable_pfn, zone_movable_limit,
4952 				sizeof(zone_movable_pfn));
4953 		goto out;
4954 	}
4955 
4956 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4957 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4958 
4959 restart:
4960 	/* Spread kernelcore memory as evenly as possible throughout nodes */
4961 	kernelcore_node = required_kernelcore / usable_nodes;
4962 	for_each_node_state(nid, N_MEMORY) {
4963 		unsigned long start_pfn, end_pfn;
4964 
4965 		/*
4966 		 * Recalculate kernelcore_node if the division per node
4967 		 * now exceeds what is necessary to satisfy the requested
4968 		 * amount of memory for the kernel
4969 		 */
4970 		if (required_kernelcore < kernelcore_node)
4971 			kernelcore_node = required_kernelcore / usable_nodes;
4972 
4973 		/*
4974 		 * As the map is walked, we track how much memory is usable
4975 		 * by the kernel using kernelcore_remaining. When it is
4976 		 * 0, the rest of the node is usable by ZONE_MOVABLE
4977 		 */
4978 		kernelcore_remaining = kernelcore_node;
4979 
4980 		/* Go through each range of PFNs within this node */
4981 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4982 			unsigned long size_pages;
4983 
4984 			/*
4985 			 * Find more memory for kernelcore in
4986 			 * [zone_movable_pfn[nid], zone_movable_limit[nid]).
4987 			 */
4988 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
4989 			if (start_pfn >= end_pfn)
4990 				continue;
4991 
4992 			if (zone_movable_limit[nid]) {
4993 				end_pfn = min(end_pfn, zone_movable_limit[nid]);
4994 				/* No range left for kernelcore in this node */
4995 				if (start_pfn >= end_pfn) {
4996 					zone_movable_pfn[nid] =
4997 							zone_movable_limit[nid];
4998 					break;
4999 				}
5000 			}
5001 
5002 			/* Account for what is only usable for kernelcore */
5003 			if (start_pfn < usable_startpfn) {
5004 				unsigned long kernel_pages;
5005 				kernel_pages = min(end_pfn, usable_startpfn)
5006 								- start_pfn;
5007 
5008 				kernelcore_remaining -= min(kernel_pages,
5009 							kernelcore_remaining);
5010 				required_kernelcore -= min(kernel_pages,
5011 							required_kernelcore);
5012 
5013 				/* Continue if range is now fully accounted */
5014 				if (end_pfn <= usable_startpfn) {
5015 
5016 					/*
5017 					 * Push zone_movable_pfn to the end so
5018 					 * that if we have to rebalance
5019 					 * kernelcore across nodes, we will
5020 					 * not double account here
5021 					 */
5022 					zone_movable_pfn[nid] = end_pfn;
5023 					continue;
5024 				}
5025 				start_pfn = usable_startpfn;
5026 			}
5027 
5028 			/*
5029 			 * The usable PFN range for ZONE_MOVABLE is from
5030 			 * start_pfn->end_pfn. Calculate size_pages as the
5031 			 * number of pages used as kernelcore
5032 			 */
5033 			size_pages = end_pfn - start_pfn;
5034 			if (size_pages > kernelcore_remaining)
5035 				size_pages = kernelcore_remaining;
5036 			zone_movable_pfn[nid] = start_pfn + size_pages;
5037 
5038 			/*
5039 			 * Some kernelcore has been met, update counts and
5040 			 * break if the kernelcore for this node has been
5041 			 * satisified
5042 			 */
5043 			required_kernelcore -= min(required_kernelcore,
5044 								size_pages);
5045 			kernelcore_remaining -= size_pages;
5046 			if (!kernelcore_remaining)
5047 				break;
5048 		}
5049 	}
5050 
5051 	/*
5052 	 * If there is still required_kernelcore, we do another pass with one
5053 	 * less node in the count. This will push zone_movable_pfn[nid] further
5054 	 * along on the nodes that still have memory until kernelcore is
5055 	 * satisified
5056 	 */
5057 	usable_nodes--;
5058 	if (usable_nodes && required_kernelcore > usable_nodes)
5059 		goto restart;
5060 
5061 out:
5062 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5063 	for (nid = 0; nid < MAX_NUMNODES; nid++)
5064 		zone_movable_pfn[nid] =
5065 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
5066 
5067 	/* restore the node_state */
5068 	node_states[N_MEMORY] = saved_node_state;
5069 }
5070 
5071 /* Any regular or high memory on that node ? */
5072 static void check_for_memory(pg_data_t *pgdat, int nid)
5073 {
5074 	enum zone_type zone_type;
5075 
5076 	if (N_MEMORY == N_NORMAL_MEMORY)
5077 		return;
5078 
5079 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
5080 		struct zone *zone = &pgdat->node_zones[zone_type];
5081 		if (zone->present_pages) {
5082 			node_set_state(nid, N_HIGH_MEMORY);
5083 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5084 			    zone_type <= ZONE_NORMAL)
5085 				node_set_state(nid, N_NORMAL_MEMORY);
5086 			break;
5087 		}
5088 	}
5089 }
5090 
5091 /**
5092  * free_area_init_nodes - Initialise all pg_data_t and zone data
5093  * @max_zone_pfn: an array of max PFNs for each zone
5094  *
5095  * This will call free_area_init_node() for each active node in the system.
5096  * Using the page ranges provided by add_active_range(), the size of each
5097  * zone in each node and their holes is calculated. If the maximum PFN
5098  * between two adjacent zones match, it is assumed that the zone is empty.
5099  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5100  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5101  * starts where the previous one ended. For example, ZONE_DMA32 starts
5102  * at arch_max_dma_pfn.
5103  */
5104 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5105 {
5106 	unsigned long start_pfn, end_pfn;
5107 	int i, nid;
5108 
5109 	/* Record where the zone boundaries are */
5110 	memset(arch_zone_lowest_possible_pfn, 0,
5111 				sizeof(arch_zone_lowest_possible_pfn));
5112 	memset(arch_zone_highest_possible_pfn, 0,
5113 				sizeof(arch_zone_highest_possible_pfn));
5114 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5115 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5116 	for (i = 1; i < MAX_NR_ZONES; i++) {
5117 		if (i == ZONE_MOVABLE)
5118 			continue;
5119 		arch_zone_lowest_possible_pfn[i] =
5120 			arch_zone_highest_possible_pfn[i-1];
5121 		arch_zone_highest_possible_pfn[i] =
5122 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5123 	}
5124 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5125 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5126 
5127 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
5128 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
5129 	find_usable_zone_for_movable();
5130 	sanitize_zone_movable_limit();
5131 	find_zone_movable_pfns_for_nodes();
5132 
5133 	/* Print out the zone ranges */
5134 	printk("Zone ranges:\n");
5135 	for (i = 0; i < MAX_NR_ZONES; i++) {
5136 		if (i == ZONE_MOVABLE)
5137 			continue;
5138 		printk(KERN_CONT "  %-8s ", zone_names[i]);
5139 		if (arch_zone_lowest_possible_pfn[i] ==
5140 				arch_zone_highest_possible_pfn[i])
5141 			printk(KERN_CONT "empty\n");
5142 		else
5143 			printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
5144 				arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
5145 				(arch_zone_highest_possible_pfn[i]
5146 					<< PAGE_SHIFT) - 1);
5147 	}
5148 
5149 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
5150 	printk("Movable zone start for each node\n");
5151 	for (i = 0; i < MAX_NUMNODES; i++) {
5152 		if (zone_movable_pfn[i])
5153 			printk("  Node %d: %#010lx\n", i,
5154 			       zone_movable_pfn[i] << PAGE_SHIFT);
5155 	}
5156 
5157 	/* Print out the early node map */
5158 	printk("Early memory node ranges\n");
5159 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
5160 		printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
5161 		       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
5162 
5163 	/* Initialise every node */
5164 	mminit_verify_pageflags_layout();
5165 	setup_nr_node_ids();
5166 	for_each_online_node(nid) {
5167 		pg_data_t *pgdat = NODE_DATA(nid);
5168 		free_area_init_node(nid, NULL,
5169 				find_min_pfn_for_node(nid), NULL);
5170 
5171 		/* Any memory on that node */
5172 		if (pgdat->node_present_pages)
5173 			node_set_state(nid, N_MEMORY);
5174 		check_for_memory(pgdat, nid);
5175 	}
5176 }
5177 
5178 static int __init cmdline_parse_core(char *p, unsigned long *core)
5179 {
5180 	unsigned long long coremem;
5181 	if (!p)
5182 		return -EINVAL;
5183 
5184 	coremem = memparse(p, &p);
5185 	*core = coremem >> PAGE_SHIFT;
5186 
5187 	/* Paranoid check that UL is enough for the coremem value */
5188 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5189 
5190 	return 0;
5191 }
5192 
5193 /*
5194  * kernelcore=size sets the amount of memory for use for allocations that
5195  * cannot be reclaimed or migrated.
5196  */
5197 static int __init cmdline_parse_kernelcore(char *p)
5198 {
5199 	return cmdline_parse_core(p, &required_kernelcore);
5200 }
5201 
5202 /*
5203  * movablecore=size sets the amount of memory for use for allocations that
5204  * can be reclaimed or migrated.
5205  */
5206 static int __init cmdline_parse_movablecore(char *p)
5207 {
5208 	return cmdline_parse_core(p, &required_movablecore);
5209 }
5210 
5211 early_param("kernelcore", cmdline_parse_kernelcore);
5212 early_param("movablecore", cmdline_parse_movablecore);
5213 
5214 /**
5215  * movablemem_map_overlap() - Check if a range overlaps movablemem_map.map[].
5216  * @start_pfn:	start pfn of the range to be checked
5217  * @end_pfn: 	end pfn of the range to be checked (exclusive)
5218  *
5219  * This function checks if a given memory range [start_pfn, end_pfn) overlaps
5220  * the movablemem_map.map[] array.
5221  *
5222  * Return: index of the first overlapped element in movablemem_map.map[]
5223  *         or -1 if they don't overlap each other.
5224  */
5225 int __init movablemem_map_overlap(unsigned long start_pfn,
5226 				   unsigned long end_pfn)
5227 {
5228 	int overlap;
5229 
5230 	if (!movablemem_map.nr_map)
5231 		return -1;
5232 
5233 	for (overlap = 0; overlap < movablemem_map.nr_map; overlap++)
5234 		if (start_pfn < movablemem_map.map[overlap].end_pfn)
5235 			break;
5236 
5237 	if (overlap == movablemem_map.nr_map ||
5238 	    end_pfn <= movablemem_map.map[overlap].start_pfn)
5239 		return -1;
5240 
5241 	return overlap;
5242 }
5243 
5244 /**
5245  * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
5246  * @start_pfn:	start pfn of the range
5247  * @end_pfn:	end pfn of the range
5248  *
5249  * This function will also merge the overlapped ranges, and sort the array
5250  * by start_pfn in monotonic increasing order.
5251  */
5252 void __init insert_movablemem_map(unsigned long start_pfn,
5253 				  unsigned long end_pfn)
5254 {
5255 	int pos, overlap;
5256 
5257 	/*
5258 	 * pos will be at the 1st overlapped range, or the position
5259 	 * where the element should be inserted.
5260 	 */
5261 	for (pos = 0; pos < movablemem_map.nr_map; pos++)
5262 		if (start_pfn <= movablemem_map.map[pos].end_pfn)
5263 			break;
5264 
5265 	/* If there is no overlapped range, just insert the element. */
5266 	if (pos == movablemem_map.nr_map ||
5267 	    end_pfn < movablemem_map.map[pos].start_pfn) {
5268 		/*
5269 		 * If pos is not the end of array, we need to move all
5270 		 * the rest elements backward.
5271 		 */
5272 		if (pos < movablemem_map.nr_map)
5273 			memmove(&movablemem_map.map[pos+1],
5274 				&movablemem_map.map[pos],
5275 				sizeof(struct movablemem_entry) *
5276 				(movablemem_map.nr_map - pos));
5277 		movablemem_map.map[pos].start_pfn = start_pfn;
5278 		movablemem_map.map[pos].end_pfn = end_pfn;
5279 		movablemem_map.nr_map++;
5280 		return;
5281 	}
5282 
5283 	/* overlap will be at the last overlapped range */
5284 	for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
5285 		if (end_pfn < movablemem_map.map[overlap].start_pfn)
5286 			break;
5287 
5288 	/*
5289 	 * If there are more ranges overlapped, we need to merge them,
5290 	 * and move the rest elements forward.
5291 	 */
5292 	overlap--;
5293 	movablemem_map.map[pos].start_pfn = min(start_pfn,
5294 					movablemem_map.map[pos].start_pfn);
5295 	movablemem_map.map[pos].end_pfn = max(end_pfn,
5296 					movablemem_map.map[overlap].end_pfn);
5297 
5298 	if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
5299 		memmove(&movablemem_map.map[pos+1],
5300 			&movablemem_map.map[overlap+1],
5301 			sizeof(struct movablemem_entry) *
5302 			(movablemem_map.nr_map - overlap - 1));
5303 
5304 	movablemem_map.nr_map -= overlap - pos;
5305 }
5306 
5307 /**
5308  * movablemem_map_add_region - Add a memory range into movablemem_map.
5309  * @start:	physical start address of range
5310  * @end:	physical end address of range
5311  *
5312  * This function transform the physical address into pfn, and then add the
5313  * range into movablemem_map by calling insert_movablemem_map().
5314  */
5315 static void __init movablemem_map_add_region(u64 start, u64 size)
5316 {
5317 	unsigned long start_pfn, end_pfn;
5318 
5319 	/* In case size == 0 or start + size overflows */
5320 	if (start + size <= start)
5321 		return;
5322 
5323 	if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
5324 		pr_err("movablemem_map: too many entries;"
5325 			" ignoring [mem %#010llx-%#010llx]\n",
5326 			(unsigned long long) start,
5327 			(unsigned long long) (start + size - 1));
5328 		return;
5329 	}
5330 
5331 	start_pfn = PFN_DOWN(start);
5332 	end_pfn = PFN_UP(start + size);
5333 	insert_movablemem_map(start_pfn, end_pfn);
5334 }
5335 
5336 /*
5337  * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
5338  * @p:	The boot option of the following format:
5339  *	movablemem_map=nn[KMG]@ss[KMG]
5340  *
5341  * This option sets the memory range [ss, ss+nn) to be used as movable memory.
5342  *
5343  * Return: 0 on success or -EINVAL on failure.
5344  */
5345 static int __init cmdline_parse_movablemem_map(char *p)
5346 {
5347 	char *oldp;
5348 	u64 start_at, mem_size;
5349 
5350 	if (!p)
5351 		goto err;
5352 
5353 	if (!strcmp(p, "acpi"))
5354 		movablemem_map.acpi = true;
5355 
5356 	/*
5357 	 * If user decide to use info from BIOS, all the other user specified
5358 	 * ranges will be ingored.
5359 	 */
5360 	if (movablemem_map.acpi) {
5361 		if (movablemem_map.nr_map) {
5362 			memset(movablemem_map.map, 0,
5363 				sizeof(struct movablemem_entry)
5364 				* movablemem_map.nr_map);
5365 			movablemem_map.nr_map = 0;
5366 		}
5367 		return 0;
5368 	}
5369 
5370 	oldp = p;
5371 	mem_size = memparse(p, &p);
5372 	if (p == oldp)
5373 		goto err;
5374 
5375 	if (*p == '@') {
5376 		oldp = ++p;
5377 		start_at = memparse(p, &p);
5378 		if (p == oldp || *p != '\0')
5379 			goto err;
5380 
5381 		movablemem_map_add_region(start_at, mem_size);
5382 		return 0;
5383 	}
5384 err:
5385 	return -EINVAL;
5386 }
5387 early_param("movablemem_map", cmdline_parse_movablemem_map);
5388 
5389 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5390 
5391 /**
5392  * set_dma_reserve - set the specified number of pages reserved in the first zone
5393  * @new_dma_reserve: The number of pages to mark reserved
5394  *
5395  * The per-cpu batchsize and zone watermarks are determined by present_pages.
5396  * In the DMA zone, a significant percentage may be consumed by kernel image
5397  * and other unfreeable allocations which can skew the watermarks badly. This
5398  * function may optionally be used to account for unfreeable pages in the
5399  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5400  * smaller per-cpu batchsize.
5401  */
5402 void __init set_dma_reserve(unsigned long new_dma_reserve)
5403 {
5404 	dma_reserve = new_dma_reserve;
5405 }
5406 
5407 void __init free_area_init(unsigned long *zones_size)
5408 {
5409 	free_area_init_node(0, zones_size,
5410 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5411 }
5412 
5413 static int page_alloc_cpu_notify(struct notifier_block *self,
5414 				 unsigned long action, void *hcpu)
5415 {
5416 	int cpu = (unsigned long)hcpu;
5417 
5418 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5419 		lru_add_drain_cpu(cpu);
5420 		drain_pages(cpu);
5421 
5422 		/*
5423 		 * Spill the event counters of the dead processor
5424 		 * into the current processors event counters.
5425 		 * This artificially elevates the count of the current
5426 		 * processor.
5427 		 */
5428 		vm_events_fold_cpu(cpu);
5429 
5430 		/*
5431 		 * Zero the differential counters of the dead processor
5432 		 * so that the vm statistics are consistent.
5433 		 *
5434 		 * This is only okay since the processor is dead and cannot
5435 		 * race with what we are doing.
5436 		 */
5437 		refresh_cpu_vm_stats(cpu);
5438 	}
5439 	return NOTIFY_OK;
5440 }
5441 
5442 void __init page_alloc_init(void)
5443 {
5444 	hotcpu_notifier(page_alloc_cpu_notify, 0);
5445 }
5446 
5447 /*
5448  * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
5449  *	or min_free_kbytes changes.
5450  */
5451 static void calculate_totalreserve_pages(void)
5452 {
5453 	struct pglist_data *pgdat;
5454 	unsigned long reserve_pages = 0;
5455 	enum zone_type i, j;
5456 
5457 	for_each_online_pgdat(pgdat) {
5458 		for (i = 0; i < MAX_NR_ZONES; i++) {
5459 			struct zone *zone = pgdat->node_zones + i;
5460 			unsigned long max = 0;
5461 
5462 			/* Find valid and maximum lowmem_reserve in the zone */
5463 			for (j = i; j < MAX_NR_ZONES; j++) {
5464 				if (zone->lowmem_reserve[j] > max)
5465 					max = zone->lowmem_reserve[j];
5466 			}
5467 
5468 			/* we treat the high watermark as reserved pages. */
5469 			max += high_wmark_pages(zone);
5470 
5471 			if (max > zone->managed_pages)
5472 				max = zone->managed_pages;
5473 			reserve_pages += max;
5474 			/*
5475 			 * Lowmem reserves are not available to
5476 			 * GFP_HIGHUSER page cache allocations and
5477 			 * kswapd tries to balance zones to their high
5478 			 * watermark.  As a result, neither should be
5479 			 * regarded as dirtyable memory, to prevent a
5480 			 * situation where reclaim has to clean pages
5481 			 * in order to balance the zones.
5482 			 */
5483 			zone->dirty_balance_reserve = max;
5484 		}
5485 	}
5486 	dirty_balance_reserve = reserve_pages;
5487 	totalreserve_pages = reserve_pages;
5488 }
5489 
5490 /*
5491  * setup_per_zone_lowmem_reserve - called whenever
5492  *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
5493  *	has a correct pages reserved value, so an adequate number of
5494  *	pages are left in the zone after a successful __alloc_pages().
5495  */
5496 static void setup_per_zone_lowmem_reserve(void)
5497 {
5498 	struct pglist_data *pgdat;
5499 	enum zone_type j, idx;
5500 
5501 	for_each_online_pgdat(pgdat) {
5502 		for (j = 0; j < MAX_NR_ZONES; j++) {
5503 			struct zone *zone = pgdat->node_zones + j;
5504 			unsigned long managed_pages = zone->managed_pages;
5505 
5506 			zone->lowmem_reserve[j] = 0;
5507 
5508 			idx = j;
5509 			while (idx) {
5510 				struct zone *lower_zone;
5511 
5512 				idx--;
5513 
5514 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
5515 					sysctl_lowmem_reserve_ratio[idx] = 1;
5516 
5517 				lower_zone = pgdat->node_zones + idx;
5518 				lower_zone->lowmem_reserve[j] = managed_pages /
5519 					sysctl_lowmem_reserve_ratio[idx];
5520 				managed_pages += lower_zone->managed_pages;
5521 			}
5522 		}
5523 	}
5524 
5525 	/* update totalreserve_pages */
5526 	calculate_totalreserve_pages();
5527 }
5528 
5529 static void __setup_per_zone_wmarks(void)
5530 {
5531 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5532 	unsigned long lowmem_pages = 0;
5533 	struct zone *zone;
5534 	unsigned long flags;
5535 
5536 	/* Calculate total number of !ZONE_HIGHMEM pages */
5537 	for_each_zone(zone) {
5538 		if (!is_highmem(zone))
5539 			lowmem_pages += zone->managed_pages;
5540 	}
5541 
5542 	for_each_zone(zone) {
5543 		u64 tmp;
5544 
5545 		spin_lock_irqsave(&zone->lock, flags);
5546 		tmp = (u64)pages_min * zone->managed_pages;
5547 		do_div(tmp, lowmem_pages);
5548 		if (is_highmem(zone)) {
5549 			/*
5550 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5551 			 * need highmem pages, so cap pages_min to a small
5552 			 * value here.
5553 			 *
5554 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5555 			 * deltas controls asynch page reclaim, and so should
5556 			 * not be capped for highmem.
5557 			 */
5558 			unsigned long min_pages;
5559 
5560 			min_pages = zone->managed_pages / 1024;
5561 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5562 			zone->watermark[WMARK_MIN] = min_pages;
5563 		} else {
5564 			/*
5565 			 * If it's a lowmem zone, reserve a number of pages
5566 			 * proportionate to the zone's size.
5567 			 */
5568 			zone->watermark[WMARK_MIN] = tmp;
5569 		}
5570 
5571 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5572 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5573 
5574 		setup_zone_migrate_reserve(zone);
5575 		spin_unlock_irqrestore(&zone->lock, flags);
5576 	}
5577 
5578 	/* update totalreserve_pages */
5579 	calculate_totalreserve_pages();
5580 }
5581 
5582 /**
5583  * setup_per_zone_wmarks - called when min_free_kbytes changes
5584  * or when memory is hot-{added|removed}
5585  *
5586  * Ensures that the watermark[min,low,high] values for each zone are set
5587  * correctly with respect to min_free_kbytes.
5588  */
5589 void setup_per_zone_wmarks(void)
5590 {
5591 	mutex_lock(&zonelists_mutex);
5592 	__setup_per_zone_wmarks();
5593 	mutex_unlock(&zonelists_mutex);
5594 }
5595 
5596 /*
5597  * The inactive anon list should be small enough that the VM never has to
5598  * do too much work, but large enough that each inactive page has a chance
5599  * to be referenced again before it is swapped out.
5600  *
5601  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5602  * INACTIVE_ANON pages on this zone's LRU, maintained by the
5603  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5604  * the anonymous pages are kept on the inactive list.
5605  *
5606  * total     target    max
5607  * memory    ratio     inactive anon
5608  * -------------------------------------
5609  *   10MB       1         5MB
5610  *  100MB       1        50MB
5611  *    1GB       3       250MB
5612  *   10GB      10       0.9GB
5613  *  100GB      31         3GB
5614  *    1TB     101        10GB
5615  *   10TB     320        32GB
5616  */
5617 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5618 {
5619 	unsigned int gb, ratio;
5620 
5621 	/* Zone size in gigabytes */
5622 	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
5623 	if (gb)
5624 		ratio = int_sqrt(10 * gb);
5625 	else
5626 		ratio = 1;
5627 
5628 	zone->inactive_ratio = ratio;
5629 }
5630 
5631 static void __meminit setup_per_zone_inactive_ratio(void)
5632 {
5633 	struct zone *zone;
5634 
5635 	for_each_zone(zone)
5636 		calculate_zone_inactive_ratio(zone);
5637 }
5638 
5639 /*
5640  * Initialise min_free_kbytes.
5641  *
5642  * For small machines we want it small (128k min).  For large machines
5643  * we want it large (64MB max).  But it is not linear, because network
5644  * bandwidth does not increase linearly with machine size.  We use
5645  *
5646  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5647  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
5648  *
5649  * which yields
5650  *
5651  * 16MB:	512k
5652  * 32MB:	724k
5653  * 64MB:	1024k
5654  * 128MB:	1448k
5655  * 256MB:	2048k
5656  * 512MB:	2896k
5657  * 1024MB:	4096k
5658  * 2048MB:	5792k
5659  * 4096MB:	8192k
5660  * 8192MB:	11584k
5661  * 16384MB:	16384k
5662  */
5663 int __meminit init_per_zone_wmark_min(void)
5664 {
5665 	unsigned long lowmem_kbytes;
5666 
5667 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5668 
5669 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5670 	if (min_free_kbytes < 128)
5671 		min_free_kbytes = 128;
5672 	if (min_free_kbytes > 65536)
5673 		min_free_kbytes = 65536;
5674 	setup_per_zone_wmarks();
5675 	refresh_zone_stat_thresholds();
5676 	setup_per_zone_lowmem_reserve();
5677 	setup_per_zone_inactive_ratio();
5678 	return 0;
5679 }
5680 module_init(init_per_zone_wmark_min)
5681 
5682 /*
5683  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5684  *	that we can call two helper functions whenever min_free_kbytes
5685  *	changes.
5686  */
5687 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
5688 	void __user *buffer, size_t *length, loff_t *ppos)
5689 {
5690 	proc_dointvec(table, write, buffer, length, ppos);
5691 	if (write)
5692 		setup_per_zone_wmarks();
5693 	return 0;
5694 }
5695 
5696 #ifdef CONFIG_NUMA
5697 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5698 	void __user *buffer, size_t *length, loff_t *ppos)
5699 {
5700 	struct zone *zone;
5701 	int rc;
5702 
5703 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5704 	if (rc)
5705 		return rc;
5706 
5707 	for_each_zone(zone)
5708 		zone->min_unmapped_pages = (zone->managed_pages *
5709 				sysctl_min_unmapped_ratio) / 100;
5710 	return 0;
5711 }
5712 
5713 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5714 	void __user *buffer, size_t *length, loff_t *ppos)
5715 {
5716 	struct zone *zone;
5717 	int rc;
5718 
5719 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5720 	if (rc)
5721 		return rc;
5722 
5723 	for_each_zone(zone)
5724 		zone->min_slab_pages = (zone->managed_pages *
5725 				sysctl_min_slab_ratio) / 100;
5726 	return 0;
5727 }
5728 #endif
5729 
5730 /*
5731  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5732  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5733  *	whenever sysctl_lowmem_reserve_ratio changes.
5734  *
5735  * The reserve ratio obviously has absolutely no relation with the
5736  * minimum watermarks. The lowmem reserve ratio can only make sense
5737  * if in function of the boot time zone sizes.
5738  */
5739 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5740 	void __user *buffer, size_t *length, loff_t *ppos)
5741 {
5742 	proc_dointvec_minmax(table, write, buffer, length, ppos);
5743 	setup_per_zone_lowmem_reserve();
5744 	return 0;
5745 }
5746 
5747 /*
5748  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5749  * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
5750  * can have before it gets flushed back to buddy allocator.
5751  */
5752 
5753 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5754 	void __user *buffer, size_t *length, loff_t *ppos)
5755 {
5756 	struct zone *zone;
5757 	unsigned int cpu;
5758 	int ret;
5759 
5760 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5761 	if (!write || (ret < 0))
5762 		return ret;
5763 	for_each_populated_zone(zone) {
5764 		for_each_possible_cpu(cpu) {
5765 			unsigned long  high;
5766 			high = zone->managed_pages / percpu_pagelist_fraction;
5767 			setup_pagelist_highmark(
5768 				per_cpu_ptr(zone->pageset, cpu), high);
5769 		}
5770 	}
5771 	return 0;
5772 }
5773 
5774 int hashdist = HASHDIST_DEFAULT;
5775 
5776 #ifdef CONFIG_NUMA
5777 static int __init set_hashdist(char *str)
5778 {
5779 	if (!str)
5780 		return 0;
5781 	hashdist = simple_strtoul(str, &str, 0);
5782 	return 1;
5783 }
5784 __setup("hashdist=", set_hashdist);
5785 #endif
5786 
5787 /*
5788  * allocate a large system hash table from bootmem
5789  * - it is assumed that the hash table must contain an exact power-of-2
5790  *   quantity of entries
5791  * - limit is the number of hash buckets, not the total allocation size
5792  */
5793 void *__init alloc_large_system_hash(const char *tablename,
5794 				     unsigned long bucketsize,
5795 				     unsigned long numentries,
5796 				     int scale,
5797 				     int flags,
5798 				     unsigned int *_hash_shift,
5799 				     unsigned int *_hash_mask,
5800 				     unsigned long low_limit,
5801 				     unsigned long high_limit)
5802 {
5803 	unsigned long long max = high_limit;
5804 	unsigned long log2qty, size;
5805 	void *table = NULL;
5806 
5807 	/* allow the kernel cmdline to have a say */
5808 	if (!numentries) {
5809 		/* round applicable memory size up to nearest megabyte */
5810 		numentries = nr_kernel_pages;
5811 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5812 		numentries >>= 20 - PAGE_SHIFT;
5813 		numentries <<= 20 - PAGE_SHIFT;
5814 
5815 		/* limit to 1 bucket per 2^scale bytes of low memory */
5816 		if (scale > PAGE_SHIFT)
5817 			numentries >>= (scale - PAGE_SHIFT);
5818 		else
5819 			numentries <<= (PAGE_SHIFT - scale);
5820 
5821 		/* Make sure we've got at least a 0-order allocation.. */
5822 		if (unlikely(flags & HASH_SMALL)) {
5823 			/* Makes no sense without HASH_EARLY */
5824 			WARN_ON(!(flags & HASH_EARLY));
5825 			if (!(numentries >> *_hash_shift)) {
5826 				numentries = 1UL << *_hash_shift;
5827 				BUG_ON(!numentries);
5828 			}
5829 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5830 			numentries = PAGE_SIZE / bucketsize;
5831 	}
5832 	numentries = roundup_pow_of_two(numentries);
5833 
5834 	/* limit allocation size to 1/16 total memory by default */
5835 	if (max == 0) {
5836 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5837 		do_div(max, bucketsize);
5838 	}
5839 	max = min(max, 0x80000000ULL);
5840 
5841 	if (numentries < low_limit)
5842 		numentries = low_limit;
5843 	if (numentries > max)
5844 		numentries = max;
5845 
5846 	log2qty = ilog2(numentries);
5847 
5848 	do {
5849 		size = bucketsize << log2qty;
5850 		if (flags & HASH_EARLY)
5851 			table = alloc_bootmem_nopanic(size);
5852 		else if (hashdist)
5853 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5854 		else {
5855 			/*
5856 			 * If bucketsize is not a power-of-two, we may free
5857 			 * some pages at the end of hash table which
5858 			 * alloc_pages_exact() automatically does
5859 			 */
5860 			if (get_order(size) < MAX_ORDER) {
5861 				table = alloc_pages_exact(size, GFP_ATOMIC);
5862 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5863 			}
5864 		}
5865 	} while (!table && size > PAGE_SIZE && --log2qty);
5866 
5867 	if (!table)
5868 		panic("Failed to allocate %s hash table\n", tablename);
5869 
5870 	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5871 	       tablename,
5872 	       (1UL << log2qty),
5873 	       ilog2(size) - PAGE_SHIFT,
5874 	       size);
5875 
5876 	if (_hash_shift)
5877 		*_hash_shift = log2qty;
5878 	if (_hash_mask)
5879 		*_hash_mask = (1 << log2qty) - 1;
5880 
5881 	return table;
5882 }
5883 
5884 /* Return a pointer to the bitmap storing bits affecting a block of pages */
5885 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5886 							unsigned long pfn)
5887 {
5888 #ifdef CONFIG_SPARSEMEM
5889 	return __pfn_to_section(pfn)->pageblock_flags;
5890 #else
5891 	return zone->pageblock_flags;
5892 #endif /* CONFIG_SPARSEMEM */
5893 }
5894 
5895 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5896 {
5897 #ifdef CONFIG_SPARSEMEM
5898 	pfn &= (PAGES_PER_SECTION-1);
5899 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5900 #else
5901 	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
5902 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5903 #endif /* CONFIG_SPARSEMEM */
5904 }
5905 
5906 /**
5907  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5908  * @page: The page within the block of interest
5909  * @start_bitidx: The first bit of interest to retrieve
5910  * @end_bitidx: The last bit of interest
5911  * returns pageblock_bits flags
5912  */
5913 unsigned long get_pageblock_flags_group(struct page *page,
5914 					int start_bitidx, int end_bitidx)
5915 {
5916 	struct zone *zone;
5917 	unsigned long *bitmap;
5918 	unsigned long pfn, bitidx;
5919 	unsigned long flags = 0;
5920 	unsigned long value = 1;
5921 
5922 	zone = page_zone(page);
5923 	pfn = page_to_pfn(page);
5924 	bitmap = get_pageblock_bitmap(zone, pfn);
5925 	bitidx = pfn_to_bitidx(zone, pfn);
5926 
5927 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5928 		if (test_bit(bitidx + start_bitidx, bitmap))
5929 			flags |= value;
5930 
5931 	return flags;
5932 }
5933 
5934 /**
5935  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5936  * @page: The page within the block of interest
5937  * @start_bitidx: The first bit of interest
5938  * @end_bitidx: The last bit of interest
5939  * @flags: The flags to set
5940  */
5941 void set_pageblock_flags_group(struct page *page, unsigned long flags,
5942 					int start_bitidx, int end_bitidx)
5943 {
5944 	struct zone *zone;
5945 	unsigned long *bitmap;
5946 	unsigned long pfn, bitidx;
5947 	unsigned long value = 1;
5948 
5949 	zone = page_zone(page);
5950 	pfn = page_to_pfn(page);
5951 	bitmap = get_pageblock_bitmap(zone, pfn);
5952 	bitidx = pfn_to_bitidx(zone, pfn);
5953 	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
5954 
5955 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5956 		if (flags & value)
5957 			__set_bit(bitidx + start_bitidx, bitmap);
5958 		else
5959 			__clear_bit(bitidx + start_bitidx, bitmap);
5960 }
5961 
5962 /*
5963  * This function checks whether pageblock includes unmovable pages or not.
5964  * If @count is not zero, it is okay to include less @count unmovable pages
5965  *
5966  * PageLRU check wihtout isolation or lru_lock could race so that
5967  * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
5968  * expect this function should be exact.
5969  */
5970 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
5971 			 bool skip_hwpoisoned_pages)
5972 {
5973 	unsigned long pfn, iter, found;
5974 	int mt;
5975 
5976 	/*
5977 	 * For avoiding noise data, lru_add_drain_all() should be called
5978 	 * If ZONE_MOVABLE, the zone never contains unmovable pages
5979 	 */
5980 	if (zone_idx(zone) == ZONE_MOVABLE)
5981 		return false;
5982 	mt = get_pageblock_migratetype(page);
5983 	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
5984 		return false;
5985 
5986 	pfn = page_to_pfn(page);
5987 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5988 		unsigned long check = pfn + iter;
5989 
5990 		if (!pfn_valid_within(check))
5991 			continue;
5992 
5993 		page = pfn_to_page(check);
5994 		/*
5995 		 * We can't use page_count without pin a page
5996 		 * because another CPU can free compound page.
5997 		 * This check already skips compound tails of THP
5998 		 * because their page->_count is zero at all time.
5999 		 */
6000 		if (!atomic_read(&page->_count)) {
6001 			if (PageBuddy(page))
6002 				iter += (1 << page_order(page)) - 1;
6003 			continue;
6004 		}
6005 
6006 		/*
6007 		 * The HWPoisoned page may be not in buddy system, and
6008 		 * page_count() is not 0.
6009 		 */
6010 		if (skip_hwpoisoned_pages && PageHWPoison(page))
6011 			continue;
6012 
6013 		if (!PageLRU(page))
6014 			found++;
6015 		/*
6016 		 * If there are RECLAIMABLE pages, we need to check it.
6017 		 * But now, memory offline itself doesn't call shrink_slab()
6018 		 * and it still to be fixed.
6019 		 */
6020 		/*
6021 		 * If the page is not RAM, page_count()should be 0.
6022 		 * we don't need more check. This is an _used_ not-movable page.
6023 		 *
6024 		 * The problematic thing here is PG_reserved pages. PG_reserved
6025 		 * is set to both of a memory hole page and a _used_ kernel
6026 		 * page at boot.
6027 		 */
6028 		if (found > count)
6029 			return true;
6030 	}
6031 	return false;
6032 }
6033 
6034 bool is_pageblock_removable_nolock(struct page *page)
6035 {
6036 	struct zone *zone;
6037 	unsigned long pfn;
6038 
6039 	/*
6040 	 * We have to be careful here because we are iterating over memory
6041 	 * sections which are not zone aware so we might end up outside of
6042 	 * the zone but still within the section.
6043 	 * We have to take care about the node as well. If the node is offline
6044 	 * its NODE_DATA will be NULL - see page_zone.
6045 	 */
6046 	if (!node_online(page_to_nid(page)))
6047 		return false;
6048 
6049 	zone = page_zone(page);
6050 	pfn = page_to_pfn(page);
6051 	if (!zone_spans_pfn(zone, pfn))
6052 		return false;
6053 
6054 	return !has_unmovable_pages(zone, page, 0, true);
6055 }
6056 
6057 #ifdef CONFIG_CMA
6058 
6059 static unsigned long pfn_max_align_down(unsigned long pfn)
6060 {
6061 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6062 			     pageblock_nr_pages) - 1);
6063 }
6064 
6065 static unsigned long pfn_max_align_up(unsigned long pfn)
6066 {
6067 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6068 				pageblock_nr_pages));
6069 }
6070 
6071 /* [start, end) must belong to a single zone. */
6072 static int __alloc_contig_migrate_range(struct compact_control *cc,
6073 					unsigned long start, unsigned long end)
6074 {
6075 	/* This function is based on compact_zone() from compaction.c. */
6076 	unsigned long nr_reclaimed;
6077 	unsigned long pfn = start;
6078 	unsigned int tries = 0;
6079 	int ret = 0;
6080 
6081 	migrate_prep();
6082 
6083 	while (pfn < end || !list_empty(&cc->migratepages)) {
6084 		if (fatal_signal_pending(current)) {
6085 			ret = -EINTR;
6086 			break;
6087 		}
6088 
6089 		if (list_empty(&cc->migratepages)) {
6090 			cc->nr_migratepages = 0;
6091 			pfn = isolate_migratepages_range(cc->zone, cc,
6092 							 pfn, end, true);
6093 			if (!pfn) {
6094 				ret = -EINTR;
6095 				break;
6096 			}
6097 			tries = 0;
6098 		} else if (++tries == 5) {
6099 			ret = ret < 0 ? ret : -EBUSY;
6100 			break;
6101 		}
6102 
6103 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6104 							&cc->migratepages);
6105 		cc->nr_migratepages -= nr_reclaimed;
6106 
6107 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6108 				    0, MIGRATE_SYNC, MR_CMA);
6109 	}
6110 	if (ret < 0) {
6111 		putback_movable_pages(&cc->migratepages);
6112 		return ret;
6113 	}
6114 	return 0;
6115 }
6116 
6117 /**
6118  * alloc_contig_range() -- tries to allocate given range of pages
6119  * @start:	start PFN to allocate
6120  * @end:	one-past-the-last PFN to allocate
6121  * @migratetype:	migratetype of the underlaying pageblocks (either
6122  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
6123  *			in range must have the same migratetype and it must
6124  *			be either of the two.
6125  *
6126  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6127  * aligned, however it's the caller's responsibility to guarantee that
6128  * we are the only thread that changes migrate type of pageblocks the
6129  * pages fall in.
6130  *
6131  * The PFN range must belong to a single zone.
6132  *
6133  * Returns zero on success or negative error code.  On success all
6134  * pages which PFN is in [start, end) are allocated for the caller and
6135  * need to be freed with free_contig_range().
6136  */
6137 int alloc_contig_range(unsigned long start, unsigned long end,
6138 		       unsigned migratetype)
6139 {
6140 	unsigned long outer_start, outer_end;
6141 	int ret = 0, order;
6142 
6143 	struct compact_control cc = {
6144 		.nr_migratepages = 0,
6145 		.order = -1,
6146 		.zone = page_zone(pfn_to_page(start)),
6147 		.sync = true,
6148 		.ignore_skip_hint = true,
6149 	};
6150 	INIT_LIST_HEAD(&cc.migratepages);
6151 
6152 	/*
6153 	 * What we do here is we mark all pageblocks in range as
6154 	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
6155 	 * have different sizes, and due to the way page allocator
6156 	 * work, we align the range to biggest of the two pages so
6157 	 * that page allocator won't try to merge buddies from
6158 	 * different pageblocks and change MIGRATE_ISOLATE to some
6159 	 * other migration type.
6160 	 *
6161 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6162 	 * migrate the pages from an unaligned range (ie. pages that
6163 	 * we are interested in).  This will put all the pages in
6164 	 * range back to page allocator as MIGRATE_ISOLATE.
6165 	 *
6166 	 * When this is done, we take the pages in range from page
6167 	 * allocator removing them from the buddy system.  This way
6168 	 * page allocator will never consider using them.
6169 	 *
6170 	 * This lets us mark the pageblocks back as
6171 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6172 	 * aligned range but not in the unaligned, original range are
6173 	 * put back to page allocator so that buddy can use them.
6174 	 */
6175 
6176 	ret = start_isolate_page_range(pfn_max_align_down(start),
6177 				       pfn_max_align_up(end), migratetype,
6178 				       false);
6179 	if (ret)
6180 		return ret;
6181 
6182 	ret = __alloc_contig_migrate_range(&cc, start, end);
6183 	if (ret)
6184 		goto done;
6185 
6186 	/*
6187 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6188 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
6189 	 * more, all pages in [start, end) are free in page allocator.
6190 	 * What we are going to do is to allocate all pages from
6191 	 * [start, end) (that is remove them from page allocator).
6192 	 *
6193 	 * The only problem is that pages at the beginning and at the
6194 	 * end of interesting range may be not aligned with pages that
6195 	 * page allocator holds, ie. they can be part of higher order
6196 	 * pages.  Because of this, we reserve the bigger range and
6197 	 * once this is done free the pages we are not interested in.
6198 	 *
6199 	 * We don't have to hold zone->lock here because the pages are
6200 	 * isolated thus they won't get removed from buddy.
6201 	 */
6202 
6203 	lru_add_drain_all();
6204 	drain_all_pages();
6205 
6206 	order = 0;
6207 	outer_start = start;
6208 	while (!PageBuddy(pfn_to_page(outer_start))) {
6209 		if (++order >= MAX_ORDER) {
6210 			ret = -EBUSY;
6211 			goto done;
6212 		}
6213 		outer_start &= ~0UL << order;
6214 	}
6215 
6216 	/* Make sure the range is really isolated. */
6217 	if (test_pages_isolated(outer_start, end, false)) {
6218 		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
6219 		       outer_start, end);
6220 		ret = -EBUSY;
6221 		goto done;
6222 	}
6223 
6224 
6225 	/* Grab isolated pages from freelists. */
6226 	outer_end = isolate_freepages_range(&cc, outer_start, end);
6227 	if (!outer_end) {
6228 		ret = -EBUSY;
6229 		goto done;
6230 	}
6231 
6232 	/* Free head and tail (if any) */
6233 	if (start != outer_start)
6234 		free_contig_range(outer_start, start - outer_start);
6235 	if (end != outer_end)
6236 		free_contig_range(end, outer_end - end);
6237 
6238 done:
6239 	undo_isolate_page_range(pfn_max_align_down(start),
6240 				pfn_max_align_up(end), migratetype);
6241 	return ret;
6242 }
6243 
6244 void free_contig_range(unsigned long pfn, unsigned nr_pages)
6245 {
6246 	unsigned int count = 0;
6247 
6248 	for (; nr_pages--; pfn++) {
6249 		struct page *page = pfn_to_page(pfn);
6250 
6251 		count += page_count(page) != 1;
6252 		__free_page(page);
6253 	}
6254 	WARN(count != 0, "%d pages are still in use!\n", count);
6255 }
6256 #endif
6257 
6258 #ifdef CONFIG_MEMORY_HOTPLUG
6259 static int __meminit __zone_pcp_update(void *data)
6260 {
6261 	struct zone *zone = data;
6262 	int cpu;
6263 	unsigned long batch = zone_batchsize(zone), flags;
6264 
6265 	for_each_possible_cpu(cpu) {
6266 		struct per_cpu_pageset *pset;
6267 		struct per_cpu_pages *pcp;
6268 
6269 		pset = per_cpu_ptr(zone->pageset, cpu);
6270 		pcp = &pset->pcp;
6271 
6272 		local_irq_save(flags);
6273 		if (pcp->count > 0)
6274 			free_pcppages_bulk(zone, pcp->count, pcp);
6275 		drain_zonestat(zone, pset);
6276 		setup_pageset(pset, batch);
6277 		local_irq_restore(flags);
6278 	}
6279 	return 0;
6280 }
6281 
6282 void __meminit zone_pcp_update(struct zone *zone)
6283 {
6284 	stop_machine(__zone_pcp_update, zone, NULL);
6285 }
6286 #endif
6287 
6288 void zone_pcp_reset(struct zone *zone)
6289 {
6290 	unsigned long flags;
6291 	int cpu;
6292 	struct per_cpu_pageset *pset;
6293 
6294 	/* avoid races with drain_pages()  */
6295 	local_irq_save(flags);
6296 	if (zone->pageset != &boot_pageset) {
6297 		for_each_online_cpu(cpu) {
6298 			pset = per_cpu_ptr(zone->pageset, cpu);
6299 			drain_zonestat(zone, pset);
6300 		}
6301 		free_percpu(zone->pageset);
6302 		zone->pageset = &boot_pageset;
6303 	}
6304 	local_irq_restore(flags);
6305 }
6306 
6307 #ifdef CONFIG_MEMORY_HOTREMOVE
6308 /*
6309  * All pages in the range must be isolated before calling this.
6310  */
6311 void
6312 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6313 {
6314 	struct page *page;
6315 	struct zone *zone;
6316 	int order, i;
6317 	unsigned long pfn;
6318 	unsigned long flags;
6319 	/* find the first valid pfn */
6320 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
6321 		if (pfn_valid(pfn))
6322 			break;
6323 	if (pfn == end_pfn)
6324 		return;
6325 	zone = page_zone(pfn_to_page(pfn));
6326 	spin_lock_irqsave(&zone->lock, flags);
6327 	pfn = start_pfn;
6328 	while (pfn < end_pfn) {
6329 		if (!pfn_valid(pfn)) {
6330 			pfn++;
6331 			continue;
6332 		}
6333 		page = pfn_to_page(pfn);
6334 		/*
6335 		 * The HWPoisoned page may be not in buddy system, and
6336 		 * page_count() is not 0.
6337 		 */
6338 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6339 			pfn++;
6340 			SetPageReserved(page);
6341 			continue;
6342 		}
6343 
6344 		BUG_ON(page_count(page));
6345 		BUG_ON(!PageBuddy(page));
6346 		order = page_order(page);
6347 #ifdef CONFIG_DEBUG_VM
6348 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
6349 		       pfn, 1 << order, end_pfn);
6350 #endif
6351 		list_del(&page->lru);
6352 		rmv_page_order(page);
6353 		zone->free_area[order].nr_free--;
6354 		for (i = 0; i < (1 << order); i++)
6355 			SetPageReserved((page+i));
6356 		pfn += (1 << order);
6357 	}
6358 	spin_unlock_irqrestore(&zone->lock, flags);
6359 }
6360 #endif
6361 
6362 #ifdef CONFIG_MEMORY_FAILURE
6363 bool is_free_buddy_page(struct page *page)
6364 {
6365 	struct zone *zone = page_zone(page);
6366 	unsigned long pfn = page_to_pfn(page);
6367 	unsigned long flags;
6368 	int order;
6369 
6370 	spin_lock_irqsave(&zone->lock, flags);
6371 	for (order = 0; order < MAX_ORDER; order++) {
6372 		struct page *page_head = page - (pfn & ((1 << order) - 1));
6373 
6374 		if (PageBuddy(page_head) && page_order(page_head) >= order)
6375 			break;
6376 	}
6377 	spin_unlock_irqrestore(&zone->lock, flags);
6378 
6379 	return order < MAX_ORDER;
6380 }
6381 #endif
6382 
6383 static const struct trace_print_flags pageflag_names[] = {
6384 	{1UL << PG_locked,		"locked"	},
6385 	{1UL << PG_error,		"error"		},
6386 	{1UL << PG_referenced,		"referenced"	},
6387 	{1UL << PG_uptodate,		"uptodate"	},
6388 	{1UL << PG_dirty,		"dirty"		},
6389 	{1UL << PG_lru,			"lru"		},
6390 	{1UL << PG_active,		"active"	},
6391 	{1UL << PG_slab,		"slab"		},
6392 	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
6393 	{1UL << PG_arch_1,		"arch_1"	},
6394 	{1UL << PG_reserved,		"reserved"	},
6395 	{1UL << PG_private,		"private"	},
6396 	{1UL << PG_private_2,		"private_2"	},
6397 	{1UL << PG_writeback,		"writeback"	},
6398 #ifdef CONFIG_PAGEFLAGS_EXTENDED
6399 	{1UL << PG_head,		"head"		},
6400 	{1UL << PG_tail,		"tail"		},
6401 #else
6402 	{1UL << PG_compound,		"compound"	},
6403 #endif
6404 	{1UL << PG_swapcache,		"swapcache"	},
6405 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
6406 	{1UL << PG_reclaim,		"reclaim"	},
6407 	{1UL << PG_swapbacked,		"swapbacked"	},
6408 	{1UL << PG_unevictable,		"unevictable"	},
6409 #ifdef CONFIG_MMU
6410 	{1UL << PG_mlocked,		"mlocked"	},
6411 #endif
6412 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
6413 	{1UL << PG_uncached,		"uncached"	},
6414 #endif
6415 #ifdef CONFIG_MEMORY_FAILURE
6416 	{1UL << PG_hwpoison,		"hwpoison"	},
6417 #endif
6418 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6419 	{1UL << PG_compound_lock,	"compound_lock"	},
6420 #endif
6421 };
6422 
6423 static void dump_page_flags(unsigned long flags)
6424 {
6425 	const char *delim = "";
6426 	unsigned long mask;
6427 	int i;
6428 
6429 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
6430 
6431 	printk(KERN_ALERT "page flags: %#lx(", flags);
6432 
6433 	/* remove zone id */
6434 	flags &= (1UL << NR_PAGEFLAGS) - 1;
6435 
6436 	for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
6437 
6438 		mask = pageflag_names[i].mask;
6439 		if ((flags & mask) != mask)
6440 			continue;
6441 
6442 		flags &= ~mask;
6443 		printk("%s%s", delim, pageflag_names[i].name);
6444 		delim = "|";
6445 	}
6446 
6447 	/* check for left over flags */
6448 	if (flags)
6449 		printk("%s%#lx", delim, flags);
6450 
6451 	printk(")\n");
6452 }
6453 
6454 void dump_page(struct page *page)
6455 {
6456 	printk(KERN_ALERT
6457 	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
6458 		page, atomic_read(&page->_count), page_mapcount(page),
6459 		page->mapping, page->index);
6460 	dump_page_flags(page->flags);
6461 	mem_cgroup_print_bad_page(page);
6462 }
6463