/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory. This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
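
/*
 * Effect in practice (a sketch, assuming this kernel's gfp.h definitions):
 * after pm_restrict_gfp_mask(), a GFP_KERNEL allocation behaves like
 * GFP_NOIO, since
 *
 *	GFP_KERNEL & ~(__GFP_IO | __GFP_FS) == __GFP_RECLAIM == GFP_NOIO
 *
 * so allocations made while devices are suspended may still reclaim clean
 * pages but can neither start I/O nor call back into filesystems.
 */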
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

static void free_compound_page(struct page *page);
compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
		return true;

	return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
	if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;

	/* Initialise at least 2G of the highest zone */
	(*nr_initialised)++;
	if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif


void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
}
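
/*
 * Illustration (a sketch of the encoding described above; the real helper is
 * compound_head() in <linux/page-flags.h>): with bit 0 of ->compound_head
 * doubling as the PageTail flag, the head page can be recovered from any
 * tail page like this:
 *
 *	unsigned long head = READ_ONCE(page->compound_head);
 *
 *	if (head & 1)
 *		return (struct page *)(head - 1);	// tail: strip bit 0
 *	return page;					// head or base page
 */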
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		_debug_pagealloc_enabled = true;

	return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy;
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
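
/*
 * Worked example (a sketch; __find_buddy_index() lives in mm/internal.h and
 * is simply "page_idx ^ (1 << order)"):
 *
 *	order 0: page_idx 12 -> buddy_idx 12 ^ 1 = 13
 *	order 1: page_idx 12 -> buddy_idx 12 ^ 2 = 14
 *	order 2: page_idx 12 -> buddy_idx 12 ^ 4 = 8
 *
 * After a merge, combined_idx = buddy_idx & page_idx picks the lower of the
 * two indices, i.e. the start of the merged, naturally-aligned block.
 */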
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	unsigned int max_order = MAX_ORDER;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (is_migrate_isolate(migratetype)) {
		/*
		 * We restrict max order of merging to prevent merge
		 * between freepages on isolate pageblock and normal
		 * pageblock. Without this, pageblock isolation
		 * could cause incorrect freepage accounting.
		 */
		max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
	} else {
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	}

	page_idx = pfn & ((1 << max_order) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard
		 * page; merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. If that is
	 * happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher-order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone, and of the same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around
		 * empty lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete, as __free_one_page() manipulates the list */
			list_del(&page->lru);

			mt = get_pcppage_migratetype(page);
			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(has_isolate_pageblock(zone)))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on the fact that page->lru.next never has bit 0 set,
	 * unless the page is PageTail(). Let's make sure that's true even
	 * for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	clear_compound_head(page);
	return ret;
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
					int nid)
{
	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_pfn(pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			SetPageReserved(page);
		}
	}
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	bool compound = PageCompound(page);
	int i, bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);
	kasan_free_pages(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	bad += free_pages_check(page);
	for (i = 1; i < (1 << order); i++) {
		if (compound)
			bad += free_tail_pages_check(page, page + i);
		bad += free_pages_check(page + i);
	}
	if (bad)
		return false;

	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

static void __init __free_pages_boot_core(struct page *page,
					unsigned long pfn, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = 0;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	int nid;

	nid = __early_pfn_to_nid(pfn, state);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}

/* Only safe to use early in boot when initialisation is single-threaded */
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return true;
}
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	return true;
}
#endif


void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	return __free_pages_boot_core(page, pfn, order);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(struct page *page,
					unsigned long pfn, int nr_pages)
{
	int i;

	if (!page)
		return;

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == MAX_ORDER_NR_PAGES &&
	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++)
		__free_pages_boot_core(page, pfn, 0);
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	int nid = pgdat->node_id;
	struct mminit_pfnnid_cache nid_init_state = { };
	unsigned long start = jiffies;
	unsigned long nr_pages = 0;
	unsigned long walk_start, walk_end;
	int i, zid;
	struct zone *zone;
	unsigned long first_init_pfn = pgdat->first_deferred_pfn;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (first_init_pfn == ULONG_MAX) {
		pgdat_init_report_one_done();
		return 0;
	}

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
		unsigned long pfn, end_pfn;
		struct page *page = NULL;
		struct page *free_base_page = NULL;
		unsigned long free_base_pfn = 0;
		int nr_to_free = 0;

		end_pfn = min(walk_end, zone_end_pfn(zone));
		pfn = first_init_pfn;
		if (pfn < walk_start)
			pfn = walk_start;
		if (pfn < zone->zone_start_pfn)
			pfn = zone->zone_start_pfn;

		for (; pfn < end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				goto free_range;

			/*
			 * Ensure pfn_valid is checked every
			 * MAX_ORDER_NR_PAGES for memory holes
			 */
			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
				if (!pfn_valid(pfn)) {
					page = NULL;
					goto free_range;
				}
			}

			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
				page = NULL;
				goto free_range;
			}

			/* Minimise pfn page lookups and scheduler checks */
			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
				page++;
			} else {
				nr_pages += nr_to_free;
				deferred_free_range(free_base_page,
						free_base_pfn, nr_to_free);
				free_base_page = NULL;
				free_base_pfn = nr_to_free = 0;

				page = pfn_to_page(pfn);
				cond_resched();
			}

			if (page->flags) {
				VM_BUG_ON(page_zone(page) != zone);
				goto free_range;
			}

			__init_single_page(page, pfn, zid, nid);
			if (!free_base_page) {
				free_base_page = page;
				free_base_pfn = pfn;
				nr_to_free = 0;
			}
			nr_to_free++;

			/* Where possible, batch up pages for a single free */
			continue;
free_range:
			/* Free the current block of pages to allocator */
			nr_pages += nr_to_free;
			deferred_free_range(free_base_page, free_base_pfn,
						nr_to_free);
			free_base_page = NULL;
			free_base_pfn = nr_to_free = 0;
		}

		first_init_pfn = max(end_pfn, first_init_pfn);
	}

	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
					jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}

void __init page_alloc_init_late(void)
{
	int nid;

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid),
						"pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
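
/*
 * Worked example (a sketch): suppose an order-0 request is served from an
 * order-3 block of 8 pages starting at page[0]. expand() walks high = 3
 * down to low = 0, returning the unused upper halves to the free lists:
 *
 *	order 2: page[4..7] -> free_list[2]
 *	order 1: page[2..3] -> free_list[1]
 *	order 0: page[1]    -> free_list[0]
 *
 * page[0] is what the caller hands out. With CONFIG_DEBUG_PAGEALLOC and a
 * large enough debug_guardpage_minorder, the split-off halves become guard
 * pages instead of going back on the free lists.
 */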
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
			debug_guardpage_enabled() &&
			high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or page), so they can be
			 * merged back into the allocator when the buddy is
			 * freed. The corresponding page table entries are
			 * not touched; the pages stay not-present in the
			 * virtual address space.
			 */
			set_page_guard(zone, &page[size], high, migratetype);
			continue;
		}
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & __PG_HWPOISON)) {
		bad_reason = "HWPoisoned (hardware-corrupted)";
		bad_flags = __PG_HWPOISON;
	}
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
								int alloc_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kasan_alloc_pages(page, order);

	if (gfp_flags & __GFP_ZERO)
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	set_page_owner(page, order, gfp_flags);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists.
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which lists are fallen back to when
 * the free lists for the desired migrate type are depleted.
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};

#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block().
 */
int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned int order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility.
	 */
	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
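
/*
 * Worked example (a sketch, assuming x86-64 defaults: 4K pages and
 * pageblock_order == 9, so pageblock_nr_pages == 512): for a page with
 * pfn 0x12345, the mask rounds start_pfn down to 0x12200, and the block
 * considered is pfn 0x12200..0x123ff inclusive. If the block straddles
 * the zone start, moving begins at the passed-in page instead; if it
 * straddles the zone end, nothing is moved and 0 is returned.
 */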
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * This order check is intentional even though there is a more
	 * relaxed order check below. We can actually steal the whole
	 * pageblock if this condition is met, but the check below doesn't
	 * guarantee it; it is just a heuristic and could change at any time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

/*
 * This function implements the actual steal behaviour. If the order is
 * large enough, we can steal the whole pageblock. If not, we first move
 * the freepages in this pageblock and check whether half of the pages are
 * moved or not. If half of the pages are moved, we can change the
 * migratetype of the pageblock and permanently use its pages as the
 * requested migratetype in the future.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
							  int start_type)
{
	unsigned int current_order = page_order(page);
	int pages;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		return;
	}

	pages = move_freepages_block(zone, page, start_type);

	/* Claim the whole block if over half of it is free */
	if (pages >= (1 << (pageblock_order-1)) ||
			page_group_by_mobility_disabled)
		set_pageblock_migratetype(page, start_type);
}

/*
 * Check whether there is a suitable fallback freepage with the requested
 * order. If only_stealable is true, this function returns fallback_mt only
 * if we can steal all the other freepages together. This helps to reduce
 * fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal)
{
	int i;
	int fallback_mt;

	if (area->nr_free == 0)
		return -1;

	*can_steal = false;
	for (i = 0;; i++) {
		fallback_mt = fallbacks[migratetype][i];
		if (fallback_mt == MIGRATE_TYPES)
			break;

		if (list_empty(&area->free_list[fallback_mt]))
			continue;

		if (can_steal_fallback(order, migratetype))
			*can_steal = true;

		if (!only_stealable)
			return fallback_mt;

		if (*can_steal)
			return fallback_mt;
	}

	return -1;
}

/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
				unsigned int alloc_order)
{
	int mt;
	unsigned long max_managed, flags;

	/*
	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
	 * Check is race-prone but harmless.
	 */
	max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
	if (zone->nr_reserved_highatomic >= max_managed)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	/* Recheck the nr_reserved_highatomic limit under the lock */
	if (zone->nr_reserved_highatomic >= max_managed)
		goto out_unlock;

	/* Yoink! */
	mt = get_pageblock_migratetype(page);
	if (mt != MIGRATE_HIGHATOMIC &&
			!is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
		zone->nr_reserved_highatomic += pageblock_nr_pages;
		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
	}

out_unlock:
	spin_unlock_irqrestore(&zone->lock, flags);
}
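
/*
 * Back-of-the-envelope example (a sketch, assuming 4K pages and 512-page
 * pageblocks): for a zone with 1 GiB managed (262144 pages), max_managed is
 * 262144/100 + 512 = 3133 pages. Each reservation adds 512 pages, so the
 * check stops further reservations after the seventh pageblock
 * (3584 >= 3133). The "+ pageblock_nr_pages" term guarantees that even a
 * tiny zone can reserve at least one pageblock.
 */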
/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 */
static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
{
	struct zonelist *zonelist = ac->zonelist;
	unsigned long flags;
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	int order;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
								ac->nodemask) {
		/* Preserve at least one pageblock */
		if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &(zone->free_area[order]);

			if (list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
				continue;

			page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next,
						struct page, lru);

			/*
			 * It should never happen but changes to locking could
			 * inadvertently allow a per-cpu drain to add pages
			 * to MIGRATE_HIGHATOMIC while unreserving so be safe
			 * and watch for underflows.
			 */
			zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
				zone->nr_reserved_highatomic);

			/*
			 * Convert to ac->migratetype and avoid the normal
			 * pageblock stealing heuristics. Minimally, the caller
			 * is doing the work and needs the pages. More
			 * importantly, if the block was always converted to
			 * MIGRATE_UNMOVABLE or another type then the number
			 * of pageblocks that cannot be completely freed
			 * may increase.
			 */
			set_pageblock_migratetype(page, ac->migratetype);
			move_freepages_block(zone, page, ac->migratetype);
			spin_unlock_irqrestore(&zone->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

/* Remove an element from the buddy allocator, taken from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
{
	struct free_area *area;
	unsigned int current_order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1;
				current_order >= order && current_order <= MAX_ORDER-1;
				--current_order) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt == -1)
			continue;

		page = list_entry(area->free_list[fallback_mt].next,
						struct page, lru);
		if (can_steal)
			steal_suitable_fallback(zone, page, start_migratetype);

		/* Remove the page from the freelists */
		area->nr_free--;
		list_del(&page->lru);
		rmv_page_order(page);

		expand(zone, page, order, current_order, area,
					start_migratetype);
		/*
		 * The pcppage_migratetype may differ from the pageblock's
		 * migratetype depending on the decisions in
		 * find_suitable_fallback(). This is OK as long as it does
		 * not differ for MIGRATE_CMA pageblocks. Those can be used
		 * as fallback only via the special __rmqueue_cma_fallback()
		 * function.
		 */
		set_pcppage_migratetype(page, start_migratetype);

		trace_mm_page_alloc_extfrag(page, order, current_order,
			start_migratetype, fallback_mt);

		return page;
	}

	return NULL;
}
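
/*
 * Note on the search direction above (reasoning, not new mechanism):
 * __rmqueue_smallest() takes the smallest sufficient block, while
 * __rmqueue_fallback() deliberately starts at MAX_ORDER-1 and works down.
 * Stealing the largest available foreign block (often a whole pageblock)
 * makes it likely that future start_migratetype allocations are served
 * from the stolen block instead of falling back again, which limits
 * long-term fragmentation.
 */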
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
				int migratetype, gfp_t gfp_flags)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (migratetype == MIGRATE_MOVABLE)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page)
			page = __rmqueue_fallback(zone, order, migratetype);
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, bool cold)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype, 0);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number under some conditions. This is useful for IO
		 * devices that can merge IO requests if the physical pages
		 * are ordered properly.
		 */
		if (likely(!cold))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		list = &page->lru;
		if (is_migrate_cma(get_pcppage_migratetype(page)))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain, batch;

	local_irq_save(flags);
	batch = READ_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0) {
		free_pcppages_bulk(zone, to_drain, pcp);
		pcp->count -= to_drain;
	}
	local_irq_restore(flags);
}
#endif

/*
 * Drain pcplists of the indicated processor and zone.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	unsigned long flags;
	struct per_cpu_pageset *pset;
	struct per_cpu_pages *pcp;

	local_irq_save(flags);
	pset = per_cpu_ptr(zone->pageset, cpu);

	pcp = &pset->pcp;
	if (pcp->count) {
		free_pcppages_bulk(zone, pcp->count, pcp);
		pcp->count = 0;
	}
	local_irq_restore(flags);
}

/*
 * Drain pcplists of all zones on the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
1903 */ 1904 static void drain_pages(unsigned int cpu) 1905 { 1906 struct zone *zone; 1907 1908 for_each_populated_zone(zone) { 1909 drain_pages_zone(cpu, zone); 1910 } 1911 } 1912 1913 /* 1914 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 1915 * 1916 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 1917 * the single zone's pages. 1918 */ 1919 void drain_local_pages(struct zone *zone) 1920 { 1921 int cpu = smp_processor_id(); 1922 1923 if (zone) 1924 drain_pages_zone(cpu, zone); 1925 else 1926 drain_pages(cpu); 1927 } 1928 1929 /* 1930 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 1931 * 1932 * When zone parameter is non-NULL, spill just the single zone's pages. 1933 * 1934 * Note that this code is protected against sending an IPI to an offline 1935 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs: 1936 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but 1937 * nothing keeps CPUs from showing up after we populated the cpumask and 1938 * before the call to on_each_cpu_mask(). 1939 */ 1940 void drain_all_pages(struct zone *zone) 1941 { 1942 int cpu; 1943 1944 /* 1945 * Allocate in the BSS so we won't require allocation in 1946 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 1947 */ 1948 static cpumask_t cpus_with_pcps; 1949 1950 /* 1951 * We don't care about racing with CPU hotplug event 1952 * as offline notification will cause the notified 1953 * cpu to drain that CPU pcps and on_each_cpu_mask 1954 * disables preemption as part of its processing 1955 */ 1956 for_each_online_cpu(cpu) { 1957 struct per_cpu_pageset *pcp; 1958 struct zone *z; 1959 bool has_pcps = false; 1960 1961 if (zone) { 1962 pcp = per_cpu_ptr(zone->pageset, cpu); 1963 if (pcp->pcp.count) 1964 has_pcps = true; 1965 } else { 1966 for_each_populated_zone(z) { 1967 pcp = per_cpu_ptr(z->pageset, cpu); 1968 if (pcp->pcp.count) { 1969 has_pcps = true; 1970 break; 1971 } 1972 } 1973 } 1974 1975 if (has_pcps) 1976 cpumask_set_cpu(cpu, &cpus_with_pcps); 1977 else 1978 cpumask_clear_cpu(cpu, &cpus_with_pcps); 1979 } 1980 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, 1981 zone, 1); 1982 } 1983 1984 #ifdef CONFIG_HIBERNATION 1985 1986 void mark_free_pages(struct zone *zone) 1987 { 1988 unsigned long pfn, max_zone_pfn; 1989 unsigned long flags; 1990 unsigned int order, t; 1991 struct list_head *curr; 1992 1993 if (zone_is_empty(zone)) 1994 return; 1995 1996 spin_lock_irqsave(&zone->lock, flags); 1997 1998 max_zone_pfn = zone_end_pfn(zone); 1999 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 2000 if (pfn_valid(pfn)) { 2001 struct page *page = pfn_to_page(pfn); 2002 2003 if (!swsusp_page_is_forbidden(page)) 2004 swsusp_unset_page_free(page); 2005 } 2006 2007 for_each_migratetype_order(order, t) { 2008 list_for_each(curr, &zone->free_area[order].free_list[t]) { 2009 unsigned long i; 2010 2011 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 2012 for (i = 0; i < (1UL << order); i++) 2013 swsusp_set_page_free(pfn_to_page(pfn + i)); 2014 } 2015 } 2016 spin_unlock_irqrestore(&zone->lock, flags); 2017 } 2018 #endif /* CONFIG_HIBERNATION */ 2019 2020 /* 2021 * Free a 0-order page 2022 * cold == true ?
free a cold page : free a hot page 2023 */ 2024 void free_hot_cold_page(struct page *page, bool cold) 2025 { 2026 struct zone *zone = page_zone(page); 2027 struct per_cpu_pages *pcp; 2028 unsigned long flags; 2029 unsigned long pfn = page_to_pfn(page); 2030 int migratetype; 2031 2032 if (!free_pages_prepare(page, 0)) 2033 return; 2034 2035 migratetype = get_pfnblock_migratetype(page, pfn); 2036 set_pcppage_migratetype(page, migratetype); 2037 local_irq_save(flags); 2038 __count_vm_event(PGFREE); 2039 2040 /* 2041 * We only track unmovable, reclaimable and movable on pcp lists. 2042 * Free ISOLATE pages back to the allocator because they are being 2043 * offlined but treat RESERVE as movable pages so we can get those 2044 * areas back if necessary. Otherwise, we may have to free 2045 * excessively into the page allocator 2046 */ 2047 if (migratetype >= MIGRATE_PCPTYPES) { 2048 if (unlikely(is_migrate_isolate(migratetype))) { 2049 free_one_page(zone, page, pfn, 0, migratetype); 2050 goto out; 2051 } 2052 migratetype = MIGRATE_MOVABLE; 2053 } 2054 2055 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2056 if (!cold) 2057 list_add(&page->lru, &pcp->lists[migratetype]); 2058 else 2059 list_add_tail(&page->lru, &pcp->lists[migratetype]); 2060 pcp->count++; 2061 if (pcp->count >= pcp->high) { 2062 unsigned long batch = READ_ONCE(pcp->batch); 2063 free_pcppages_bulk(zone, batch, pcp); 2064 pcp->count -= batch; 2065 } 2066 2067 out: 2068 local_irq_restore(flags); 2069 } 2070 2071 /* 2072 * Free a list of 0-order pages 2073 */ 2074 void free_hot_cold_page_list(struct list_head *list, bool cold) 2075 { 2076 struct page *page, *next; 2077 2078 list_for_each_entry_safe(page, next, list, lru) { 2079 trace_mm_page_free_batched(page, cold); 2080 free_hot_cold_page(page, cold); 2081 } 2082 } 2083 2084 /* 2085 * split_page takes a non-compound higher-order page, and splits it into 2086 * n (1<<order) sub-pages: page[0..n] 2087 * Each sub-page must be freed individually. 2088 * 2089 * Note: this is probably too low level an operation for use in drivers. 2090 * Please consult with lkml before using this in your driver. 2091 */ 2092 void split_page(struct page *page, unsigned int order) 2093 { 2094 int i; 2095 gfp_t gfp_mask; 2096 2097 VM_BUG_ON_PAGE(PageCompound(page), page); 2098 VM_BUG_ON_PAGE(!page_count(page), page); 2099 2100 #ifdef CONFIG_KMEMCHECK 2101 /* 2102 * Split shadow pages too, because free(page[0]) would 2103 * otherwise free the whole shadow. 
2104 */ 2105 if (kmemcheck_page_is_tracked(page)) 2106 split_page(virt_to_page(page[0].shadow), order); 2107 #endif 2108 2109 gfp_mask = get_page_owner_gfp(page); 2110 set_page_owner(page, 0, gfp_mask); 2111 for (i = 1; i < (1 << order); i++) { 2112 set_page_refcounted(page + i); 2113 set_page_owner(page + i, 0, gfp_mask); 2114 } 2115 } 2116 EXPORT_SYMBOL_GPL(split_page); 2117 2118 int __isolate_free_page(struct page *page, unsigned int order) 2119 { 2120 unsigned long watermark; 2121 struct zone *zone; 2122 int mt; 2123 2124 BUG_ON(!PageBuddy(page)); 2125 2126 zone = page_zone(page); 2127 mt = get_pageblock_migratetype(page); 2128 2129 if (!is_migrate_isolate(mt)) { 2130 /* Obey watermarks as if the page was being allocated */ 2131 watermark = low_wmark_pages(zone) + (1 << order); 2132 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 2133 return 0; 2134 2135 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2136 } 2137 2138 /* Remove page from free list */ 2139 list_del(&page->lru); 2140 zone->free_area[order].nr_free--; 2141 rmv_page_order(page); 2142 2143 set_page_owner(page, order, __GFP_MOVABLE); 2144 2145 /* Set the pageblock if the isolated page is at least a pageblock */ 2146 if (order >= pageblock_order - 1) { 2147 struct page *endpage = page + (1 << order) - 1; 2148 for (; page < endpage; page += pageblock_nr_pages) { 2149 int mt = get_pageblock_migratetype(page); 2150 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)) 2151 set_pageblock_migratetype(page, 2152 MIGRATE_MOVABLE); 2153 } 2154 } 2155 2156 2157 return 1UL << order; 2158 } 2159 2160 /* 2161 * Similar to split_page except the page is already free. As this is only 2162 * being used for migration, the migratetype of the block also changes. 2163 * As this is called with interrupts disabled, the caller is responsible 2164 * for calling arch_alloc_page() and kernel_map_pages() after interrupts 2165 * are enabled. 2166 * 2167 * Note: this is probably too low level an operation for use in drivers. 2168 * Please consult with lkml before using this in your driver. 2169 */ 2170 int split_free_page(struct page *page) 2171 { 2172 unsigned int order; 2173 int nr_pages; 2174 2175 order = page_order(page); 2176 2177 nr_pages = __isolate_free_page(page, order); 2178 if (!nr_pages) 2179 return 0; 2180 2181 /* Split into individual pages */ 2182 set_page_refcounted(page); 2183 split_page(page, order); 2184 return nr_pages; 2185 } 2186 2187 /* 2188 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 2189 */ 2190 static inline 2191 struct page *buffered_rmqueue(struct zone *preferred_zone, 2192 struct zone *zone, unsigned int order, 2193 gfp_t gfp_flags, int alloc_flags, int migratetype) 2194 { 2195 unsigned long flags; 2196 struct page *page; 2197 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2198 2199 if (likely(order == 0)) { 2200 struct per_cpu_pages *pcp; 2201 struct list_head *list; 2202 2203 local_irq_save(flags); 2204 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2205 list = &pcp->lists[migratetype]; 2206 if (list_empty(list)) { 2207 pcp->count += rmqueue_bulk(zone, 0, 2208 pcp->batch, list, 2209 migratetype, cold); 2210 if (unlikely(list_empty(list))) 2211 goto failed; 2212 } 2213 2214 if (cold) 2215 page = list_entry(list->prev, struct page, lru); 2216 else 2217 page = list_entry(list->next, struct page, lru); 2218 2219 list_del(&page->lru); 2220 pcp->count--; 2221 } else { 2222 if (unlikely(gfp_flags & __GFP_NOFAIL)) { 2223 /* 2224 * __GFP_NOFAIL is not to be used in new code.
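 * For new callers the failure must be handled instead, e.g.
 * (an illustrative sketch, not code from this file):
 *
 *	page = alloc_pages(GFP_KERNEL, order);
 *	if (!page)
 *		return -ENOMEM;
 *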
2225 * 2226 * All __GFP_NOFAIL callers should be fixed so that they 2227 * properly detect and handle allocation failures. 2228 * 2229 * We most definitely don't want callers attempting to 2230 * allocate greater than order-1 page units with 2231 * __GFP_NOFAIL. 2232 */ 2233 WARN_ON_ONCE(order > 1); 2234 } 2235 spin_lock_irqsave(&zone->lock, flags); 2236 2237 page = NULL; 2238 if (alloc_flags & ALLOC_HARDER) { 2239 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2240 if (page) 2241 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2242 } 2243 if (!page) 2244 page = __rmqueue(zone, order, migratetype, gfp_flags); 2245 spin_unlock(&zone->lock); 2246 if (!page) 2247 goto failed; 2248 __mod_zone_freepage_state(zone, -(1 << order), 2249 get_pcppage_migratetype(page)); 2250 } 2251 2252 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); 2253 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && 2254 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) 2255 set_bit(ZONE_FAIR_DEPLETED, &zone->flags); 2256 2257 __count_zone_vm_events(PGALLOC, zone, 1 << order); 2258 zone_statistics(preferred_zone, zone, gfp_flags); 2259 local_irq_restore(flags); 2260 2261 VM_BUG_ON_PAGE(bad_range(zone, page), page); 2262 return page; 2263 2264 failed: 2265 local_irq_restore(flags); 2266 return NULL; 2267 } 2268 2269 #ifdef CONFIG_FAIL_PAGE_ALLOC 2270 2271 static struct { 2272 struct fault_attr attr; 2273 2274 bool ignore_gfp_highmem; 2275 bool ignore_gfp_reclaim; 2276 u32 min_order; 2277 } fail_page_alloc = { 2278 .attr = FAULT_ATTR_INITIALIZER, 2279 .ignore_gfp_reclaim = true, 2280 .ignore_gfp_highmem = true, 2281 .min_order = 1, 2282 }; 2283 2284 static int __init setup_fail_page_alloc(char *str) 2285 { 2286 return setup_fault_attr(&fail_page_alloc.attr, str); 2287 } 2288 __setup("fail_page_alloc=", setup_fail_page_alloc); 2289 2290 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2291 { 2292 if (order < fail_page_alloc.min_order) 2293 return false; 2294 if (gfp_mask & __GFP_NOFAIL) 2295 return false; 2296 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 2297 return false; 2298 if (fail_page_alloc.ignore_gfp_reclaim && 2299 (gfp_mask & __GFP_DIRECT_RECLAIM)) 2300 return false; 2301 2302 return should_fail(&fail_page_alloc.attr, 1 << order); 2303 } 2304 2305 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 2306 2307 static int __init fail_page_alloc_debugfs(void) 2308 { 2309 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 2310 struct dentry *dir; 2311 2312 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 2313 &fail_page_alloc.attr); 2314 if (IS_ERR(dir)) 2315 return PTR_ERR(dir); 2316 2317 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 2318 &fail_page_alloc.ignore_gfp_reclaim)) 2319 goto fail; 2320 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir, 2321 &fail_page_alloc.ignore_gfp_highmem)) 2322 goto fail; 2323 if (!debugfs_create_u32("min-order", mode, dir, 2324 &fail_page_alloc.min_order)) 2325 goto fail; 2326 2327 return 0; 2328 fail: 2329 debugfs_remove_recursive(dir); 2330 2331 return -ENOMEM; 2332 } 2333 2334 late_initcall(fail_page_alloc_debugfs); 2335 2336 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 2337 2338 #else /* CONFIG_FAIL_PAGE_ALLOC */ 2339 2340 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2341 { 2342 return false; 2343 } 2344 2345 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 2346 2347 /* 2348 * Return true if free base pages are above 'mark'. 
For high-order checks it 2349 * will return true if the order-0 watermark is reached and there is at least 2350 * one free page of a suitable size. Checking now avoids taking the zone lock 2351 * to check in the allocation paths if no pages are free. 2352 */ 2353 static bool __zone_watermark_ok(struct zone *z, unsigned int order, 2354 unsigned long mark, int classzone_idx, int alloc_flags, 2355 long free_pages) 2356 { 2357 long min = mark; 2358 int o; 2359 const int alloc_harder = (alloc_flags & ALLOC_HARDER); 2360 2361 /* free_pages may go negative - that's OK */ 2362 free_pages -= (1 << order) - 1; 2363 2364 if (alloc_flags & ALLOC_HIGH) 2365 min -= min / 2; 2366 2367 /* 2368 * If the caller does not have rights to ALLOC_HARDER then subtract 2369 * the high-atomic reserves. This will over-estimate the size of the 2370 * atomic reserve but it avoids a search. 2371 */ 2372 if (likely(!alloc_harder)) 2373 free_pages -= z->nr_reserved_highatomic; 2374 else 2375 min -= min / 4; 2376 2377 #ifdef CONFIG_CMA 2378 /* If allocation can't use CMA areas don't use free CMA pages */ 2379 if (!(alloc_flags & ALLOC_CMA)) 2380 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); 2381 #endif 2382 2383 /* 2384 * Check watermarks for an order-0 allocation request. If these 2385 * are not met, then a high-order request also cannot go ahead 2386 * even if a suitable page happened to be free. 2387 */ 2388 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 2389 return false; 2390 2391 /* If this is an order-0 request then the watermark is fine */ 2392 if (!order) 2393 return true; 2394 2395 /* For a high-order request, check at least one suitable page is free */ 2396 for (o = order; o < MAX_ORDER; o++) { 2397 struct free_area *area = &z->free_area[o]; 2398 int mt; 2399 2400 if (!area->nr_free) 2401 continue; 2402 2403 if (alloc_harder) 2404 return true; 2405 2406 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2407 if (!list_empty(&area->free_list[mt])) 2408 return true; 2409 } 2410 2411 #ifdef CONFIG_CMA 2412 if ((alloc_flags & ALLOC_CMA) && 2413 !list_empty(&area->free_list[MIGRATE_CMA])) { 2414 return true; 2415 } 2416 #endif 2417 } 2418 return false; 2419 } 2420 2421 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2422 int classzone_idx, int alloc_flags) 2423 { 2424 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2425 zone_page_state(z, NR_FREE_PAGES)); 2426 } 2427 2428 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2429 unsigned long mark, int classzone_idx) 2430 { 2431 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2432 2433 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2434 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2435 2436 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, 2437 free_pages); 2438 } 2439 2440 #ifdef CONFIG_NUMA 2441 static bool zone_local(struct zone *local_zone, struct zone *zone) 2442 { 2443 return local_zone->node == zone->node; 2444 } 2445 2446 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2447 { 2448 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < 2449 RECLAIM_DISTANCE; 2450 } 2451 #else /* CONFIG_NUMA */ 2452 static bool zone_local(struct zone *local_zone, struct zone *zone) 2453 { 2454 return true; 2455 } 2456 2457 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2458 { 2459 return true; 2460 } 2461 #endif /* CONFIG_NUMA */ 2462 2463 static void reset_alloc_batches(struct zone
*preferred_zone) 2464 { 2465 struct zone *zone = preferred_zone->zone_pgdat->node_zones; 2466 2467 do { 2468 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2469 high_wmark_pages(zone) - low_wmark_pages(zone) - 2470 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); 2471 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); 2472 } while (zone++ != preferred_zone); 2473 } 2474 2475 /* 2476 * get_page_from_freelist goes through the zonelist trying to allocate 2477 * a page. 2478 */ 2479 static struct page * 2480 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 2481 const struct alloc_context *ac) 2482 { 2483 struct zonelist *zonelist = ac->zonelist; 2484 struct zoneref *z; 2485 struct page *page = NULL; 2486 struct zone *zone; 2487 int nr_fair_skipped = 0; 2488 bool zonelist_rescan; 2489 2490 zonelist_scan: 2491 zonelist_rescan = false; 2492 2493 /* 2494 * Scan zonelist, looking for a zone with enough free. 2495 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 2496 */ 2497 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, 2498 ac->nodemask) { 2499 unsigned long mark; 2500 2501 if (cpusets_enabled() && 2502 (alloc_flags & ALLOC_CPUSET) && 2503 !cpuset_zone_allowed(zone, gfp_mask)) 2504 continue; 2505 /* 2506 * Distribute pages in proportion to the individual 2507 * zone size to ensure fair page aging. The zone a 2508 * page was allocated in should have no effect on the 2509 * time the page has in memory before being reclaimed. 2510 */ 2511 if (alloc_flags & ALLOC_FAIR) { 2512 if (!zone_local(ac->preferred_zone, zone)) 2513 break; 2514 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { 2515 nr_fair_skipped++; 2516 continue; 2517 } 2518 } 2519 /* 2520 * When allocating a page cache page for writing, we 2521 * want to get it from a zone that is within its dirty 2522 * limit, such that no single zone holds more than its 2523 * proportional share of globally allowed dirty pages. 2524 * The dirty limits take into account the zone's 2525 * lowmem reserves and high watermark so that kswapd 2526 * should be able to balance it without having to 2527 * write pages from its LRU list. 2528 * 2529 * This may look like it could increase pressure on 2530 * lower zones by failing allocations in higher zones 2531 * before they are full. But the pages that do spill 2532 * over are limited as the lower zones are protected 2533 * by this very same mechanism. It should not become 2534 * a practical burden to them. 2535 * 2536 * XXX: For now, allow allocations to potentially 2537 * exceed the per-zone dirty limit in the slowpath 2538 * (spread_dirty_pages unset) before going into reclaim, 2539 * which is important when on a NUMA setup the allowed 2540 * zones are together not big enough to reach the 2541 * global limit. The proper fix for these situations 2542 * will require awareness of zones in the 2543 * dirty-throttling and the flusher threads. 
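 *
 * Worked example with illustrative numbers: if the global limit
 * allows 20% of dirtyable memory to be dirty and this zone holds
 * 10% of all dirtyable memory, the zone's own limit works out to
 * 20% of its dirtyable pages; zone_dirty_ok() fails once the
 * zone's dirty pages exceed that share.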
2544 */ 2545 if (ac->spread_dirty_pages && !zone_dirty_ok(zone)) 2546 continue; 2547 2548 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 2549 if (!zone_watermark_ok(zone, order, mark, 2550 ac->classzone_idx, alloc_flags)) { 2551 int ret; 2552 2553 /* Checked here to keep the fast path fast */ 2554 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 2555 if (alloc_flags & ALLOC_NO_WATERMARKS) 2556 goto try_this_zone; 2557 2558 if (zone_reclaim_mode == 0 || 2559 !zone_allows_reclaim(ac->preferred_zone, zone)) 2560 continue; 2561 2562 ret = zone_reclaim(zone, gfp_mask, order); 2563 switch (ret) { 2564 case ZONE_RECLAIM_NOSCAN: 2565 /* did not scan */ 2566 continue; 2567 case ZONE_RECLAIM_FULL: 2568 /* scanned but unreclaimable */ 2569 continue; 2570 default: 2571 /* did we reclaim enough */ 2572 if (zone_watermark_ok(zone, order, mark, 2573 ac->classzone_idx, alloc_flags)) 2574 goto try_this_zone; 2575 2576 continue; 2577 } 2578 } 2579 2580 try_this_zone: 2581 page = buffered_rmqueue(ac->preferred_zone, zone, order, 2582 gfp_mask, alloc_flags, ac->migratetype); 2583 if (page) { 2584 if (prep_new_page(page, order, gfp_mask, alloc_flags)) 2585 goto try_this_zone; 2586 2587 /* 2588 * If this is a high-order atomic allocation then check 2589 * if the pageblock should be reserved for the future 2590 */ 2591 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 2592 reserve_highatomic_pageblock(page, zone, order); 2593 2594 return page; 2595 } 2596 } 2597 2598 /* 2599 * The first pass makes sure allocations are spread fairly within the 2600 * local node. However, the local node might have free pages left 2601 * after the fairness batches are exhausted, and remote zones haven't 2602 * even been considered yet. Try once more without fairness, and 2603 * include remote zones now, before entering the slowpath and waking 2604 * kswapd: prefer spilling to a remote zone over swapping locally. 2605 */ 2606 if (alloc_flags & ALLOC_FAIR) { 2607 alloc_flags &= ~ALLOC_FAIR; 2608 if (nr_fair_skipped) { 2609 zonelist_rescan = true; 2610 reset_alloc_batches(ac->preferred_zone); 2611 } 2612 if (nr_online_nodes > 1) 2613 zonelist_rescan = true; 2614 } 2615 2616 if (zonelist_rescan) 2617 goto zonelist_scan; 2618 2619 return NULL; 2620 } 2621 2622 /* 2623 * Large machines with many possible nodes should not always dump per-node 2624 * meminfo in irq context. 2625 */ 2626 static inline bool should_suppress_show_mem(void) 2627 { 2628 bool ret = false; 2629 2630 #if NODES_SHIFT > 8 2631 ret = in_interrupt(); 2632 #endif 2633 return ret; 2634 } 2635 2636 static DEFINE_RATELIMIT_STATE(nopage_rs, 2637 DEFAULT_RATELIMIT_INTERVAL, 2638 DEFAULT_RATELIMIT_BURST); 2639 2640 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) 2641 { 2642 unsigned int filter = SHOW_MEM_FILTER_NODES; 2643 2644 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || 2645 debug_guardpage_minorder() > 0) 2646 return; 2647 2648 /* 2649 * This documents exceptions given to allocations in certain 2650 * contexts that are allowed to allocate outside current's set 2651 * of allowed nodes. 
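 *
 * (Illustrative reading of the checks below: an OOM victim
 * (TIF_MEMDIE), a task in direct reclaim (PF_MEMALLOC) or an
 * exiting task may have allocated outside its cpuset, and atomic
 * contexts skip cpuset enforcement, so node filtering is dropped
 * for their failure reports.)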
2652 */ 2653 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2654 if (test_thread_flag(TIF_MEMDIE) || 2655 (current->flags & (PF_MEMALLOC | PF_EXITING))) 2656 filter &= ~SHOW_MEM_FILTER_NODES; 2657 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 2658 filter &= ~SHOW_MEM_FILTER_NODES; 2659 2660 if (fmt) { 2661 struct va_format vaf; 2662 va_list args; 2663 2664 va_start(args, fmt); 2665 2666 vaf.fmt = fmt; 2667 vaf.va = &args; 2668 2669 pr_warn("%pV", &vaf); 2670 2671 va_end(args); 2672 } 2673 2674 pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n", 2675 current->comm, order, gfp_mask); 2676 2677 dump_stack(); 2678 if (!should_suppress_show_mem()) 2679 show_mem(filter); 2680 } 2681 2682 static inline struct page * 2683 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 2684 const struct alloc_context *ac, unsigned long *did_some_progress) 2685 { 2686 struct oom_control oc = { 2687 .zonelist = ac->zonelist, 2688 .nodemask = ac->nodemask, 2689 .gfp_mask = gfp_mask, 2690 .order = order, 2691 }; 2692 struct page *page; 2693 2694 *did_some_progress = 0; 2695 2696 /* 2697 * Acquire the oom lock. If that fails, somebody else is 2698 * making progress for us. 2699 */ 2700 if (!mutex_trylock(&oom_lock)) { 2701 *did_some_progress = 1; 2702 schedule_timeout_uninterruptible(1); 2703 return NULL; 2704 } 2705 2706 /* 2707 * Go through the zonelist yet one more time, keep very high watermark 2708 * here, this is only to catch a parallel oom killing, we must fail if 2709 * we're still under heavy pressure. 2710 */ 2711 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, 2712 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 2713 if (page) 2714 goto out; 2715 2716 if (!(gfp_mask & __GFP_NOFAIL)) { 2717 /* Coredumps can quickly deplete all memory reserves */ 2718 if (current->flags & PF_DUMPCORE) 2719 goto out; 2720 /* The OOM killer will not help higher order allocs */ 2721 if (order > PAGE_ALLOC_COSTLY_ORDER) 2722 goto out; 2723 /* The OOM killer does not needlessly kill tasks for lowmem */ 2724 if (ac->high_zoneidx < ZONE_NORMAL) 2725 goto out; 2726 /* The OOM killer does not compensate for IO-less reclaim */ 2727 if (!(gfp_mask & __GFP_FS)) { 2728 /* 2729 * XXX: Page reclaim didn't yield anything, 2730 * and the OOM killer can't be invoked, but 2731 * keep looping as per tradition. 
2732 */ 2733 *did_some_progress = 1; 2734 goto out; 2735 } 2736 if (pm_suspended_storage()) 2737 goto out; 2738 /* The OOM killer may not free memory on a specific node */ 2739 if (gfp_mask & __GFP_THISNODE) 2740 goto out; 2741 } 2742 /* Exhausted what can be done so it's blamo time */ 2743 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) 2744 *did_some_progress = 1; 2745 out: 2746 mutex_unlock(&oom_lock); 2747 return page; 2748 } 2749 2750 #ifdef CONFIG_COMPACTION 2751 /* Try memory compaction for high-order allocations before reclaim */ 2752 static struct page * 2753 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2754 int alloc_flags, const struct alloc_context *ac, 2755 enum migrate_mode mode, int *contended_compaction, 2756 bool *deferred_compaction) 2757 { 2758 unsigned long compact_result; 2759 struct page *page; 2760 2761 if (!order) 2762 return NULL; 2763 2764 current->flags |= PF_MEMALLOC; 2765 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 2766 mode, contended_compaction); 2767 current->flags &= ~PF_MEMALLOC; 2768 2769 switch (compact_result) { 2770 case COMPACT_DEFERRED: 2771 *deferred_compaction = true; 2772 /* fall-through */ 2773 case COMPACT_SKIPPED: 2774 return NULL; 2775 default: 2776 break; 2777 } 2778 2779 /* 2780 * At least in one zone compaction wasn't deferred or skipped, so let's 2781 * count a compaction stall 2782 */ 2783 count_vm_event(COMPACTSTALL); 2784 2785 page = get_page_from_freelist(gfp_mask, order, 2786 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 2787 2788 if (page) { 2789 struct zone *zone = page_zone(page); 2790 2791 zone->compact_blockskip_flush = false; 2792 compaction_defer_reset(zone, order, true); 2793 count_vm_event(COMPACTSUCCESS); 2794 return page; 2795 } 2796 2797 /* 2798 * It's bad if a compaction run occurs and fails. The most likely reason 2799 * is that pages exist, but not enough to satisfy watermarks.
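 * (For example, plenty of order-0 pages may remain free while no
 * contiguous order-9 block could be assembled for a THP request.)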
2800 */ 2801 count_vm_event(COMPACTFAIL); 2802 2803 cond_resched(); 2804 2805 return NULL; 2806 } 2807 #else 2808 static inline struct page * 2809 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2810 int alloc_flags, const struct alloc_context *ac, 2811 enum migrate_mode mode, int *contended_compaction, 2812 bool *deferred_compaction) 2813 { 2814 return NULL; 2815 } 2816 #endif /* CONFIG_COMPACTION */ 2817 2818 /* Perform direct synchronous page reclaim */ 2819 static int 2820 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 2821 const struct alloc_context *ac) 2822 { 2823 struct reclaim_state reclaim_state; 2824 int progress; 2825 2826 cond_resched(); 2827 2828 /* We now go into synchronous reclaim */ 2829 cpuset_memory_pressure_bump(); 2830 current->flags |= PF_MEMALLOC; 2831 lockdep_set_current_reclaim_state(gfp_mask); 2832 reclaim_state.reclaimed_slab = 0; 2833 current->reclaim_state = &reclaim_state; 2834 2835 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 2836 ac->nodemask); 2837 2838 current->reclaim_state = NULL; 2839 lockdep_clear_current_reclaim_state(); 2840 current->flags &= ~PF_MEMALLOC; 2841 2842 cond_resched(); 2843 2844 return progress; 2845 } 2846 2847 /* The really slow allocator path where we enter direct reclaim */ 2848 static inline struct page * 2849 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 2850 int alloc_flags, const struct alloc_context *ac, 2851 unsigned long *did_some_progress) 2852 { 2853 struct page *page = NULL; 2854 bool drained = false; 2855 2856 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 2857 if (unlikely(!(*did_some_progress))) 2858 return NULL; 2859 2860 retry: 2861 page = get_page_from_freelist(gfp_mask, order, 2862 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 2863 2864 /* 2865 * If an allocation failed after direct reclaim, it could be because 2866 * pages are pinned on the per-cpu lists or in high alloc reserves. 2867 * Shrink them and try again 2868 */ 2869 if (!page && !drained) { 2870 unreserve_highatomic_pageblock(ac); 2871 drain_all_pages(NULL); 2872 drained = true; 2873 goto retry; 2874 } 2875 2876 return page; 2877 } 2878 2879 /* 2880 * This is called in the allocator slow-path if the allocation request is of 2881 * sufficient urgency to ignore watermarks and take other desperate measures 2882 */ 2883 static inline struct page * 2884 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 2885 const struct alloc_context *ac) 2886 { 2887 struct page *page; 2888 2889 do { 2890 page = get_page_from_freelist(gfp_mask, order, 2891 ALLOC_NO_WATERMARKS, ac); 2892 2893 if (!page && gfp_mask & __GFP_NOFAIL) 2894 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, 2895 HZ/50); 2896 } while (!page && (gfp_mask & __GFP_NOFAIL)); 2897 2898 return page; 2899 } 2900 2901 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) 2902 { 2903 struct zoneref *z; 2904 struct zone *zone; 2905 2906 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2907 ac->high_zoneidx, ac->nodemask) 2908 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone)); 2909 } 2910 2911 static inline int 2912 gfp_to_alloc_flags(gfp_t gfp_mask) 2913 { 2914 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 2915 2916 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch.
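 *
 * Illustrative outcome, derived from the checks below: a GFP_ATOMIC
 * request, which carries __GFP_HIGH and __GFP_ATOMIC, leaves here with
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER and ALLOC_CPUSET cleared.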
*/ 2917 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 2918 2919 /* 2920 * The caller may dip into page reserves a bit more if the caller 2921 * cannot run direct reclaim, or if the caller has realtime scheduling 2922 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 2923 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 2924 */ 2925 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 2926 2927 if (gfp_mask & __GFP_ATOMIC) { 2928 /* 2929 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 2930 * if it can't schedule. 2931 */ 2932 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2933 alloc_flags |= ALLOC_HARDER; 2934 /* 2935 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 2936 * comment for __cpuset_node_allowed(). 2937 */ 2938 alloc_flags &= ~ALLOC_CPUSET; 2939 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2940 alloc_flags |= ALLOC_HARDER; 2941 2942 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 2943 if (gfp_mask & __GFP_MEMALLOC) 2944 alloc_flags |= ALLOC_NO_WATERMARKS; 2945 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 2946 alloc_flags |= ALLOC_NO_WATERMARKS; 2947 else if (!in_interrupt() && 2948 ((current->flags & PF_MEMALLOC) || 2949 unlikely(test_thread_flag(TIF_MEMDIE)))) 2950 alloc_flags |= ALLOC_NO_WATERMARKS; 2951 } 2952 #ifdef CONFIG_CMA 2953 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 2954 alloc_flags |= ALLOC_CMA; 2955 #endif 2956 return alloc_flags; 2957 } 2958 2959 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 2960 { 2961 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); 2962 } 2963 2964 static inline bool is_thp_gfp_mask(gfp_t gfp_mask) 2965 { 2966 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE; 2967 } 2968 2969 static inline struct page * 2970 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 2971 struct alloc_context *ac) 2972 { 2973 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 2974 struct page *page = NULL; 2975 int alloc_flags; 2976 unsigned long pages_reclaimed = 0; 2977 unsigned long did_some_progress; 2978 enum migrate_mode migration_mode = MIGRATE_ASYNC; 2979 bool deferred_compaction = false; 2980 int contended_compaction = COMPACT_CONTENDED_NONE; 2981 2982 /* 2983 * In the slowpath, we sanity check order to avoid ever trying to 2984 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 2985 * be using allocators in order of preference for an area that is 2986 * too large. 2987 */ 2988 if (order >= MAX_ORDER) { 2989 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 2990 return NULL; 2991 } 2992 2993 /* 2994 * We also sanity check to catch abuse of atomic reserves being used by 2995 * callers that are not in atomic context. 2996 */ 2997 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 2998 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 2999 gfp_mask &= ~__GFP_ATOMIC; 3000 3001 /* 3002 * If this allocation cannot block and it is for a specific node, then 3003 * fail early. There's no need to wakeup kswapd or retry for a 3004 * speculative node-specific allocation. 3005 */ 3006 if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim) 3007 goto nopage; 3008 3009 retry: 3010 if (gfp_mask & __GFP_KSWAPD_RECLAIM) 3011 wake_all_kswapds(order, ac); 3012 3013 /* 3014 * OK, we're below the kswapd watermark and have kicked background 3015 * reclaim. Now things get more complex, so set up alloc_flags according 3016 * to how we want to proceed. 
3017 */ 3018 alloc_flags = gfp_to_alloc_flags(gfp_mask); 3019 3020 /* 3021 * Find the true preferred zone if the allocation is unconstrained by 3022 * cpusets. 3023 */ 3024 if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) { 3025 struct zoneref *preferred_zoneref; 3026 preferred_zoneref = first_zones_zonelist(ac->zonelist, 3027 ac->high_zoneidx, NULL, &ac->preferred_zone); 3028 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref); 3029 } 3030 3031 /* This is the last chance, in general, before the goto nopage. */ 3032 page = get_page_from_freelist(gfp_mask, order, 3033 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 3034 if (page) 3035 goto got_pg; 3036 3037 /* Allocate without watermarks if the context allows */ 3038 if (alloc_flags & ALLOC_NO_WATERMARKS) { 3039 /* 3040 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds 3041 * the allocation is high priority and these types of 3042 * allocations are system rather than user orientated 3043 */ 3044 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); 3045 3046 page = __alloc_pages_high_priority(gfp_mask, order, ac); 3047 3048 if (page) { 3049 goto got_pg; 3050 } 3051 } 3052 3053 /* Caller is not willing to reclaim, we can't balance anything */ 3054 if (!can_direct_reclaim) { 3055 /* 3056 * All existing users of the deprecated __GFP_NOFAIL are 3057 * blockable, so warn of any new users that actually allow this 3058 * type of allocation to fail. 3059 */ 3060 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL); 3061 goto nopage; 3062 } 3063 3064 /* Avoid recursion of direct reclaim */ 3065 if (current->flags & PF_MEMALLOC) 3066 goto nopage; 3067 3068 /* Avoid allocations with no watermarks from looping endlessly */ 3069 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) 3070 goto nopage; 3071 3072 /* 3073 * Try direct compaction. The first pass is asynchronous. Subsequent 3074 * attempts after direct reclaim are synchronous 3075 */ 3076 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 3077 migration_mode, 3078 &contended_compaction, 3079 &deferred_compaction); 3080 if (page) 3081 goto got_pg; 3082 3083 /* Checks for THP-specific high-order allocations */ 3084 if (is_thp_gfp_mask(gfp_mask)) { 3085 /* 3086 * If compaction is deferred for high-order allocations, it is 3087 * because sync compaction recently failed. If this is the case 3088 * and the caller requested a THP allocation, we do not want 3089 * to heavily disrupt the system, so we fail the allocation 3090 * instead of entering direct reclaim. 3091 */ 3092 if (deferred_compaction) 3093 goto nopage; 3094 3095 /* 3096 * In all zones where compaction was attempted (and not 3097 * deferred or skipped), lock contention has been detected. 3098 * For THP allocation we do not want to disrupt the others 3099 * so we fall back to base pages instead. 3100 */ 3101 if (contended_compaction == COMPACT_CONTENDED_LOCK) 3102 goto nopage; 3103 3104 /* 3105 * If compaction was aborted due to need_resched(), we do not 3106 * want to further increase allocation latency, unless it is 3107 * khugepaged trying to collapse. 3108 */ 3109 if (contended_compaction == COMPACT_CONTENDED_SCHED 3110 && !(current->flags & PF_KTHREAD)) 3111 goto nopage; 3112 } 3113 3114 /* 3115 * It can become very expensive to allocate transparent hugepages at 3116 * fault, so use asynchronous memory compaction for THP unless it is 3117 * khugepaged trying to collapse.
3118 */ 3119 if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD)) 3120 migration_mode = MIGRATE_SYNC_LIGHT; 3121 3122 /* Try direct reclaim and then allocating */ 3123 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 3124 &did_some_progress); 3125 if (page) 3126 goto got_pg; 3127 3128 /* Do not loop if specifically requested */ 3129 if (gfp_mask & __GFP_NORETRY) 3130 goto noretry; 3131 3132 /* Keep reclaiming pages as long as there is reasonable progress */ 3133 pages_reclaimed += did_some_progress; 3134 if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) || 3135 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) { 3136 /* Wait for some write requests to complete then retry */ 3137 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50); 3138 goto retry; 3139 } 3140 3141 /* Reclaim has failed us, start killing things */ 3142 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3143 if (page) 3144 goto got_pg; 3145 3146 /* Retry as long as the OOM killer is making progress */ 3147 if (did_some_progress) 3148 goto retry; 3149 3150 noretry: 3151 /* 3152 * High-order allocations do not necessarily loop after 3153 * direct reclaim and reclaim/compaction depends on compaction 3154 * being called after reclaim so call directly if necessary 3155 */ 3156 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, 3157 ac, migration_mode, 3158 &contended_compaction, 3159 &deferred_compaction); 3160 if (page) 3161 goto got_pg; 3162 nopage: 3163 warn_alloc_failed(gfp_mask, order, NULL); 3164 got_pg: 3165 return page; 3166 } 3167 3168 /* 3169 * This is the 'heart' of the zoned buddy allocator. 3170 */ 3171 struct page * 3172 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 3173 struct zonelist *zonelist, nodemask_t *nodemask) 3174 { 3175 struct zoneref *preferred_zoneref; 3176 struct page *page = NULL; 3177 unsigned int cpuset_mems_cookie; 3178 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; 3179 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ 3180 struct alloc_context ac = { 3181 .high_zoneidx = gfp_zone(gfp_mask), 3182 .nodemask = nodemask, 3183 .migratetype = gfpflags_to_migratetype(gfp_mask), 3184 }; 3185 3186 gfp_mask &= gfp_allowed_mask; 3187 3188 lockdep_trace_alloc(gfp_mask); 3189 3190 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 3191 3192 if (should_fail_alloc_page(gfp_mask, order)) 3193 return NULL; 3194 3195 /* 3196 * Check the zones suitable for the gfp_mask contain at least one 3197 * valid zone. It's possible to have an empty zonelist as a result 3198 * of __GFP_THISNODE and a memoryless node 3199 */ 3200 if (unlikely(!zonelist->_zonerefs->zone)) 3201 return NULL; 3202 3203 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) 3204 alloc_flags |= ALLOC_CMA; 3205 3206 retry_cpuset: 3207 cpuset_mems_cookie = read_mems_allowed_begin(); 3208 3209 /* We set it here, as __alloc_pages_slowpath might have changed it */ 3210 ac.zonelist = zonelist; 3211 3212 /* Dirty zone balancing only done in the fast path */ 3213 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3214 3215 /* The preferred zone is used for statistics later */ 3216 preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx, 3217 ac.nodemask ? 
: &cpuset_current_mems_allowed, 3218 &ac.preferred_zone); 3219 if (!ac.preferred_zone) 3220 goto out; 3221 ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); 3222 3223 /* First allocation attempt */ 3224 alloc_mask = gfp_mask|__GFP_HARDWALL; 3225 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); 3226 if (unlikely(!page)) { 3227 /* 3228 * Runtime PM, block IO and its error handling path 3229 * can deadlock because I/O on the device might not 3230 * complete. 3231 */ 3232 alloc_mask = memalloc_noio_flags(gfp_mask); 3233 ac.spread_dirty_pages = false; 3234 3235 page = __alloc_pages_slowpath(alloc_mask, order, &ac); 3236 } 3237 3238 if (kmemcheck_enabled && page) 3239 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 3240 3241 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); 3242 3243 out: 3244 /* 3245 * When updating a task's mems_allowed, it is possible to race with 3246 * parallel threads in such a way that an allocation can fail while 3247 * the mask is being updated. If a page allocation is about to fail, 3248 * check if the cpuset changed during allocation and if so, retry. 3249 */ 3250 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 3251 goto retry_cpuset; 3252 3253 return page; 3254 } 3255 EXPORT_SYMBOL(__alloc_pages_nodemask); 3256 3257 /* 3258 * Common helper functions. 3259 */ 3260 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 3261 { 3262 struct page *page; 3263 3264 /* 3265 * __get_free_pages() returns a 32-bit address, which cannot represent 3266 * a highmem page 3267 */ 3268 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 3269 3270 page = alloc_pages(gfp_mask, order); 3271 if (!page) 3272 return 0; 3273 return (unsigned long) page_address(page); 3274 } 3275 EXPORT_SYMBOL(__get_free_pages); 3276 3277 unsigned long get_zeroed_page(gfp_t gfp_mask) 3278 { 3279 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 3280 } 3281 EXPORT_SYMBOL(get_zeroed_page); 3282 3283 void __free_pages(struct page *page, unsigned int order) 3284 { 3285 if (put_page_testzero(page)) { 3286 if (order == 0) 3287 free_hot_cold_page(page, false); 3288 else 3289 __free_pages_ok(page, order); 3290 } 3291 } 3292 3293 EXPORT_SYMBOL(__free_pages); 3294 3295 void free_pages(unsigned long addr, unsigned int order) 3296 { 3297 if (addr != 0) { 3298 VM_BUG_ON(!virt_addr_valid((void *)addr)); 3299 __free_pages(virt_to_page((void *)addr), order); 3300 } 3301 } 3302 3303 EXPORT_SYMBOL(free_pages); 3304 3305 /* 3306 * Page Fragment: 3307 * An arbitrary-length arbitrary-offset area of memory which resides 3308 * within a 0 or higher order page. Multiple fragments within that page 3309 * are individually refcounted, in the page's reference counter. 3310 * 3311 * The page_frag functions below provide a simple allocation framework for 3312 * page fragments. This is used by the network stack and network device 3313 * drivers to provide a backing region of memory for use as either an 3314 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 3315 */ 3316 static struct page *__page_frag_refill(struct page_frag_cache *nc, 3317 gfp_t gfp_mask) 3318 { 3319 struct page *page = NULL; 3320 gfp_t gfp = gfp_mask; 3321 3322 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3323 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 3324 __GFP_NOMEMALLOC; 3325 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 3326 PAGE_FRAG_CACHE_MAX_ORDER); 3327 nc->size = page ? 
PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 3328 #endif 3329 if (unlikely(!page)) 3330 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 3331 3332 nc->va = page ? page_address(page) : NULL; 3333 3334 return page; 3335 } 3336 3337 void *__alloc_page_frag(struct page_frag_cache *nc, 3338 unsigned int fragsz, gfp_t gfp_mask) 3339 { 3340 unsigned int size = PAGE_SIZE; 3341 struct page *page; 3342 int offset; 3343 3344 if (unlikely(!nc->va)) { 3345 refill: 3346 page = __page_frag_refill(nc, gfp_mask); 3347 if (!page) 3348 return NULL; 3349 3350 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3351 /* if size can vary use size else just use PAGE_SIZE */ 3352 size = nc->size; 3353 #endif 3354 /* Even if we own the page, we do not use atomic_set(). 3355 * This would break get_page_unless_zero() users. 3356 */ 3357 atomic_add(size - 1, &page->_count); 3358 3359 /* reset page count bias and offset to start of new frag */ 3360 nc->pfmemalloc = page_is_pfmemalloc(page); 3361 nc->pagecnt_bias = size; 3362 nc->offset = size; 3363 } 3364 3365 offset = nc->offset - fragsz; 3366 if (unlikely(offset < 0)) { 3367 page = virt_to_page(nc->va); 3368 3369 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count)) 3370 goto refill; 3371 3372 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3373 /* if size can vary use size else just use PAGE_SIZE */ 3374 size = nc->size; 3375 #endif 3376 /* OK, page count is 0, we can safely set it */ 3377 atomic_set(&page->_count, size); 3378 3379 /* reset page count bias and offset to start of new frag */ 3380 nc->pagecnt_bias = size; 3381 offset = size - fragsz; 3382 } 3383 3384 nc->pagecnt_bias--; 3385 nc->offset = offset; 3386 3387 return nc->va + offset; 3388 } 3389 EXPORT_SYMBOL(__alloc_page_frag); 3390 3391 /* 3392 * Frees a page fragment allocated out of either a compound or order 0 page. 3393 */ 3394 void __free_page_frag(void *addr) 3395 { 3396 struct page *page = virt_to_head_page(addr); 3397 3398 if (unlikely(put_page_testzero(page))) 3399 __free_pages_ok(page, compound_order(page)); 3400 } 3401 EXPORT_SYMBOL(__free_page_frag); 3402 3403 /* 3404 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter 3405 * of the current memory cgroup. 3406 * 3407 * It should be used when the caller would like to use kmalloc, but since the 3408 * allocation is large, it has to fall back to the page allocator. 3409 */ 3410 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) 3411 { 3412 struct page *page; 3413 3414 page = alloc_pages(gfp_mask, order); 3415 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { 3416 __free_pages(page, order); 3417 page = NULL; 3418 } 3419 return page; 3420 } 3421 3422 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) 3423 { 3424 struct page *page; 3425 3426 page = alloc_pages_node(nid, gfp_mask, order); 3427 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { 3428 __free_pages(page, order); 3429 page = NULL; 3430 } 3431 return page; 3432 } 3433 3434 /* 3435 * __free_kmem_pages and free_kmem_pages will free pages allocated with 3436 * alloc_kmem_pages. 
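 *
 * Illustrative pairing (a sketch only):
 *
 *	page = alloc_kmem_pages(GFP_KERNEL, order);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_kmem_pages(page, order);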
3437 */ 3438 void __free_kmem_pages(struct page *page, unsigned int order) 3439 { 3440 memcg_kmem_uncharge(page, order); 3441 __free_pages(page, order); 3442 } 3443 3444 void free_kmem_pages(unsigned long addr, unsigned int order) 3445 { 3446 if (addr != 0) { 3447 VM_BUG_ON(!virt_addr_valid((void *)addr)); 3448 __free_kmem_pages(virt_to_page((void *)addr), order); 3449 } 3450 } 3451 3452 static void *make_alloc_exact(unsigned long addr, unsigned int order, 3453 size_t size) 3454 { 3455 if (addr) { 3456 unsigned long alloc_end = addr + (PAGE_SIZE << order); 3457 unsigned long used = addr + PAGE_ALIGN(size); 3458 3459 split_page(virt_to_page((void *)addr), order); 3460 while (used < alloc_end) { 3461 free_page(used); 3462 used += PAGE_SIZE; 3463 } 3464 } 3465 return (void *)addr; 3466 } 3467 3468 /** 3469 * alloc_pages_exact - allocate an exact number of physically-contiguous pages. 3470 * @size: the number of bytes to allocate 3471 * @gfp_mask: GFP flags for the allocation 3472 * 3473 * This function is similar to alloc_pages(), except that it allocates the 3474 * minimum number of pages to satisfy the request. alloc_pages() can only 3475 * allocate memory in power-of-two pages. 3476 * 3477 * This function is also limited by MAX_ORDER. 3478 * 3479 * Memory allocated by this function must be released by free_pages_exact(). 3480 */ 3481 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 3482 { 3483 unsigned int order = get_order(size); 3484 unsigned long addr; 3485 3486 addr = __get_free_pages(gfp_mask, order); 3487 return make_alloc_exact(addr, order, size); 3488 } 3489 EXPORT_SYMBOL(alloc_pages_exact); 3490 3491 /** 3492 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 3493 * pages on a node. 3494 * @nid: the preferred node ID where memory should be allocated 3495 * @size: the number of bytes to allocate 3496 * @gfp_mask: GFP flags for the allocation 3497 * 3498 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 3499 * back. 3500 */ 3501 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 3502 { 3503 unsigned int order = get_order(size); 3504 struct page *p = alloc_pages_node(nid, gfp_mask, order); 3505 if (!p) 3506 return NULL; 3507 return make_alloc_exact((unsigned long)page_address(p), order, size); 3508 } 3509 3510 /** 3511 * free_pages_exact - release memory allocated via alloc_pages_exact() 3512 * @virt: the value returned by alloc_pages_exact. 3513 * @size: size of allocation, same value as passed to alloc_pages_exact(). 3514 * 3515 * Release the memory allocated by a previous call to alloc_pages_exact. 3516 */ 3517 void free_pages_exact(void *virt, size_t size) 3518 { 3519 unsigned long addr = (unsigned long)virt; 3520 unsigned long end = addr + PAGE_ALIGN(size); 3521 3522 while (addr < end) { 3523 free_page(addr); 3524 addr += PAGE_SIZE; 3525 } 3526 } 3527 EXPORT_SYMBOL(free_pages_exact); 3528 3529 /** 3530 * nr_free_zone_pages - count number of pages beyond high watermark 3531 * @offset: The zone index of the highest zone 3532 * 3533 * nr_free_zone_pages() counts the number of pages which are beyond the 3534 * high watermark within all zones at or below a given zone index.
For each 3535 * zone, the number of pages is calculated as: 3536 * managed_pages - high_pages 3537 */ 3538 static unsigned long nr_free_zone_pages(int offset) 3539 { 3540 struct zoneref *z; 3541 struct zone *zone; 3542 3543 /* Just pick one node, since fallback list is circular */ 3544 unsigned long sum = 0; 3545 3546 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 3547 3548 for_each_zone_zonelist(zone, z, zonelist, offset) { 3549 unsigned long size = zone->managed_pages; 3550 unsigned long high = high_wmark_pages(zone); 3551 if (size > high) 3552 sum += size - high; 3553 } 3554 3555 return sum; 3556 } 3557 3558 /** 3559 * nr_free_buffer_pages - count number of pages beyond high watermark 3560 * 3561 * nr_free_buffer_pages() counts the number of pages which are beyond the high 3562 * watermark within ZONE_DMA and ZONE_NORMAL. 3563 */ 3564 unsigned long nr_free_buffer_pages(void) 3565 { 3566 return nr_free_zone_pages(gfp_zone(GFP_USER)); 3567 } 3568 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 3569 3570 /** 3571 * nr_free_pagecache_pages - count number of pages beyond high watermark 3572 * 3573 * nr_free_pagecache_pages() counts the number of pages which are beyond the 3574 * high watermark within all zones. 3575 */ 3576 unsigned long nr_free_pagecache_pages(void) 3577 { 3578 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 3579 } 3580 3581 static inline void show_node(struct zone *zone) 3582 { 3583 if (IS_ENABLED(CONFIG_NUMA)) 3584 printk("Node %d ", zone_to_nid(zone)); 3585 } 3586 3587 void si_meminfo(struct sysinfo *val) 3588 { 3589 val->totalram = totalram_pages; 3590 val->sharedram = global_page_state(NR_SHMEM); 3591 val->freeram = global_page_state(NR_FREE_PAGES); 3592 val->bufferram = nr_blockdev_pages(); 3593 val->totalhigh = totalhigh_pages; 3594 val->freehigh = nr_free_highpages(); 3595 val->mem_unit = PAGE_SIZE; 3596 } 3597 3598 EXPORT_SYMBOL(si_meminfo); 3599 3600 #ifdef CONFIG_NUMA 3601 void si_meminfo_node(struct sysinfo *val, int nid) 3602 { 3603 int zone_type; /* needs to be signed */ 3604 unsigned long managed_pages = 0; 3605 pg_data_t *pgdat = NODE_DATA(nid); 3606 3607 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 3608 managed_pages += pgdat->node_zones[zone_type].managed_pages; 3609 val->totalram = managed_pages; 3610 val->sharedram = node_page_state(nid, NR_SHMEM); 3611 val->freeram = node_page_state(nid, NR_FREE_PAGES); 3612 #ifdef CONFIG_HIGHMEM 3613 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; 3614 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 3615 NR_FREE_PAGES); 3616 #else 3617 val->totalhigh = 0; 3618 val->freehigh = 0; 3619 #endif 3620 val->mem_unit = PAGE_SIZE; 3621 } 3622 #endif 3623 3624 /* 3625 * Determine whether the node should be displayed or not, depending on whether 3626 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 
3627 */ 3628 bool skip_free_areas_node(unsigned int flags, int nid) 3629 { 3630 bool ret = false; 3631 unsigned int cpuset_mems_cookie; 3632 3633 if (!(flags & SHOW_MEM_FILTER_NODES)) 3634 goto out; 3635 3636 do { 3637 cpuset_mems_cookie = read_mems_allowed_begin(); 3638 ret = !node_isset(nid, cpuset_current_mems_allowed); 3639 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 3640 out: 3641 return ret; 3642 } 3643 3644 #define K(x) ((x) << (PAGE_SHIFT-10)) 3645 3646 static void show_migration_types(unsigned char type) 3647 { 3648 static const char types[MIGRATE_TYPES] = { 3649 [MIGRATE_UNMOVABLE] = 'U', 3650 [MIGRATE_MOVABLE] = 'M', 3651 [MIGRATE_RECLAIMABLE] = 'E', 3652 [MIGRATE_HIGHATOMIC] = 'H', 3653 #ifdef CONFIG_CMA 3654 [MIGRATE_CMA] = 'C', 3655 #endif 3656 #ifdef CONFIG_MEMORY_ISOLATION 3657 [MIGRATE_ISOLATE] = 'I', 3658 #endif 3659 }; 3660 char tmp[MIGRATE_TYPES + 1]; 3661 char *p = tmp; 3662 int i; 3663 3664 for (i = 0; i < MIGRATE_TYPES; i++) { 3665 if (type & (1 << i)) 3666 *p++ = types[i]; 3667 } 3668 3669 *p = '\0'; 3670 printk("(%s) ", tmp); 3671 } 3672 3673 /* 3674 * Show free area list (used inside shift_scroll-lock stuff) 3675 * We also calculate the percentage fragmentation. We do this by counting the 3676 * memory on each free list with the exception of the first item on the list. 3677 * 3678 * Bits in @filter: 3679 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 3680 * cpuset. 3681 */ 3682 void show_free_areas(unsigned int filter) 3683 { 3684 unsigned long free_pcp = 0; 3685 int cpu; 3686 struct zone *zone; 3687 3688 for_each_populated_zone(zone) { 3689 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3690 continue; 3691 3692 for_each_online_cpu(cpu) 3693 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 3694 } 3695 3696 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 3697 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 3698 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 3699 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 3700 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 3701 " free:%lu free_pcp:%lu free_cma:%lu\n", 3702 global_page_state(NR_ACTIVE_ANON), 3703 global_page_state(NR_INACTIVE_ANON), 3704 global_page_state(NR_ISOLATED_ANON), 3705 global_page_state(NR_ACTIVE_FILE), 3706 global_page_state(NR_INACTIVE_FILE), 3707 global_page_state(NR_ISOLATED_FILE), 3708 global_page_state(NR_UNEVICTABLE), 3709 global_page_state(NR_FILE_DIRTY), 3710 global_page_state(NR_WRITEBACK), 3711 global_page_state(NR_UNSTABLE_NFS), 3712 global_page_state(NR_SLAB_RECLAIMABLE), 3713 global_page_state(NR_SLAB_UNRECLAIMABLE), 3714 global_page_state(NR_FILE_MAPPED), 3715 global_page_state(NR_SHMEM), 3716 global_page_state(NR_PAGETABLE), 3717 global_page_state(NR_BOUNCE), 3718 global_page_state(NR_FREE_PAGES), 3719 free_pcp, 3720 global_page_state(NR_FREE_CMA_PAGES)); 3721 3722 for_each_populated_zone(zone) { 3723 int i; 3724 3725 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3726 continue; 3727 3728 free_pcp = 0; 3729 for_each_online_cpu(cpu) 3730 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 3731 3732 show_node(zone); 3733 printk("%s" 3734 " free:%lukB" 3735 " min:%lukB" 3736 " low:%lukB" 3737 " high:%lukB" 3738 " active_anon:%lukB" 3739 " inactive_anon:%lukB" 3740 " active_file:%lukB" 3741 " inactive_file:%lukB" 3742 " unevictable:%lukB" 3743 " isolated(anon):%lukB" 3744 " isolated(file):%lukB" 3745 " present:%lukB" 3746 " managed:%lukB" 3747 " mlocked:%lukB" 3748 " dirty:%lukB" 3749 " 
writeback:%lukB" 3750 " mapped:%lukB" 3751 " shmem:%lukB" 3752 " slab_reclaimable:%lukB" 3753 " slab_unreclaimable:%lukB" 3754 " kernel_stack:%lukB" 3755 " pagetables:%lukB" 3756 " unstable:%lukB" 3757 " bounce:%lukB" 3758 " free_pcp:%lukB" 3759 " local_pcp:%ukB" 3760 " free_cma:%lukB" 3761 " writeback_tmp:%lukB" 3762 " pages_scanned:%lu" 3763 " all_unreclaimable? %s" 3764 "\n", 3765 zone->name, 3766 K(zone_page_state(zone, NR_FREE_PAGES)), 3767 K(min_wmark_pages(zone)), 3768 K(low_wmark_pages(zone)), 3769 K(high_wmark_pages(zone)), 3770 K(zone_page_state(zone, NR_ACTIVE_ANON)), 3771 K(zone_page_state(zone, NR_INACTIVE_ANON)), 3772 K(zone_page_state(zone, NR_ACTIVE_FILE)), 3773 K(zone_page_state(zone, NR_INACTIVE_FILE)), 3774 K(zone_page_state(zone, NR_UNEVICTABLE)), 3775 K(zone_page_state(zone, NR_ISOLATED_ANON)), 3776 K(zone_page_state(zone, NR_ISOLATED_FILE)), 3777 K(zone->present_pages), 3778 K(zone->managed_pages), 3779 K(zone_page_state(zone, NR_MLOCK)), 3780 K(zone_page_state(zone, NR_FILE_DIRTY)), 3781 K(zone_page_state(zone, NR_WRITEBACK)), 3782 K(zone_page_state(zone, NR_FILE_MAPPED)), 3783 K(zone_page_state(zone, NR_SHMEM)), 3784 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 3785 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 3786 zone_page_state(zone, NR_KERNEL_STACK) * 3787 THREAD_SIZE / 1024, 3788 K(zone_page_state(zone, NR_PAGETABLE)), 3789 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 3790 K(zone_page_state(zone, NR_BOUNCE)), 3791 K(free_pcp), 3792 K(this_cpu_read(zone->pageset->pcp.count)), 3793 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), 3794 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 3795 K(zone_page_state(zone, NR_PAGES_SCANNED)), 3796 (!zone_reclaimable(zone) ? "yes" : "no") 3797 ); 3798 printk("lowmem_reserve[]:"); 3799 for (i = 0; i < MAX_NR_ZONES; i++) 3800 printk(" %ld", zone->lowmem_reserve[i]); 3801 printk("\n"); 3802 } 3803 3804 for_each_populated_zone(zone) { 3805 unsigned int order; 3806 unsigned long nr[MAX_ORDER], flags, total = 0; 3807 unsigned char types[MAX_ORDER]; 3808 3809 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3810 continue; 3811 show_node(zone); 3812 printk("%s: ", zone->name); 3813 3814 spin_lock_irqsave(&zone->lock, flags); 3815 for (order = 0; order < MAX_ORDER; order++) { 3816 struct free_area *area = &zone->free_area[order]; 3817 int type; 3818 3819 nr[order] = area->nr_free; 3820 total += nr[order] << order; 3821 3822 types[order] = 0; 3823 for (type = 0; type < MIGRATE_TYPES; type++) { 3824 if (!list_empty(&area->free_list[type])) 3825 types[order] |= 1 << type; 3826 } 3827 } 3828 spin_unlock_irqrestore(&zone->lock, flags); 3829 for (order = 0; order < MAX_ORDER; order++) { 3830 printk("%lu*%lukB ", nr[order], K(1UL) << order); 3831 if (nr[order]) 3832 show_migration_types(types[order]); 3833 } 3834 printk("= %lukB\n", K(total)); 3835 } 3836 3837 hugetlb_show_meminfo(); 3838 3839 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 3840 3841 show_swap_cache_info(); 3842 } 3843 3844 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 3845 { 3846 zoneref->zone = zone; 3847 zoneref->zone_idx = zone_idx(zone); 3848 } 3849 3850 /* 3851 * Builds allocation fallback zone lists. 3852 * 3853 * Add all populated zones of a node to the zonelist. 
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
                                int nr_zones)
{
        struct zone *zone;
        enum zone_type zone_type = MAX_NR_ZONES;

        do {
                zone_type--;
                zone = pgdat->node_zones + zone_type;
                if (populated_zone(zone)) {
                        zoneref_set_zone(zone,
                                &zonelist->_zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
                }
        } while (zone_type);

        return nr_zones;
}


/*
 *  zonelist_order:
 *  0 = automatic detection of better ordering.
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 *  the same zonelist. So only NUMA can configure this parameter.
 */
#define ZONELIST_ORDER_DEFAULT  0
#define ZONELIST_ORDER_NODE     1
#define ZONELIST_ORDER_ZONE     2

/* zonelist order in the kernel.
 * set_zonelist_order() will set this to NODE or ZONE.
 */
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};


#ifdef CONFIG_NUMA
/* The value the user specified; may be overridden later by the config. */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN 16
char numa_zonelist_order[16] = "default";

/*
 * Interface for configuring zonelist ordering.
 * Command line option "numa_zonelist_order"
 *   = "[dD]efault" - default, automatic configuration.
 *   = "[nN]ode"    - order by node locality, then by zone within node.
 *   = "[zZ]one"    - order by zone, then by locality within zone.
 */

static int __parse_numa_zonelist_order(char *s)
{
        if (*s == 'd' || *s == 'D') {
                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
        } else if (*s == 'n' || *s == 'N') {
                user_zonelist_order = ZONELIST_ORDER_NODE;
        } else if (*s == 'z' || *s == 'Z') {
                user_zonelist_order = ZONELIST_ORDER_ZONE;
        } else {
                printk(KERN_WARNING
                        "Ignoring invalid numa_zonelist_order value: "
                        "%s\n", s);
                return -EINVAL;
        }
        return 0;
}

static __init int setup_numa_zonelist_order(char *s)
{
        int ret;

        if (!s)
                return 0;

        ret = __parse_numa_zonelist_order(s);
        if (ret == 0)
                strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);

        return ret;
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *length,
                loff_t *ppos)
{
        char saved_string[NUMA_ZONELIST_ORDER_LEN];
        int ret;
        static DEFINE_MUTEX(zl_order_mutex);

        mutex_lock(&zl_order_mutex);
        if (write) {
                if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
                        ret = -EINVAL;
                        goto out;
                }
                strcpy(saved_string, (char *)table->data);
        }
        ret = proc_dostring(table, write, buffer, length, ppos);
        if (ret)
                goto out;
        if (write) {
                int oldval = user_zonelist_order;

                ret = __parse_numa_zonelist_order((char *)table->data);
                if (ret) {
                        /*
                         * bogus value.
restore saved string 3971 */ 3972 strncpy((char *)table->data, saved_string, 3973 NUMA_ZONELIST_ORDER_LEN); 3974 user_zonelist_order = oldval; 3975 } else if (oldval != user_zonelist_order) { 3976 mutex_lock(&zonelists_mutex); 3977 build_all_zonelists(NULL, NULL); 3978 mutex_unlock(&zonelists_mutex); 3979 } 3980 } 3981 out: 3982 mutex_unlock(&zl_order_mutex); 3983 return ret; 3984 } 3985 3986 3987 #define MAX_NODE_LOAD (nr_online_nodes) 3988 static int node_load[MAX_NUMNODES]; 3989 3990 /** 3991 * find_next_best_node - find the next node that should appear in a given node's fallback list 3992 * @node: node whose fallback list we're appending 3993 * @used_node_mask: nodemask_t of already used nodes 3994 * 3995 * We use a number of factors to determine which is the next node that should 3996 * appear on a given node's fallback list. The node should not have appeared 3997 * already in @node's fallback list, and it should be the next closest node 3998 * according to the distance array (which contains arbitrary distance values 3999 * from each node to each node in the system), and should also prefer nodes 4000 * with no CPUs, since presumably they'll have very little allocation pressure 4001 * on them otherwise. 4002 * It returns -1 if no node is found. 4003 */ 4004 static int find_next_best_node(int node, nodemask_t *used_node_mask) 4005 { 4006 int n, val; 4007 int min_val = INT_MAX; 4008 int best_node = NUMA_NO_NODE; 4009 const struct cpumask *tmp = cpumask_of_node(0); 4010 4011 /* Use the local node if we haven't already */ 4012 if (!node_isset(node, *used_node_mask)) { 4013 node_set(node, *used_node_mask); 4014 return node; 4015 } 4016 4017 for_each_node_state(n, N_MEMORY) { 4018 4019 /* Don't want a node to appear more than once */ 4020 if (node_isset(n, *used_node_mask)) 4021 continue; 4022 4023 /* Use the distance array to find the distance */ 4024 val = node_distance(node, n); 4025 4026 /* Penalize nodes under us ("prefer the next node") */ 4027 val += (n < node); 4028 4029 /* Give preference to headless and unused nodes */ 4030 tmp = cpumask_of_node(n); 4031 if (!cpumask_empty(tmp)) 4032 val += PENALTY_FOR_NODE_WITH_CPUS; 4033 4034 /* Slight preference for less loaded node */ 4035 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 4036 val += node_load[n]; 4037 4038 if (val < min_val) { 4039 min_val = val; 4040 best_node = n; 4041 } 4042 } 4043 4044 if (best_node >= 0) 4045 node_set(best_node, *used_node_mask); 4046 4047 return best_node; 4048 } 4049 4050 4051 /* 4052 * Build zonelists ordered by node and zones within node. 4053 * This results in maximum locality--normal zone overflows into local 4054 * DMA zone, if any--but risks exhausting DMA zone. 4055 */ 4056 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 4057 { 4058 int j; 4059 struct zonelist *zonelist; 4060 4061 zonelist = &pgdat->node_zonelists[0]; 4062 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 4063 ; 4064 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4065 zonelist->_zonerefs[j].zone = NULL; 4066 zonelist->_zonerefs[j].zone_idx = 0; 4067 } 4068 4069 /* 4070 * Build gfp_thisnode zonelists 4071 */ 4072 static void build_thisnode_zonelists(pg_data_t *pgdat) 4073 { 4074 int j; 4075 struct zonelist *zonelist; 4076 4077 zonelist = &pgdat->node_zonelists[1]; 4078 j = build_zonelists_node(pgdat, zonelist, 0); 4079 zonelist->_zonerefs[j].zone = NULL; 4080 zonelist->_zonerefs[j].zone_idx = 0; 4081 } 4082 4083 /* 4084 * Build zonelists ordered by zone and nodes within zones. 
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but risks overflowing to a remote node while memory
 * may still exist in the local DMA zone.
 */
static int node_order[MAX_NUMNODES];

static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
        int pos, j, node;
        int zone_type;          /* needs to be signed */
        struct zone *z;
        struct zonelist *zonelist;

        zonelist = &pgdat->node_zonelists[0];
        pos = 0;
        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
                for (j = 0; j < nr_nodes; j++) {
                        node = node_order[j];
                        z = &NODE_DATA(node)->node_zones[zone_type];
                        if (populated_zone(z)) {
                                zoneref_set_zone(z,
                                        &zonelist->_zonerefs[pos++]);
                                check_highest_zone(zone_type);
                        }
                }
        }
        zonelist->_zonerefs[pos].zone = NULL;
        zonelist->_zonerefs[pos].zone_idx = 0;
}

#if defined(CONFIG_64BIT)
/*
 * Devices that require DMA32/DMA are relatively rare and do not justify a
 * penalty to every machine in case the specialised case applies. Default
 * to Node-ordering on 64-bit NUMA machines.
 */
static int default_zonelist_order(void)
{
        return ZONELIST_ORDER_NODE;
}
#else
/*
 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
 * by the kernel. If processes running on node 0 deplete the low memory zone
 * then reclaim will occur more frequently, increasing stalls, and the system
 * will potentially be easier to OOM if a large percentage of the zone is under
 * writeback or dirty. The problem is significantly worse if CONFIG_HIGHPTE is
 * not set. Hence, default to zone ordering on 32-bit.
 */
static int default_zonelist_order(void)
{
        return ZONELIST_ORDER_ZONE;
}
#endif /* CONFIG_64BIT */

static void set_zonelist_order(void)
{
        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
                current_zonelist_order = default_zonelist_order();
        else
                current_zonelist_order = user_zonelist_order;
}

static void build_zonelists(pg_data_t *pgdat)
{
        int j, node, load;
        enum zone_type i;
        nodemask_t used_mask;
        int local_node, prev_node;
        struct zonelist *zonelist;
        unsigned int order = current_zonelist_order;

        /* initialize zonelists */
        for (i = 0; i < MAX_ZONELISTS; i++) {
                zonelist = pgdat->node_zonelists + i;
                zonelist->_zonerefs[0].zone = NULL;
                zonelist->_zonerefs[0].zone_idx = 0;
        }

        /* NUMA-aware ordering of nodes */
        local_node = pgdat->node_id;
        load = nr_online_nodes;
        prev_node = local_node;
        nodes_clear(used_mask);

        memset(node_order, 0, sizeof(node_order));
        j = 0;

        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
                /*
                 * We don't want to pressure a particular node.
                 * So we add a penalty to the first node in the same
                 * distance group to make it round-robin.
                 */
                if (node_distance(local_node, node) !=
                    node_distance(local_node, prev_node))
                        node_load[node] = load;

                prev_node = node;
                load--;
                if (order == ZONELIST_ORDER_NODE)
                        build_zonelists_in_node_order(pgdat, node);
                else
                        node_order[j++] = node; /* remember order */
        }

        if (order == ZONELIST_ORDER_ZONE) {
                /* calculate node order -- i.e., DMA last!
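                 *
                 * Illustrative example: with two nodes that each have
                 * populated Normal and DMA zones, zone order yields
                 * Normal(0), Normal(1), DMA(0), DMA(1), whereas node
                 * order would yield Normal(0), DMA(0), Normal(1), DMA(1).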
*/ 4193 build_zonelists_in_zone_order(pgdat, j); 4194 } 4195 4196 build_thisnode_zonelists(pgdat); 4197 } 4198 4199 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 4200 /* 4201 * Return node id of node used for "local" allocations. 4202 * I.e., first node id of first zone in arg node's generic zonelist. 4203 * Used for initializing percpu 'numa_mem', which is used primarily 4204 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 4205 */ 4206 int local_memory_node(int node) 4207 { 4208 struct zone *zone; 4209 4210 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 4211 gfp_zone(GFP_KERNEL), 4212 NULL, 4213 &zone); 4214 return zone->node; 4215 } 4216 #endif 4217 4218 #else /* CONFIG_NUMA */ 4219 4220 static void set_zonelist_order(void) 4221 { 4222 current_zonelist_order = ZONELIST_ORDER_ZONE; 4223 } 4224 4225 static void build_zonelists(pg_data_t *pgdat) 4226 { 4227 int node, local_node; 4228 enum zone_type j; 4229 struct zonelist *zonelist; 4230 4231 local_node = pgdat->node_id; 4232 4233 zonelist = &pgdat->node_zonelists[0]; 4234 j = build_zonelists_node(pgdat, zonelist, 0); 4235 4236 /* 4237 * Now we build the zonelist so that it contains the zones 4238 * of all the other nodes. 4239 * We don't want to pressure a particular node, so when 4240 * building the zones for node N, we make sure that the 4241 * zones coming right after the local ones are those from 4242 * node N+1 (modulo N) 4243 */ 4244 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 4245 if (!node_online(node)) 4246 continue; 4247 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4248 } 4249 for (node = 0; node < local_node; node++) { 4250 if (!node_online(node)) 4251 continue; 4252 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4253 } 4254 4255 zonelist->_zonerefs[j].zone = NULL; 4256 zonelist->_zonerefs[j].zone_idx = 0; 4257 } 4258 4259 #endif /* CONFIG_NUMA */ 4260 4261 /* 4262 * Boot pageset table. One per cpu which is going to be used for all 4263 * zones and all nodes. The parameters will be set in such a way 4264 * that an item put on a list will immediately be handed over to 4265 * the buddy list. This is safe since pageset manipulation is done 4266 * with interrupts disabled. 4267 * 4268 * The boot_pagesets must be kept even after bootup is complete for 4269 * unused processors and/or zones. They do play a role for bootstrapping 4270 * hotplugged processors. 4271 * 4272 * zoneinfo_show() and maybe other functions do 4273 * not check if the processor is online before following the pageset pointer. 4274 * Other parts of the kernel may not check if the zone is available. 4275 */ 4276 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 4277 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 4278 static void setup_zone_pageset(struct zone *zone); 4279 4280 /* 4281 * Global mutex to protect against size modification of zonelists 4282 * as well as to serialize pageset setup for the new populated zone. 
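 *
 * A typical caller pattern (see numa_zonelist_order_handler() above) is:
 *
 *      mutex_lock(&zonelists_mutex);
 *      build_all_zonelists(NULL, NULL);
 *      mutex_unlock(&zonelists_mutex);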
4283 */ 4284 DEFINE_MUTEX(zonelists_mutex); 4285 4286 /* return values int ....just for stop_machine() */ 4287 static int __build_all_zonelists(void *data) 4288 { 4289 int nid; 4290 int cpu; 4291 pg_data_t *self = data; 4292 4293 #ifdef CONFIG_NUMA 4294 memset(node_load, 0, sizeof(node_load)); 4295 #endif 4296 4297 if (self && !node_online(self->node_id)) { 4298 build_zonelists(self); 4299 } 4300 4301 for_each_online_node(nid) { 4302 pg_data_t *pgdat = NODE_DATA(nid); 4303 4304 build_zonelists(pgdat); 4305 } 4306 4307 /* 4308 * Initialize the boot_pagesets that are going to be used 4309 * for bootstrapping processors. The real pagesets for 4310 * each zone will be allocated later when the per cpu 4311 * allocator is available. 4312 * 4313 * boot_pagesets are used also for bootstrapping offline 4314 * cpus if the system is already booted because the pagesets 4315 * are needed to initialize allocators on a specific cpu too. 4316 * F.e. the percpu allocator needs the page allocator which 4317 * needs the percpu allocator in order to allocate its pagesets 4318 * (a chicken-egg dilemma). 4319 */ 4320 for_each_possible_cpu(cpu) { 4321 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 4322 4323 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 4324 /* 4325 * We now know the "local memory node" for each node-- 4326 * i.e., the node of the first zone in the generic zonelist. 4327 * Set up numa_mem percpu variable for on-line cpus. During 4328 * boot, only the boot cpu should be on-line; we'll init the 4329 * secondary cpus' numa_mem as they come on-line. During 4330 * node/memory hotplug, we'll fixup all on-line cpus. 4331 */ 4332 if (cpu_online(cpu)) 4333 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 4334 #endif 4335 } 4336 4337 return 0; 4338 } 4339 4340 static noinline void __init 4341 build_all_zonelists_init(void) 4342 { 4343 __build_all_zonelists(NULL); 4344 mminit_verify_zonelist(); 4345 cpuset_init_current_mems_allowed(); 4346 } 4347 4348 /* 4349 * Called with zonelists_mutex held always 4350 * unless system_state == SYSTEM_BOOTING. 4351 * 4352 * __ref due to (1) call of __meminit annotated setup_zone_pageset 4353 * [we're only called with non-NULL zone through __meminit paths] and 4354 * (2) call of __init annotated helper build_all_zonelists_init 4355 * [protected by SYSTEM_BOOTING]. 4356 */ 4357 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 4358 { 4359 set_zonelist_order(); 4360 4361 if (system_state == SYSTEM_BOOTING) { 4362 build_all_zonelists_init(); 4363 } else { 4364 #ifdef CONFIG_MEMORY_HOTPLUG 4365 if (zone) 4366 setup_zone_pageset(zone); 4367 #endif 4368 /* we have to stop all cpus to guarantee there is no user 4369 of zonelist */ 4370 stop_machine(__build_all_zonelists, pgdat, NULL); 4371 /* cpuset refresh routine should be here */ 4372 } 4373 vm_total_pages = nr_free_pagecache_pages(); 4374 /* 4375 * Disable grouping by mobility if the number of pages in the 4376 * system is too low to allow the mechanism to work. It would be 4377 * more accurate, but expensive to check per-zone. This check is 4378 * made on memory-hotadd so a system can start with mobility 4379 * disabled and enable it later 4380 */ 4381 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 4382 page_group_by_mobility_disabled = 1; 4383 else 4384 page_group_by_mobility_disabled = 0; 4385 4386 pr_info("Built %i zonelists in %s order, mobility grouping %s. 
" 4387 "Total pages: %ld\n", 4388 nr_online_nodes, 4389 zonelist_order_name[current_zonelist_order], 4390 page_group_by_mobility_disabled ? "off" : "on", 4391 vm_total_pages); 4392 #ifdef CONFIG_NUMA 4393 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 4394 #endif 4395 } 4396 4397 /* 4398 * Helper functions to size the waitqueue hash table. 4399 * Essentially these want to choose hash table sizes sufficiently 4400 * large so that collisions trying to wait on pages are rare. 4401 * But in fact, the number of active page waitqueues on typical 4402 * systems is ridiculously low, less than 200. So this is even 4403 * conservative, even though it seems large. 4404 * 4405 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 4406 * waitqueues, i.e. the size of the waitq table given the number of pages. 4407 */ 4408 #define PAGES_PER_WAITQUEUE 256 4409 4410 #ifndef CONFIG_MEMORY_HOTPLUG 4411 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 4412 { 4413 unsigned long size = 1; 4414 4415 pages /= PAGES_PER_WAITQUEUE; 4416 4417 while (size < pages) 4418 size <<= 1; 4419 4420 /* 4421 * Once we have dozens or even hundreds of threads sleeping 4422 * on IO we've got bigger problems than wait queue collision. 4423 * Limit the size of the wait table to a reasonable size. 4424 */ 4425 size = min(size, 4096UL); 4426 4427 return max(size, 4UL); 4428 } 4429 #else 4430 /* 4431 * A zone's size might be changed by hot-add, so it is not possible to determine 4432 * a suitable size for its wait_table. So we use the maximum size now. 4433 * 4434 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 4435 * 4436 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 4437 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 4438 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 4439 * 4440 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 4441 * or more by the traditional way. (See above). It equals: 4442 * 4443 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 4444 * ia64(16K page size) : = ( 8G + 4M)byte. 4445 * powerpc (64K page size) : = (32G +16M)byte. 4446 */ 4447 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 4448 { 4449 return 4096UL; 4450 } 4451 #endif 4452 4453 /* 4454 * This is an integer logarithm so that shifts can be used later 4455 * to extract the more random high bits from the multiplicative 4456 * hash function before the remainder is taken. 4457 */ 4458 static inline unsigned long wait_table_bits(unsigned long size) 4459 { 4460 return ffz(~size); 4461 } 4462 4463 /* 4464 * Initially all pages are reserved - free ones are freed 4465 * up by free_all_bootmem() once the early boot process is 4466 * done. Non-atomic initialization, single-pass. 4467 */ 4468 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 4469 unsigned long start_pfn, enum memmap_context context) 4470 { 4471 pg_data_t *pgdat = NODE_DATA(nid); 4472 unsigned long end_pfn = start_pfn + size; 4473 unsigned long pfn; 4474 struct zone *z; 4475 unsigned long nr_initialised = 0; 4476 4477 if (highest_memmap_pfn < end_pfn - 1) 4478 highest_memmap_pfn = end_pfn - 1; 4479 4480 z = &pgdat->node_zones[zone]; 4481 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4482 /* 4483 * There can be holes in boot-time mem_map[]s 4484 * handed to this function. They do not 4485 * exist on hotplugged memory. 
 */
                if (context == MEMMAP_EARLY) {
                        if (!early_pfn_valid(pfn))
                                continue;
                        if (!early_pfn_in_nid(pfn, nid))
                                continue;
                        if (!update_defer_init(pgdat, pfn, end_pfn,
                                                &nr_initialised))
                                break;
                }

                /*
                 * Mark the block movable so that blocks are reserved for
                 * movable at startup. This will force kernel allocations
                 * to reserve their blocks rather than leaking throughout
                 * the address space during boot when many long-lived
                 * kernel allocations are made.
                 *
                 * The bitmap is created for the zone's valid pfn range,
                 * but the memmap can be created for invalid pages (for
                 * alignment), so check here that we do not call
                 * set_pageblock_migratetype() against a pfn out of the zone.
                 */
                if (!(pfn & (pageblock_nr_pages - 1))) {
                        struct page *page = pfn_to_page(pfn);

                        __init_single_page(page, pfn, zone, nid);
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                } else {
                        __init_single_pfn(pfn, zone, nid);
                }
        }
}

static void __meminit zone_init_free_lists(struct zone *zone)
{
        unsigned int order, t;

        for_each_migratetype_order(order, t) {
                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
                zone->free_area[order].nr_free = 0;
        }
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif

static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
        int batch;

        /*
         * The per-cpu-pages pools are set to around 1000th of the
         * size of the zone. But no more than 1/2 of a meg.
         *
         * OK, so we don't know how big the cache is. So guess.
         */
        batch = zone->managed_pages / 1024;
        if (batch * PAGE_SIZE > 512 * 1024)
                batch = (512 * 1024) / PAGE_SIZE;
        batch /= 4;             /* We effectively *= 4 below */
        if (batch < 1)
                batch = 1;

        /*
         * Clamp the batch to a 2^n - 1 value. Having a power
         * of 2 value was found to be more likely to have
         * suboptimal cache aliasing properties in some cases.
         *
         * For example if 2 tasks are alternately allocating
         * batches of pages, one task can end up with a lot
         * of pages of one half of the possible page colors
         * and the other with pages of the other colors.
         */
        batch = rounddown_pow_of_two(batch + batch/2) - 1;

        return batch;

#else
        /* The deferral and batching of frees should be suppressed under NOMMU
         * conditions.
         *
         * The problem is that NOMMU needs to be able to allocate large chunks
         * of contiguous memory as there's no hardware page translation to
         * assemble apparent contiguous memory from discontiguous pages.
         *
         * Queueing large contiguous runs of pages for batching, however,
         * causes the pages to actually be freed in smaller chunks. As there
         * can be a significant delay between the individual batches being
         * recycled, this leads to the once large chunks of space being
         * fragmented and becoming unavailable for high-order allocations.
         */
        return 0;
#endif
}

/*
 * pcp->high and pcp->batch values are related and dependent on one another:
 * ->batch must never be higher than ->high.
 * The following function updates them in a safe manner without read side
 * locking.
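 *
 * For illustration, the only states a concurrent reader can observe are
 * (old high, old batch), (old high, 1), (new high, 1) and
 * (new high, new batch). The failsafe ->batch = 1 step below rules out
 * the one dangerous combination: a new (larger) batch paired with the
 * old (possibly smaller) high.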
 *
 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 * those fields changing asynchronously (according to the above rule).
 *
 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
 * outside of boot time (or some other assurance that no concurrent updaters
 * exist).
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
                unsigned long batch)
{
        /* start with a fail safe value for batch */
        pcp->batch = 1;
        smp_wmb();

        /* Update high, then batch, in order */
        pcp->high = high;
        smp_wmb();

        pcp->batch = batch;
}

/* a companion to pageset_set_high() */
static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
{
        pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
}

static void pageset_init(struct per_cpu_pageset *p)
{
        struct per_cpu_pages *pcp;
        int migratetype;

        memset(p, 0, sizeof(*p));

        pcp = &p->pcp;
        pcp->count = 0;
        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
                INIT_LIST_HEAD(&pcp->lists[migratetype]);
}

static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
        pageset_init(p);
        pageset_set_batch(p, batch);
}

/*
 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void pageset_set_high(struct per_cpu_pageset *p,
                                unsigned long high)
{
        unsigned long batch = max(1UL, high / 4);
        if ((high / 4) > (PAGE_SHIFT * 8))
                batch = PAGE_SHIFT * 8;

        pageset_update(&p->pcp, high, batch);
}

static void pageset_set_high_and_batch(struct zone *zone,
                                       struct per_cpu_pageset *pcp)
{
        if (percpu_pagelist_fraction)
                pageset_set_high(pcp,
                        (zone->managed_pages /
                                percpu_pagelist_fraction));
        else
                pageset_set_batch(pcp, zone_batchsize(zone));
}

static void __meminit zone_pageset_init(struct zone *zone, int cpu)
{
        struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);

        pageset_init(pcp);
        pageset_set_high_and_batch(zone, pcp);
}

static void __meminit setup_zone_pageset(struct zone *zone)
{
        int cpu;

        zone->pageset = alloc_percpu(struct per_cpu_pageset);
        for_each_possible_cpu(cpu)
                zone_pageset_init(zone, cpu);
}

/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
        struct zone *zone;

        for_each_populated_zone(zone)
                setup_zone_pageset(zone);
}

static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
        int i;
        size_t alloc_size;

        /*
         * The per-page waitqueue mechanism uses hashed waitqueues
         * per zone.
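         *
         * For example, a 1GiB zone with 4KiB pages spans 262144 pages;
         * with PAGES_PER_WAITQUEUE == 256 that requests 1024 entries,
         * already a power of two and within the [4, 4096] clamp applied
         * by wait_table_hash_nr_entries().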
         */
        zone->wait_table_hash_nr_entries =
                 wait_table_hash_nr_entries(zone_size_pages);
        zone->wait_table_bits =
                 wait_table_bits(zone->wait_table_hash_nr_entries);
        alloc_size = zone->wait_table_hash_nr_entries
                                        * sizeof(wait_queue_head_t);

        if (!slab_is_available()) {
                zone->wait_table = (wait_queue_head_t *)
                        memblock_virt_alloc_node_nopanic(
                                alloc_size, zone->zone_pgdat->node_id);
        } else {
                /*
                 * This case means that a zone whose size was 0 gets new
                 * memory via memory hot-add.
                 * But it may be the case that a new node was hot-added. In
                 * this case vmalloc() will not be able to use this new
                 * node's memory - this wait_table must be initialized to
                 * use this new node itself as well.
                 * To use this new node's memory, further consideration will
                 * be necessary.
                 */
                zone->wait_table = vmalloc(alloc_size);
        }
        if (!zone->wait_table)
                return -ENOMEM;

        for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
                init_waitqueue_head(zone->wait_table + i);

        return 0;
}

static __meminit void zone_pcp_init(struct zone *zone)
{
        /*
         * The per cpu subsystem is not up at this point. The following code
         * relies on the ability of the linker to provide the
         * offset of a (static) per cpu variable into the per cpu area.
         */
        zone->pageset = &boot_pageset;

        if (populated_zone(zone))
                printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
                        zone->name, zone->present_pages,
                                         zone_batchsize(zone));
}

int __meminit init_currently_empty_zone(struct zone *zone,
                                        unsigned long zone_start_pfn,
                                        unsigned long size)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret;

        ret = zone_wait_table_init(zone, size);
        if (ret)
                return ret;
        pgdat->nr_zones = zone_idx(zone) + 1;

        zone->zone_start_pfn = zone_start_pfn;

        mminit_dprintk(MMINIT_TRACE, "memmap_init",
                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
                        pgdat->node_id,
                        (unsigned long)zone_idx(zone),
                        zone_start_pfn, (zone_start_pfn + size));

        zone_init_free_lists(zone);

        return 0;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn,
                                        struct mminit_pfnnid_cache *state)
{
        unsigned long start_pfn, end_pfn;
        int nid;

        if (state->last_start <= pfn && pfn < state->last_end)
                return state->last_nid;

        nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
        if (nid != -1) {
                state->last_start = start_pfn;
                state->last_end = end_pfn;
                state->last_nid = nid;
        }

        return nid;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

/**
 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
 *
 * If an architecture guarantees that all ranges registered contain no holes
 * and may be freed, this function may be used instead of calling
 * memblock_free_early_nid() manually.
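 *
 * For example, free_bootmem_with_active_regions(MAX_NUMNODES, max_low_pfn)
 * releases every registered early range below max_low_pfn on every node.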
4805 */ 4806 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 4807 { 4808 unsigned long start_pfn, end_pfn; 4809 int i, this_nid; 4810 4811 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 4812 start_pfn = min(start_pfn, max_low_pfn); 4813 end_pfn = min(end_pfn, max_low_pfn); 4814 4815 if (start_pfn < end_pfn) 4816 memblock_free_early_nid(PFN_PHYS(start_pfn), 4817 (end_pfn - start_pfn) << PAGE_SHIFT, 4818 this_nid); 4819 } 4820 } 4821 4822 /** 4823 * sparse_memory_present_with_active_regions - Call memory_present for each active range 4824 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 4825 * 4826 * If an architecture guarantees that all ranges registered contain no holes and may 4827 * be freed, this function may be used instead of calling memory_present() manually. 4828 */ 4829 void __init sparse_memory_present_with_active_regions(int nid) 4830 { 4831 unsigned long start_pfn, end_pfn; 4832 int i, this_nid; 4833 4834 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 4835 memory_present(this_nid, start_pfn, end_pfn); 4836 } 4837 4838 /** 4839 * get_pfn_range_for_nid - Return the start and end page frames for a node 4840 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 4841 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 4842 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 4843 * 4844 * It returns the start and end page frame of a node based on information 4845 * provided by memblock_set_node(). If called for a node 4846 * with no available memory, a warning is printed and the start and end 4847 * PFNs will be 0. 4848 */ 4849 void __meminit get_pfn_range_for_nid(unsigned int nid, 4850 unsigned long *start_pfn, unsigned long *end_pfn) 4851 { 4852 unsigned long this_start_pfn, this_end_pfn; 4853 int i; 4854 4855 *start_pfn = -1UL; 4856 *end_pfn = 0; 4857 4858 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 4859 *start_pfn = min(*start_pfn, this_start_pfn); 4860 *end_pfn = max(*end_pfn, this_end_pfn); 4861 } 4862 4863 if (*start_pfn == -1UL) 4864 *start_pfn = 0; 4865 } 4866 4867 /* 4868 * This finds a zone that can be used for ZONE_MOVABLE pages. The 4869 * assumption is made that zones within a node are ordered in monotonic 4870 * increasing memory addresses so that the "highest" populated zone is used 4871 */ 4872 static void __init find_usable_zone_for_movable(void) 4873 { 4874 int zone_index; 4875 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 4876 if (zone_index == ZONE_MOVABLE) 4877 continue; 4878 4879 if (arch_zone_highest_possible_pfn[zone_index] > 4880 arch_zone_lowest_possible_pfn[zone_index]) 4881 break; 4882 } 4883 4884 VM_BUG_ON(zone_index == -1); 4885 movable_zone = zone_index; 4886 } 4887 4888 /* 4889 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 4890 * because it is sized independent of architecture. Unlike the other zones, 4891 * the starting point for ZONE_MOVABLE is not fixed. It may be different 4892 * in each node depending on the size of each node and how evenly kernelcore 4893 * is distributed. This helper function adjusts the zone ranges 4894 * provided by the architecture for a given node by using the end of the 4895 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
                                        unsigned long zone_type,
                                        unsigned long node_start_pfn,
                                        unsigned long node_end_pfn,
                                        unsigned long *zone_start_pfn,
                                        unsigned long *zone_end_pfn)
{
        /* Only adjust if ZONE_MOVABLE is on this node */
        if (zone_movable_pfn[nid]) {
                /* Size ZONE_MOVABLE */
                if (zone_type == ZONE_MOVABLE) {
                        *zone_start_pfn = zone_movable_pfn[nid];
                        *zone_end_pfn = min(node_end_pfn,
                                arch_zone_highest_possible_pfn[movable_zone]);

                /* Adjust for ZONE_MOVABLE starting within this range */
                } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
                                *zone_end_pfn > zone_movable_pfn[nid]) {
                        *zone_end_pfn = zone_movable_pfn[nid];

                /* Check if this whole range is within ZONE_MOVABLE */
                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
                        *zone_start_pfn = *zone_end_pfn;
        }
}

/*
 * Return the number of pages a zone spans in a node, including holes:
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
                                        unsigned long zone_type,
                                        unsigned long node_start_pfn,
                                        unsigned long node_end_pfn,
                                        unsigned long *ignored)
{
        unsigned long zone_start_pfn, zone_end_pfn;

        /* When hotadding a new node from cpu_up(), the node should be empty */
        if (!node_start_pfn && !node_end_pfn)
                return 0;

        /* Get the start and end of the zone */
        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
        adjust_zone_range_for_zone_movable(nid, zone_type,
                                node_start_pfn, node_end_pfn,
                                &zone_start_pfn, &zone_end_pfn);

        /* Check that this node has pages within the zone's required range */
        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
                return 0;

        /* Move the zone boundaries inside the node if necessary */
        zone_end_pfn = min(zone_end_pfn, node_end_pfn);
        zone_start_pfn = max(zone_start_pfn, node_start_pfn);

        /* Return the spanned pages */
        return zone_end_pfn - zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __meminit __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
                                unsigned long range_end_pfn)
{
        unsigned long nr_absent = range_end_pfn - range_start_pfn;
        unsigned long start_pfn, end_pfn;
        int i;

        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
                nr_absent -= end_pfn - start_pfn;
        }
        return nr_absent;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
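 *
 * For example, with memory registered at pfns [0, 100) and [200, 300),
 * absent_pages_in_range(0, 300) returns 100: the hole at [100, 200).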
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
                                                        unsigned long end_pfn)
{
        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
                                        unsigned long zone_type,
                                        unsigned long node_start_pfn,
                                        unsigned long node_end_pfn,
                                        unsigned long *ignored)
{
        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
        unsigned long zone_start_pfn, zone_end_pfn;

        /* When hotadding a new node from cpu_up(), the node should be empty */
        if (!node_start_pfn && !node_end_pfn)
                return 0;

        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

        adjust_zone_range_for_zone_movable(nid, zone_type,
                        node_start_pfn, node_end_pfn,
                        &zone_start_pfn, &zone_end_pfn);
        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
                                        unsigned long zone_type,
                                        unsigned long node_start_pfn,
                                        unsigned long node_end_pfn,
                                        unsigned long *zones_size)
{
        return zones_size[zone_type];
}

static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
                                                unsigned long zone_type,
                                                unsigned long node_start_pfn,
                                                unsigned long node_end_pfn,
                                                unsigned long *zholes_size)
{
        if (!zholes_size)
                return 0;

        return zholes_size[zone_type];
}

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
                                                unsigned long node_start_pfn,
                                                unsigned long node_end_pfn,
                                                unsigned long *zones_size,
                                                unsigned long *zholes_size)
{
        unsigned long realtotalpages = 0, totalpages = 0;
        enum zone_type i;

        for (i = 0; i < MAX_NR_ZONES; i++) {
                struct zone *zone = pgdat->node_zones + i;
                unsigned long size, real_size;

                size = zone_spanned_pages_in_node(pgdat->node_id, i,
                                                  node_start_pfn,
                                                  node_end_pfn,
                                                  zones_size);
                real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
                                                  node_start_pfn, node_end_pfn,
                                                  zholes_size);
                zone->spanned_pages = size;
                zone->present_pages = real_size;

                totalpages += size;
                realtotalpages += real_size;
        }

        pgdat->node_spanned_pages = totalpages;
        pgdat->node_present_pages = realtotalpages;
        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
                                                        realtotalpages);
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->pageblock_flags usemap, rounded to an
 * unsigned long. Start by making sure zonesize is a multiple of
 * pageblock_order by rounding up, then use one NR_PAGEBLOCK_BITS worth of
 * bits per pageblock, round what is now in bits up to the nearest long,
 * and return it in bytes.
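 *
 * Worked example, assuming 4KiB pages, pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4: an aligned 1GiB zone spans 262144 pages, i.e.
 * 512 pageblocks, needing 512 * 4 = 2048 bits; 2048 is already a
 * multiple of the long size in bits, so the usemap takes 256 bytes.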
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
        unsigned long usemapsize;

        zonesize += zone_start_pfn & (pageblock_nr_pages-1);
        usemapsize = roundup(zonesize, pageblock_nr_pages);
        usemapsize = usemapsize >> pageblock_order;
        usemapsize *= NR_PAGEBLOCK_BITS;
        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

        return usemapsize / 8;
}

static void __init setup_usemap(struct pglist_data *pgdat,
                                struct zone *zone,
                                unsigned long zone_start_pfn,
                                unsigned long zonesize)
{
        unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);

        zone->pageblock_flags = NULL;
        if (usemapsize)
                zone->pageblock_flags =
                        memblock_virt_alloc_node_nopanic(usemapsize,
                                                         pgdat->node_id);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
                                unsigned long zone_start_pfn, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __paginginit set_pageblock_order(void)
{
        unsigned int order;

        /* Check that pageblock_nr_pages has not already been setup */
        if (pageblock_order)
                return;

        if (HPAGE_SHIFT > PAGE_SHIFT)
                order = HUGETLB_PAGE_ORDER;
        else
                order = MAX_ORDER - 1;

        /*
         * Assume the largest contiguous order of interest is a huge page.
         * This value may be variable depending on boot parameters on IA64 and
         * powerpc.
         */
        pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config.
 */
void __paginginit set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
                                                   unsigned long present_pages)
{
        unsigned long pages = spanned_pages;

        /*
         * Provide a more accurate estimation if there are holes within
         * the zone and SPARSEMEM is in use. If there are holes within the
         * zone, each populated memory region may cost us one or two extra
         * memmap pages due to alignment because memmap pages for each
         * populated region may not be naturally aligned on a page boundary.
         * So the (present_pages >> 4) heuristic is a tradeoff for that.
         */
        if (spanned_pages > present_pages + (present_pages >> 4) &&
            IS_ENABLED(CONFIG_SPARSEMEM))
                pages = present_pages;

        return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
5175 */ 5176 static void __paginginit free_area_init_core(struct pglist_data *pgdat) 5177 { 5178 enum zone_type j; 5179 int nid = pgdat->node_id; 5180 unsigned long zone_start_pfn = pgdat->node_start_pfn; 5181 int ret; 5182 5183 pgdat_resize_init(pgdat); 5184 #ifdef CONFIG_NUMA_BALANCING 5185 spin_lock_init(&pgdat->numabalancing_migrate_lock); 5186 pgdat->numabalancing_migrate_nr_pages = 0; 5187 pgdat->numabalancing_migrate_next_window = jiffies; 5188 #endif 5189 init_waitqueue_head(&pgdat->kswapd_wait); 5190 init_waitqueue_head(&pgdat->pfmemalloc_wait); 5191 pgdat_page_ext_init(pgdat); 5192 5193 for (j = 0; j < MAX_NR_ZONES; j++) { 5194 struct zone *zone = pgdat->node_zones + j; 5195 unsigned long size, realsize, freesize, memmap_pages; 5196 5197 size = zone->spanned_pages; 5198 realsize = freesize = zone->present_pages; 5199 5200 /* 5201 * Adjust freesize so that it accounts for how much memory 5202 * is used by this zone for memmap. This affects the watermark 5203 * and per-cpu initialisations 5204 */ 5205 memmap_pages = calc_memmap_size(size, realsize); 5206 if (!is_highmem_idx(j)) { 5207 if (freesize >= memmap_pages) { 5208 freesize -= memmap_pages; 5209 if (memmap_pages) 5210 printk(KERN_DEBUG 5211 " %s zone: %lu pages used for memmap\n", 5212 zone_names[j], memmap_pages); 5213 } else 5214 printk(KERN_WARNING 5215 " %s zone: %lu pages exceeds freesize %lu\n", 5216 zone_names[j], memmap_pages, freesize); 5217 } 5218 5219 /* Account for reserved pages */ 5220 if (j == 0 && freesize > dma_reserve) { 5221 freesize -= dma_reserve; 5222 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 5223 zone_names[0], dma_reserve); 5224 } 5225 5226 if (!is_highmem_idx(j)) 5227 nr_kernel_pages += freesize; 5228 /* Charge for highmem memmap if there are enough kernel pages */ 5229 else if (nr_kernel_pages > memmap_pages * 2) 5230 nr_kernel_pages -= memmap_pages; 5231 nr_all_pages += freesize; 5232 5233 /* 5234 * Set an approximate value for lowmem here, it will be adjusted 5235 * when the bootmem allocator frees pages into the buddy system. 5236 * And all highmem pages will be managed by the buddy system. 5237 */ 5238 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 5239 #ifdef CONFIG_NUMA 5240 zone->node = nid; 5241 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) 5242 / 100; 5243 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; 5244 #endif 5245 zone->name = zone_names[j]; 5246 spin_lock_init(&zone->lock); 5247 spin_lock_init(&zone->lru_lock); 5248 zone_seqlock_init(zone); 5249 zone->zone_pgdat = pgdat; 5250 zone_pcp_init(zone); 5251 5252 /* For bootup, initialized properly in watermark setup */ 5253 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); 5254 5255 lruvec_init(&zone->lruvec); 5256 if (!size) 5257 continue; 5258 5259 set_pageblock_order(); 5260 setup_usemap(pgdat, zone, zone_start_pfn, size); 5261 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 5262 BUG_ON(ret); 5263 memmap_init(size, nid, j, zone_start_pfn); 5264 zone_start_pfn += size; 5265 } 5266 } 5267 5268 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 5269 { 5270 unsigned long __maybe_unused start = 0; 5271 unsigned long __maybe_unused offset = 0; 5272 5273 /* Skip empty nodes */ 5274 if (!pgdat->node_spanned_pages) 5275 return; 5276 5277 #ifdef CONFIG_FLAT_NODE_MEM_MAP 5278 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 5279 offset = pgdat->node_start_pfn - start; 5280 /* ia64 gets its own node_mem_map, before this, without bootmem */ 5281 if (!pgdat->node_mem_map) { 5282 unsigned long size, end; 5283 struct page *map; 5284 5285 /* 5286 * The zone's endpoints aren't required to be MAX_ORDER 5287 * aligned but the node_mem_map endpoints must be in order 5288 * for the buddy allocator to function correctly. 5289 */ 5290 end = pgdat_end_pfn(pgdat); 5291 end = ALIGN(end, MAX_ORDER_NR_PAGES); 5292 size = (end - start) * sizeof(struct page); 5293 map = alloc_remap(pgdat->node_id, size); 5294 if (!map) 5295 map = memblock_virt_alloc_node_nopanic(size, 5296 pgdat->node_id); 5297 pgdat->node_mem_map = map + offset; 5298 } 5299 #ifndef CONFIG_NEED_MULTIPLE_NODES 5300 /* 5301 * With no DISCONTIG, the global mem_map is just set as node 0's 5302 */ 5303 if (pgdat == NODE_DATA(0)) { 5304 mem_map = NODE_DATA(0)->node_mem_map; 5305 #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) 5306 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 5307 mem_map -= offset; 5308 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5309 } 5310 #endif 5311 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 5312 } 5313 5314 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 5315 unsigned long node_start_pfn, unsigned long *zholes_size) 5316 { 5317 pg_data_t *pgdat = NODE_DATA(nid); 5318 unsigned long start_pfn = 0; 5319 unsigned long end_pfn = 0; 5320 5321 /* pg_data_t should be reset to zero when it's allocated */ 5322 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); 5323 5324 reset_deferred_meminit(pgdat); 5325 pgdat->node_id = nid; 5326 pgdat->node_start_pfn = node_start_pfn; 5327 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5328 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 5329 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 5330 (u64)start_pfn << PAGE_SHIFT, 5331 end_pfn ? 
((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 5332 #endif 5333 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 5334 zones_size, zholes_size); 5335 5336 alloc_node_mem_map(pgdat); 5337 #ifdef CONFIG_FLAT_NODE_MEM_MAP 5338 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 5339 nid, (unsigned long)pgdat, 5340 (unsigned long)pgdat->node_mem_map); 5341 #endif 5342 5343 free_area_init_core(pgdat); 5344 } 5345 5346 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5347 5348 #if MAX_NUMNODES > 1 5349 /* 5350 * Figure out the number of possible node ids. 5351 */ 5352 void __init setup_nr_node_ids(void) 5353 { 5354 unsigned int highest; 5355 5356 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 5357 nr_node_ids = highest + 1; 5358 } 5359 #endif 5360 5361 /** 5362 * node_map_pfn_alignment - determine the maximum internode alignment 5363 * 5364 * This function should be called after node map is populated and sorted. 5365 * It calculates the maximum power of two alignment which can distinguish 5366 * all the nodes. 5367 * 5368 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 5369 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 5370 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 5371 * shifted, 1GiB is enough and this function will indicate so. 5372 * 5373 * This is used to test whether pfn -> nid mapping of the chosen memory 5374 * model has fine enough granularity to avoid incorrect mapping for the 5375 * populated node map. 5376 * 5377 * Returns the determined alignment in pfn's. 0 if there is no alignment 5378 * requirement (single node). 5379 */ 5380 unsigned long __init node_map_pfn_alignment(void) 5381 { 5382 unsigned long accl_mask = 0, last_end = 0; 5383 unsigned long start, end, mask; 5384 int last_nid = -1; 5385 int i, nid; 5386 5387 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 5388 if (!start || last_nid < 0 || last_nid == nid) { 5389 last_nid = nid; 5390 last_end = end; 5391 continue; 5392 } 5393 5394 /* 5395 * Start with a mask granular enough to pin-point to the 5396 * start pfn and tick off bits one-by-one until it becomes 5397 * too coarse to separate the current node from the last. 5398 */ 5399 mask = ~((1 << __ffs(start)) - 1); 5400 while (mask && last_end <= (start & (mask << 1))) 5401 mask <<= 1; 5402 5403 /* accumulate all internode masks */ 5404 accl_mask |= mask; 5405 } 5406 5407 /* convert mask to number of pages */ 5408 return ~accl_mask + 1; 5409 } 5410 5411 /* Find the lowest pfn for a node */ 5412 static unsigned long __init find_min_pfn_for_node(int nid) 5413 { 5414 unsigned long min_pfn = ULONG_MAX; 5415 unsigned long start_pfn; 5416 int i; 5417 5418 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 5419 min_pfn = min(min_pfn, start_pfn); 5420 5421 if (min_pfn == ULONG_MAX) { 5422 printk(KERN_WARNING 5423 "Could not find start_pfn for node %d\n", nid); 5424 return 0; 5425 } 5426 5427 return min_pfn; 5428 } 5429 5430 /** 5431 * find_min_pfn_with_active_regions - Find the minimum PFN registered 5432 * 5433 * It returns the minimum PFN based on information provided via 5434 * memblock_set_node(). 5435 */ 5436 unsigned long __init find_min_pfn_with_active_regions(void) 5437 { 5438 return find_min_pfn_for_node(MAX_NUMNODES); 5439 } 5440 5441 /* 5442 * early_calculate_totalpages() 5443 * Sum pages in active regions for movable zone. 5444 * Populate N_MEMORY for calculating usable_nodes. 
 */
static unsigned long __init early_calculate_totalpages(void)
{
        unsigned long totalpages = 0;
        unsigned long start_pfn, end_pfn;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                unsigned long pages = end_pfn - start_pfn;

                totalpages += pages;
                if (pages)
                        node_set_state(nid, N_MEMORY);
        }
        return totalpages;
}

/*
 * Find the PFN at which the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
        int i, nid;
        unsigned long usable_startpfn;
        unsigned long kernelcore_node, kernelcore_remaining;
        /* save the state before borrowing the nodemask */
        nodemask_t saved_node_state = node_states[N_MEMORY];
        unsigned long totalpages = early_calculate_totalpages();
        int usable_nodes = nodes_weight(node_states[N_MEMORY]);
        struct memblock_region *r;

        /* Need to find movable_zone earlier when movable_node is specified. */
        find_usable_zone_for_movable();

        /*
         * If movable_node is specified, ignore kernelcore and movablecore
         * options.
         */
        if (movable_node_is_enabled()) {
                for_each_memblock(memory, r) {
                        if (!memblock_is_hotpluggable(r))
                                continue;

                        nid = r->nid;

                        usable_startpfn = PFN_DOWN(r->base);
                        zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
                                min(usable_startpfn, zone_movable_pfn[nid]) :
                                usable_startpfn;
                }

                goto out2;
        }

        /*
         * If movablecore=nn[KMG] was specified, calculate the size of
         * kernelcore that corresponds so that memory usable for any
         * allocation type is evenly spread. If both kernelcore and
         * movablecore are specified, then the value of kernelcore will be
         * used for required_kernelcore if it's greater than what movablecore
         * would have allowed.
         */
        if (required_movablecore) {
                unsigned long corepages;

                /*
                 * Round-up so that ZONE_MOVABLE is at least as large as what
                 * was requested by the user
                 */
                required_movablecore =
                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
                required_movablecore = min(totalpages, required_movablecore);
                corepages = totalpages - required_movablecore;

                required_kernelcore = max(required_kernelcore, corepages);
        }

        /*
         * If kernelcore was not specified or the kernelcore size is larger
         * than totalpages, there is no ZONE_MOVABLE.
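         *
         * Illustrative example: on a machine with 4GiB of registered
         * memory, movablecore=1G makes corepages 3GiB, so at least 3GiB
         * is kept for the kernel zones and at most 1GiB ends up in
         * ZONE_MOVABLE.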
5528 */ 5529 if (!required_kernelcore || required_kernelcore >= totalpages) 5530 goto out; 5531 5532 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 5533 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 5534 5535 restart: 5536 /* Spread kernelcore memory as evenly as possible throughout nodes */ 5537 kernelcore_node = required_kernelcore / usable_nodes; 5538 for_each_node_state(nid, N_MEMORY) { 5539 unsigned long start_pfn, end_pfn; 5540 5541 /* 5542 * Recalculate kernelcore_node if the division per node 5543 * now exceeds what is necessary to satisfy the requested 5544 * amount of memory for the kernel 5545 */ 5546 if (required_kernelcore < kernelcore_node) 5547 kernelcore_node = required_kernelcore / usable_nodes; 5548 5549 /* 5550 * As the map is walked, we track how much memory is usable 5551 * by the kernel using kernelcore_remaining. When it is 5552 * 0, the rest of the node is usable by ZONE_MOVABLE 5553 */ 5554 kernelcore_remaining = kernelcore_node; 5555 5556 /* Go through each range of PFNs within this node */ 5557 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 5558 unsigned long size_pages; 5559 5560 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 5561 if (start_pfn >= end_pfn) 5562 continue; 5563 5564 /* Account for what is only usable for kernelcore */ 5565 if (start_pfn < usable_startpfn) { 5566 unsigned long kernel_pages; 5567 kernel_pages = min(end_pfn, usable_startpfn) 5568 - start_pfn; 5569 5570 kernelcore_remaining -= min(kernel_pages, 5571 kernelcore_remaining); 5572 required_kernelcore -= min(kernel_pages, 5573 required_kernelcore); 5574 5575 /* Continue if range is now fully accounted */ 5576 if (end_pfn <= usable_startpfn) { 5577 5578 /* 5579 * Push zone_movable_pfn to the end so 5580 * that if we have to rebalance 5581 * kernelcore across nodes, we will 5582 * not double account here 5583 */ 5584 zone_movable_pfn[nid] = end_pfn; 5585 continue; 5586 } 5587 start_pfn = usable_startpfn; 5588 } 5589 5590 /* 5591 * The usable PFN range for ZONE_MOVABLE is from 5592 * start_pfn->end_pfn. Calculate size_pages as the 5593 * number of pages used as kernelcore 5594 */ 5595 size_pages = end_pfn - start_pfn; 5596 if (size_pages > kernelcore_remaining) 5597 size_pages = kernelcore_remaining; 5598 zone_movable_pfn[nid] = start_pfn + size_pages; 5599 5600 /* 5601 * Some kernelcore has been met, update counts and 5602 * break if the kernelcore for this node has been 5603 * satisfied 5604 */ 5605 required_kernelcore -= min(required_kernelcore, 5606 size_pages); 5607 kernelcore_remaining -= size_pages; 5608 if (!kernelcore_remaining) 5609 break; 5610 } 5611 } 5612 5613 /* 5614 * If there is still required_kernelcore, we do another pass with one 5615 * less node in the count. This will push zone_movable_pfn[nid] further 5616 * along on the nodes that still have memory until kernelcore is 5617 * satisfied 5618 */ 5619 usable_nodes--; 5620 if (usable_nodes && required_kernelcore > usable_nodes) 5621 goto restart; 5622 5623 out2: 5624 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 5625 for (nid = 0; nid < MAX_NUMNODES; nid++) 5626 zone_movable_pfn[nid] = 5627 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 5628 5629 out: 5630 /* restore the node_state */ 5631 node_states[N_MEMORY] = saved_node_state; 5632 } 5633 5634 /* Any regular or high memory on that node ? 
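 * (On kernels built without highmem or movable-node support these node
 * states alias one another, which is what the early return below checks.)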

/* Any regular or high memory on that node? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
        enum zone_type zone_type;

        if (N_MEMORY == N_NORMAL_MEMORY)
                return;

        for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
                struct zone *zone = &pgdat->node_zones[zone_type];
                if (populated_zone(zone)) {
                        node_set_state(nid, N_HIGH_MEMORY);
                        if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
                            zone_type <= ZONE_NORMAL)
                                node_set_state(nid, N_NORMAL_MEMORY);
                        break;
                }
        }
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFNs of
 * two adjacent zones match, the zone between them is assumed to be empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
        unsigned long start_pfn, end_pfn;
        int i, nid;

        /* Record where the zone boundaries are */
        memset(arch_zone_lowest_possible_pfn, 0,
                                sizeof(arch_zone_lowest_possible_pfn));
        memset(arch_zone_highest_possible_pfn, 0,
                                sizeof(arch_zone_highest_possible_pfn));
        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
        for (i = 1; i < MAX_NR_ZONES; i++) {
                if (i == ZONE_MOVABLE)
                        continue;
                arch_zone_lowest_possible_pfn[i] =
                        arch_zone_highest_possible_pfn[i-1];
                arch_zone_highest_possible_pfn[i] =
                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
        }
        arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
        arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;

        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
        find_zone_movable_pfns_for_nodes();

        /* Print out the zone ranges */
        pr_info("Zone ranges:\n");
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (i == ZONE_MOVABLE)
                        continue;
                pr_info("  %-8s ", zone_names[i]);
                if (arch_zone_lowest_possible_pfn[i] ==
                                arch_zone_highest_possible_pfn[i])
                        pr_cont("empty\n");
                else
                        pr_cont("[mem %#018Lx-%#018Lx]\n",
                                (u64)arch_zone_lowest_possible_pfn[i]
                                        << PAGE_SHIFT,
                                ((u64)arch_zone_highest_possible_pfn[i]
                                        << PAGE_SHIFT) - 1);
        }

        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
        pr_info("Movable zone start for each node\n");
        for (i = 0; i < MAX_NUMNODES; i++) {
                if (zone_movable_pfn[i])
                        pr_info("  Node %d: %#018Lx\n", i,
                               (u64)zone_movable_pfn[i] << PAGE_SHIFT);
        }

        /* Print out the early node map */
        pr_info("Early memory node ranges\n");
        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
                pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
                        (u64)start_pfn << PAGE_SHIFT,
                        ((u64)end_pfn << PAGE_SHIFT) - 1);

        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                free_area_init_node(nid, NULL,
                                find_min_pfn_for_node(nid), NULL);

                /* Any memory on that node? */
                if (pgdat->node_present_pages)
                        node_set_state(nid, N_MEMORY);
                check_for_memory(pgdat, nid);
        }
}
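/*
 * Illustrative example (hypothetical numbers, not from the original
 * source): on an x86_64 machine with 8GB of RAM and a minimum PFN of 1,
 * the architecture might pass max_zone_pfn[] = { 0x1000, 0x100000,
 * 0x200000 } for ZONE_DMA, ZONE_DMA32 and ZONE_NORMAL. The boundary
 * loop above then derives
 *
 *      DMA      [mem 0x0000000000001000-0x0000000000ffffff]
 *      DMA32    [mem 0x0000000001000000-0x00000000ffffffff]
 *      Normal   [mem 0x0000000100000000-0x00000001ffffffff]
 *
 * i.e. each zone starts at the previous zone's maximum PFN, exactly as
 * shown by the "Zone ranges:" boot output.
 */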

static int __init cmdline_parse_core(char *p, unsigned long *core)
{
        unsigned long long coremem;
        if (!p)
                return -EINVAL;

        coremem = memparse(p, &p);
        *core = coremem >> PAGE_SHIFT;

        /* Paranoid check that UL is enough for the coremem value */
        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

        return 0;
}

/*
 * kernelcore=size sets the amount of memory to use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
        return cmdline_parse_core(p, &required_kernelcore);
}

/*
 * movablecore=size sets the amount of memory to use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
        return cmdline_parse_core(p, &required_movablecore);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

void adjust_managed_page_count(struct page *page, long count)
{
        spin_lock(&managed_page_count_lock);
        page_zone(page)->managed_pages += count;
        totalram_pages += count;
#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
                totalhigh_pages += count;
#endif
        spin_unlock(&managed_page_count_lock);
}
EXPORT_SYMBOL(adjust_managed_page_count);

unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
{
        void *pos;
        unsigned long pages = 0;

        start = (void *)PAGE_ALIGN((unsigned long)start);
        end = (void *)((unsigned long)end & PAGE_MASK);
        for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
                if ((unsigned int)poison <= 0xFF)
                        memset(pos, poison, PAGE_SIZE);
                free_reserved_page(virt_to_page(pos));
        }

        if (pages && s)
                pr_info("Freeing %s memory: %ldK (%p - %p)\n",
                        s, pages << (PAGE_SHIFT - 10), start, end);

        return pages;
}
EXPORT_SYMBOL(free_reserved_area);
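/*
 * Usage sketch (hedged; the exact caller varies by architecture): this
 * is roughly how generic code hands the no-longer-needed init sections
 * back to the page allocator once boot is complete, e.g. from a
 * free_initmem()-style helper:
 *
 *      free_reserved_area(&__init_begin, &__init_end,
 *                         POISON_FREE_INITMEM, "unused kernel");
 *
 * Each page in the range is poisoned (when the poison value fits in a
 * byte), released via free_reserved_page(), and counted for the
 * "Freeing ... memory" boot message above.
 */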
5843 */ 5844 #define adj_init_size(start, end, size, pos, adj) \ 5845 do { \ 5846 if (start <= pos && pos < end && size > adj) \ 5847 size -= adj; \ 5848 } while (0) 5849 5850 adj_init_size(__init_begin, __init_end, init_data_size, 5851 _sinittext, init_code_size); 5852 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 5853 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 5854 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 5855 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 5856 5857 #undef adj_init_size 5858 5859 pr_info("Memory: %luK/%luK available " 5860 "(%luK kernel code, %luK rwdata, %luK rodata, " 5861 "%luK init, %luK bss, %luK reserved, %luK cma-reserved" 5862 #ifdef CONFIG_HIGHMEM 5863 ", %luK highmem" 5864 #endif 5865 "%s%s)\n", 5866 nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10), 5867 codesize >> 10, datasize >> 10, rosize >> 10, 5868 (init_data_size + init_code_size) >> 10, bss_size >> 10, 5869 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10), 5870 totalcma_pages << (PAGE_SHIFT-10), 5871 #ifdef CONFIG_HIGHMEM 5872 totalhigh_pages << (PAGE_SHIFT-10), 5873 #endif 5874 str ? ", " : "", str ? str : ""); 5875 } 5876 5877 /** 5878 * set_dma_reserve - set the specified number of pages reserved in the first zone 5879 * @new_dma_reserve: The number of pages to mark reserved 5880 * 5881 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 5882 * In the DMA zone, a significant percentage may be consumed by kernel image 5883 * and other unfreeable allocations which can skew the watermarks badly. This 5884 * function may optionally be used to account for unfreeable pages in the 5885 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 5886 * smaller per-cpu batchsize. 5887 */ 5888 void __init set_dma_reserve(unsigned long new_dma_reserve) 5889 { 5890 dma_reserve = new_dma_reserve; 5891 } 5892 5893 void __init free_area_init(unsigned long *zones_size) 5894 { 5895 free_area_init_node(0, zones_size, 5896 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 5897 } 5898 5899 static int page_alloc_cpu_notify(struct notifier_block *self, 5900 unsigned long action, void *hcpu) 5901 { 5902 int cpu = (unsigned long)hcpu; 5903 5904 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 5905 lru_add_drain_cpu(cpu); 5906 drain_pages(cpu); 5907 5908 /* 5909 * Spill the event counters of the dead processor 5910 * into the current processors event counters. 5911 * This artificially elevates the count of the current 5912 * processor. 5913 */ 5914 vm_events_fold_cpu(cpu); 5915 5916 /* 5917 * Zero the differential counters of the dead processor 5918 * so that the vm statistics are consistent. 5919 * 5920 * This is only okay since the processor is dead and cannot 5921 * race with what we are doing. 5922 */ 5923 cpu_vm_stats_fold(cpu); 5924 } 5925 return NOTIFY_OK; 5926 } 5927 5928 void __init page_alloc_init(void) 5929 { 5930 hotcpu_notifier(page_alloc_cpu_notify, 0); 5931 } 5932 5933 /* 5934 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5935 * or min_free_kbytes changes. 
5936 */ 5937 static void calculate_totalreserve_pages(void) 5938 { 5939 struct pglist_data *pgdat; 5940 unsigned long reserve_pages = 0; 5941 enum zone_type i, j; 5942 5943 for_each_online_pgdat(pgdat) { 5944 for (i = 0; i < MAX_NR_ZONES; i++) { 5945 struct zone *zone = pgdat->node_zones + i; 5946 long max = 0; 5947 5948 /* Find valid and maximum lowmem_reserve in the zone */ 5949 for (j = i; j < MAX_NR_ZONES; j++) { 5950 if (zone->lowmem_reserve[j] > max) 5951 max = zone->lowmem_reserve[j]; 5952 } 5953 5954 /* we treat the high watermark as reserved pages. */ 5955 max += high_wmark_pages(zone); 5956 5957 if (max > zone->managed_pages) 5958 max = zone->managed_pages; 5959 reserve_pages += max; 5960 /* 5961 * Lowmem reserves are not available to 5962 * GFP_HIGHUSER page cache allocations and 5963 * kswapd tries to balance zones to their high 5964 * watermark. As a result, neither should be 5965 * regarded as dirtyable memory, to prevent a 5966 * situation where reclaim has to clean pages 5967 * in order to balance the zones. 5968 */ 5969 zone->dirty_balance_reserve = max; 5970 } 5971 } 5972 dirty_balance_reserve = reserve_pages; 5973 totalreserve_pages = reserve_pages; 5974 } 5975 5976 /* 5977 * setup_per_zone_lowmem_reserve - called whenever 5978 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 5979 * has a correct pages reserved value, so an adequate number of 5980 * pages are left in the zone after a successful __alloc_pages(). 5981 */ 5982 static void setup_per_zone_lowmem_reserve(void) 5983 { 5984 struct pglist_data *pgdat; 5985 enum zone_type j, idx; 5986 5987 for_each_online_pgdat(pgdat) { 5988 for (j = 0; j < MAX_NR_ZONES; j++) { 5989 struct zone *zone = pgdat->node_zones + j; 5990 unsigned long managed_pages = zone->managed_pages; 5991 5992 zone->lowmem_reserve[j] = 0; 5993 5994 idx = j; 5995 while (idx) { 5996 struct zone *lower_zone; 5997 5998 idx--; 5999 6000 if (sysctl_lowmem_reserve_ratio[idx] < 1) 6001 sysctl_lowmem_reserve_ratio[idx] = 1; 6002 6003 lower_zone = pgdat->node_zones + idx; 6004 lower_zone->lowmem_reserve[j] = managed_pages / 6005 sysctl_lowmem_reserve_ratio[idx]; 6006 managed_pages += lower_zone->managed_pages; 6007 } 6008 } 6009 } 6010 6011 /* update totalreserve_pages */ 6012 calculate_totalreserve_pages(); 6013 } 6014 6015 static void __setup_per_zone_wmarks(void) 6016 { 6017 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6018 unsigned long lowmem_pages = 0; 6019 struct zone *zone; 6020 unsigned long flags; 6021 6022 /* Calculate total number of !ZONE_HIGHMEM pages */ 6023 for_each_zone(zone) { 6024 if (!is_highmem(zone)) 6025 lowmem_pages += zone->managed_pages; 6026 } 6027 6028 for_each_zone(zone) { 6029 u64 tmp; 6030 6031 spin_lock_irqsave(&zone->lock, flags); 6032 tmp = (u64)pages_min * zone->managed_pages; 6033 do_div(tmp, lowmem_pages); 6034 if (is_highmem(zone)) { 6035 /* 6036 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6037 * need highmem pages, so cap pages_min to a small 6038 * value here. 6039 * 6040 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6041 * deltas control asynch page reclaim, and so should 6042 * not be capped for highmem. 6043 */ 6044 unsigned long min_pages; 6045 6046 min_pages = zone->managed_pages / 1024; 6047 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6048 zone->watermark[WMARK_MIN] = min_pages; 6049 } else { 6050 /* 6051 * If it's a lowmem zone, reserve a number of pages 6052 * proportionate to the zone's size. 

static void __setup_per_zone_wmarks(void)
{
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
        struct zone *zone;
        unsigned long flags;

        /* Calculate total number of !ZONE_HIGHMEM pages */
        for_each_zone(zone) {
                if (!is_highmem(zone))
                        lowmem_pages += zone->managed_pages;
        }

        for_each_zone(zone) {
                u64 tmp;

                spin_lock_irqsave(&zone->lock, flags);
                tmp = (u64)pages_min * zone->managed_pages;
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
                        /*
                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
                         * need highmem pages, so cap pages_min to a small
                         * value here.
                         *
                         * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
                         * deltas control async page reclaim, and so should
                         * not be capped for highmem.
                         */
                        unsigned long min_pages;

                        min_pages = zone->managed_pages / 1024;
                        min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
                        zone->watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
                        zone->watermark[WMARK_MIN] = tmp;
                }

                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);

                __mod_zone_page_state(zone, NR_ALLOC_BATCH,
                        high_wmark_pages(zone) - low_wmark_pages(zone) -
                        atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));

                spin_unlock_irqrestore(&zone->lock, flags);
        }

        /* update totalreserve_pages */
        calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
        mutex_lock(&zonelists_mutex);
        __setup_per_zone_wmarks();
        mutex_unlock(&zonelists_mutex);
}
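/*
 * Worked example (hypothetical numbers): with min_free_kbytes = 4096
 * and 4K pages, pages_min = 4096 >> 2 = 1024 pages. A lowmem zone
 * managing 1GB out of 4GB of total lowmem receives a quarter of that:
 *
 *      tmp        = 1024 * (1GB / 4GB) = 256 pages
 *      WMARK_MIN  = 256
 *      WMARK_LOW  = 256 + (256 >> 2) = 320
 *      WMARK_HIGH = 256 + (256 >> 1) = 384
 *
 * kswapd is woken when free pages drop below WMARK_LOW and sleeps again
 * once the zone is back above WMARK_HIGH, so the tmp >> 2 and tmp >> 1
 * deltas directly size that reclaim window.
 */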

/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
        unsigned int gb, ratio;

        /* Zone size in gigabytes */
        gb = zone->managed_pages >> (30 - PAGE_SHIFT);
        if (gb)
                ratio = int_sqrt(10 * gb);
        else
                ratio = 1;

        zone->inactive_ratio = ratio;
}

static void __meminit setup_per_zone_inactive_ratio(void)
{
        struct zone *zone;

        for_each_zone(zone)
                calculate_zone_inactive_ratio(zone);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:        512k
 * 32MB:        724k
 * 64MB:        1024k
 * 128MB:       1448k
 * 256MB:       2048k
 * 512MB:       2896k
 * 1024MB:      4096k
 * 2048MB:      5792k
 * 4096MB:      8192k
 * 8192MB:      11584k
 * 16384MB:     16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
        unsigned long lowmem_kbytes;
        int new_min_free_kbytes;

        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
        new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

        if (new_min_free_kbytes > user_min_free_kbytes) {
                min_free_kbytes = new_min_free_kbytes;
                if (min_free_kbytes < 128)
                        min_free_kbytes = 128;
                if (min_free_kbytes > 65536)
                        min_free_kbytes = 65536;
        } else {
                pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
                                new_min_free_kbytes, user_min_free_kbytes);
        }
        setup_per_zone_wmarks();
        refresh_zone_stat_thresholds();
        setup_per_zone_lowmem_reserve();
        setup_per_zone_inactive_ratio();
        return 0;
}
module_init(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *      that we can call two helper functions whenever min_free_kbytes
 *      changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        int rc;

        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;

        if (write) {
                user_min_free_kbytes = min_free_kbytes;
                setup_per_zone_wmarks();
        }
        return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int rc;

        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;

        for_each_zone(zone)
                zone->min_unmapped_pages = (zone->managed_pages *
                                sysctl_min_unmapped_ratio) / 100;
        return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int rc;

        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;

        for_each_zone(zone)
                zone->min_slab_pages = (zone->managed_pages *
                                sysctl_min_slab_ratio) / 100;
        return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *      whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it is
 * only meaningful in relation to the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, buffer, length, ppos);
        setup_per_zone_lowmem_reserve();
        return 0;
}
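/*
 * Worked example for the two NUMA sysctls above (hypothetical numbers):
 * with vm.min_unmapped_ratio = 1 on a zone managing 1,000,000 pages,
 *
 *      zone->min_unmapped_pages = 1000000 * 1 / 100 = 10000 pages
 *
 * so zone reclaim only runs when more than 10,000 of the zone's pages
 * are unmapped file-backed cache; vm.min_slab_ratio feeds the analogous
 * threshold for reclaimable slab memory.
 */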

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of the total pages in each zone that a hot
 * per-cpu pagelist can have before it gets flushed back to the buddy
 * allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int old_percpu_pagelist_fraction;
        int ret;

        mutex_lock(&pcp_batch_high_lock);
        old_percpu_pagelist_fraction = percpu_pagelist_fraction;

        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (!write || ret < 0)
                goto out;

        /* Sanity checking to avoid pcp imbalance */
        if (percpu_pagelist_fraction &&
            percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
                percpu_pagelist_fraction = old_percpu_pagelist_fraction;
                ret = -EINVAL;
                goto out;
        }

        /* No change? */
        if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
                goto out;

        for_each_populated_zone(zone) {
                unsigned int cpu;

                for_each_possible_cpu(cpu)
                        pageset_set_high_and_batch(zone,
                                        per_cpu_ptr(zone->pageset, cpu));
        }
out:
        mutex_unlock(&pcp_batch_high_lock);
        return ret;
}

#ifdef CONFIG_NUMA
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
        if (!str)
                return 0;
        hashdist = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("hashdist=", set_hashdist);
#endif
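/*
 * Worked example (illustrative; the exact batch clamping lives in
 * pageset_set_high_and_batch()): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone managing 262144 pages
 * gives each CPU's hot pagelist a high mark of roughly
 *
 *      pcp->high = 262144 / 8 = 32768 pages
 *
 * with the flush batch derived from that value, so a smaller fraction
 * means larger per-cpu caches and fewer trips to the buddy lists.
 */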

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit)
{
        unsigned long long max = high_limit;
        unsigned long log2qty, size;
        void *table = NULL;

        /* allow the kernel cmdline to have a say */
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
                numentries = nr_kernel_pages;

                /* It isn't necessary when PAGE_SIZE >= 1MB */
                if (PAGE_SHIFT < 20)
                        numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

                /* limit to 1 bucket per 2^scale bytes of low memory */
                if (scale > PAGE_SHIFT)
                        numentries >>= (scale - PAGE_SHIFT);
                else
                        numentries <<= (PAGE_SHIFT - scale);

                /* Make sure we've got at least a 0-order allocation. */
                if (unlikely(flags & HASH_SMALL)) {
                        /* Makes no sense without HASH_EARLY */
                        WARN_ON(!(flags & HASH_EARLY));
                        if (!(numentries >> *_hash_shift)) {
                                numentries = 1UL << *_hash_shift;
                                BUG_ON(!numentries);
                        }
                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
                        numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);

        /* limit allocation size to 1/16 total memory by default */
        if (max == 0) {
                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
                do_div(max, bucketsize);
        }
        max = min(max, 0x80000000ULL);

        if (numentries < low_limit)
                numentries = low_limit;
        if (numentries > max)
                numentries = max;

        log2qty = ilog2(numentries);

        do {
                size = bucketsize << log2qty;
                if (flags & HASH_EARLY)
                        table = memblock_virt_alloc_nopanic(size, 0);
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
                        /*
                         * If bucketsize is not a power of two, we may free
                         * some pages at the end of the hash table, which
                         * alloc_pages_exact() does automatically.
                         */
                        if (get_order(size) < MAX_ORDER) {
                                table = alloc_pages_exact(size, GFP_ATOMIC);
                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
                        }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);

        if (!table)
                panic("Failed to allocate %s hash table\n", tablename);

        printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
               tablename,
               (1UL << log2qty),
               ilog2(size) - PAGE_SHIFT,
               size);

        if (_hash_shift)
                *_hash_shift = log2qty;
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;

        return table;
}
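/*
 * Usage sketch (hedged; parameters paraphrased from a typical caller
 * such as the inode cache, not copied verbatim):
 *
 *      inode_hashtable =
 *              alloc_large_system_hash("Inode-cache",
 *                                      sizeof(struct hlist_head),
 *                                      ihash_entries,
 *                                      14,     // one bucket per 16KB
 *                                      HASH_EARLY,
 *                                      &i_hash_shift,
 *                                      &i_hash_mask,
 *                                      0, 0);
 *
 * With ihash_entries == 0 the table is sized from nr_kernel_pages using
 * the 2^14 scale, and the resulting shift/mask pair is what the caller
 * uses to index buckets.
 */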

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
                                                  unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        return __pfn_to_section(pfn)->pageblock_flags;
#else
        return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        pfn &= (PAGES_PER_SECTION-1);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
        pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        struct zone *zone;
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long word;

        zone = page_zone(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        word = bitmap[word_bitidx];
        bitidx += end_bitidx;
        return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
                                        unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        struct zone *zone;
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long old_word, word;

        BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

        zone = page_zone(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

        bitidx += end_bitidx;
        mask <<= (BITS_PER_LONG - bitidx - 1);
        flags <<= (BITS_PER_LONG - bitidx - 1);

        word = READ_ONCE(bitmap[word_bitidx]);
        for (;;) {
                old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
                if (word == old_word)
                        break;
                word = old_word;
        }
}
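/*
 * Worked example (illustrative; assumes pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4, the usual x86 configuration): for
 * pfn == 0x12345 without SPARSEMEM and zone_start_pfn == 0,
 *
 *      bitidx      = (0x12345 >> 9) * 4 = 0x91 * 4 = 580
 *      word_bitidx = 580 / 64 = 9
 *      bitidx     &= 63  ->  4
 *
 * so all four pageblock bits of this 2MB block live in word 9 of the
 * bitmap, and get/set_pfnblock_flags_mask() shift and mask within that
 * single word; the cmpxchg() loop above keeps concurrent updates of
 * neighbouring blocks sharing the same word safe.
 */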

/*
 * This function checks whether the pageblock includes unmovable pages
 * or not. If @count is not zero, it is okay to include up to @count
 * supposedly-unmovable pages.
 *
 * A PageLRU check without isolation or lru_lock can race, so a
 * MIGRATE_MOVABLE block might still include unmovable pages. This means
 * you can't expect this function to be exact.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                         bool skip_hwpoisoned_pages)
{
        unsigned long pfn, iter, found;
        int mt;

        /*
         * To reduce noise, lru_add_drain_all() should be called before
         * this scan. A ZONE_MOVABLE zone never contains unmovable pages.
         */
        if (zone_idx(zone) == ZONE_MOVABLE)
                return false;
        mt = get_pageblock_migratetype(page);
        if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
                return false;

        pfn = page_to_pfn(page);
        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
                unsigned long check = pfn + iter;

                if (!pfn_valid_within(check))
                        continue;

                page = pfn_to_page(check);

                /*
                 * Hugepages are not in LRU lists, but they're movable.
                 * We need not scan over tail pages because we don't
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page)) {
                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
                        continue;
                }

                /*
                 * We can't use page_count without pinning the page
                 * because another CPU can free the compound page.
                 * This check already skips compound tails of THP
                 * because their page->_count is zero at all times.
                 */
                if (!atomic_read(&page->_count)) {
                        if (PageBuddy(page))
                                iter += (1 << page_order(page)) - 1;
                        continue;
                }

                /*
                 * The HWPoisoned page may not be in the buddy system,
                 * and page_count() is not 0.
                 */
                if (skip_hwpoisoned_pages && PageHWPoison(page))
                        continue;

                if (!PageLRU(page))
                        found++;
                /*
                 * If there are RECLAIMABLE pages, we need to check
                 * them. But for now, memory offline itself doesn't call
                 * shrink_node_slabs(); this still needs to be fixed.
                 */
                /*
                 * If the page is not RAM, page_count() should be 0, so
                 * we don't need more checks. This is a _used_
                 * non-movable page.
                 *
                 * The problematic thing here is PG_reserved pages.
                 * PG_reserved is set on both a memory hole page and a
                 * _used_ kernel page at boot.
                 */
                if (found > count)
                        return true;
        }
        return false;
}

bool is_pageblock_removable_nolock(struct page *page)
{
        struct zone *zone;
        unsigned long pfn;

        /*
         * We have to be careful here because we are iterating over memory
         * sections which are not zone aware, so we might end up outside of
         * the zone but still within the section.
         * We have to take care about the node as well. If the node is
         * offline, its NODE_DATA will be NULL - see page_zone.
         */
        if (!node_online(page_to_nid(page)))
                return false;

        zone = page_zone(page);
        pfn = page_to_pfn(page);
        if (!zone_spans_pfn(zone, pfn))
                return false;

        return !has_unmovable_pages(zone, page, 0, true);
}

#ifdef CONFIG_CMA

static unsigned long pfn_max_align_down(unsigned long pfn)
{
        return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
                             pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
        return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
                                pageblock_nr_pages));
}
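/*
 * Worked example (illustrative; assumes MAX_ORDER_NR_PAGES == 1024 and
 * pageblock_nr_pages == 512, so the larger alignment is 1024 pages):
 *
 *      pfn_max_align_down(0x12345) = 0x12345 & ~0x3ff = 0x12000
 *      pfn_max_align_up(0x12345)   = ALIGN(0x12345, 0x400) = 0x12400
 *
 * The isolation below always works on this outer, fully-aligned range
 * so the buddy allocator can never merge an isolated pageblock with a
 * non-isolated neighbour.
 */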

/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
                                        unsigned long start, unsigned long end)
{
        /* This function is based on compact_zone() from compaction.c. */
        unsigned long nr_reclaimed;
        unsigned long pfn = start;
        unsigned int tries = 0;
        int ret = 0;

        migrate_prep();

        while (pfn < end || !list_empty(&cc->migratepages)) {
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (list_empty(&cc->migratepages)) {
                        cc->nr_migratepages = 0;
                        pfn = isolate_migratepages_range(cc, pfn, end);
                        if (!pfn) {
                                ret = -EINTR;
                                break;
                        }
                        tries = 0;
                } else if (++tries == 5) {
                        ret = ret < 0 ? ret : -EBUSY;
                        break;
                }

                nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
                                                        &cc->migratepages);
                cc->nr_migratepages -= nr_reclaimed;

                ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
                                    NULL, 0, cc->mode, MR_CMA);
        }
        if (ret < 0) {
                putback_movable_pages(&cc->migratepages);
                return ret;
        }
        return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:      start PFN to allocate
 * @end:        one-past-the-last PFN to allocate
 * @migratetype:        migratetype of the underlying pageblocks (either
 *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *                      in range must have the same migratetype and it must
 *                      be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned, however it's the caller's responsibility to guarantee that
 * we are the only thread that changes the migrate type of pageblocks the
 * pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or a negative error code. On success, all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
                       unsigned migratetype)
{
        unsigned long outer_start, outer_end;
        unsigned int order;
        int ret = 0;

        struct compact_control cc = {
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
        };
        INIT_LIST_HEAD(&cc.migratepages);

        /*
         * What we do here is we mark all pageblocks in range as
         * MIGRATE_ISOLATE. Because the pageblock and max order pages may
         * have different sizes, and due to the way the page allocator
         * works, we align the range to the bigger of the two so that the
         * page allocator won't try to merge buddies from different
         * pageblocks and change MIGRATE_ISOLATE to some other migration
         * type.
         *
         * Once the pageblocks are marked as MIGRATE_ISOLATE, we
         * migrate the pages from an unaligned range (ie. pages that
         * we are interested in). This will put all the pages in the
         * range back to the page allocator as MIGRATE_ISOLATE.
         *
         * When this is done, we take the pages in range from the page
         * allocator, removing them from the buddy system. This way the
         * page allocator will never consider using them.
         *
         * This lets us mark the pageblocks back as
         * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
         * aligned range but not in the unaligned, original range are
         * put back to the page allocator so that buddy can use them.
         */

        ret = start_isolate_page_range(pfn_max_align_down(start),
                                       pfn_max_align_up(end), migratetype,
                                       false);
        if (ret)
                return ret;

        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret)
                goto done;

        /*
         * Pages from [start, end) are within MAX_ORDER_NR_PAGES
         * aligned blocks that are marked as MIGRATE_ISOLATE. What's
         * more, all pages in [start, end) are free in the page allocator.
         * What we are going to do is to allocate all pages from
         * [start, end) (that is, remove them from the page allocator).
         *
         * The only problem is that pages at the beginning and at the
         * end of the interesting range may not be aligned with pages
         * that the page allocator holds, ie. they can be part of higher
         * order pages. Because of this, we reserve the bigger range and
         * once this is done free the pages we are not interested in.
         *
         * We don't have to hold zone->lock here because the pages are
         * isolated thus they won't get removed from buddy.
         */

        lru_add_drain_all();
        drain_all_pages(cc.zone);

        order = 0;
        outer_start = start;
        while (!PageBuddy(pfn_to_page(outer_start))) {
                if (++order >= MAX_ORDER) {
                        ret = -EBUSY;
                        goto done;
                }
                outer_start &= ~0UL << order;
        }

        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
                pr_info("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
        }

        /* Grab isolated pages from freelists. */
        outer_end = isolate_freepages_range(&cc, outer_start, end);
        if (!outer_end) {
                ret = -EBUSY;
                goto done;
        }

        /* Free head and tail (if any) */
        if (start != outer_start)
                free_contig_range(outer_start, start - outer_start);
        if (end != outer_end)
                free_contig_range(end, outer_end - end);

done:
        undo_isolate_page_range(pfn_max_align_down(start),
                                pfn_max_align_up(end), migratetype);
        return ret;
}
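/*
 * Usage sketch (hedged; modelled on how the CMA allocator in mm/cma.c
 * obtains a contiguous block, not copied verbatim): to claim count
 * contiguous pages starting at pfn inside a MIGRATE_CMA region:
 *
 *      ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 *      if (ret == 0) {
 *              // use pfn_to_page(pfn) .. pfn_to_page(pfn + count - 1)
 *              free_contig_range(pfn, count);
 *      }
 *
 * -EBUSY from a "PFNs busy" race is usually transient; CMA simply
 * retries with the next candidate range.
 */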

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
        unsigned int count = 0;

        for (; nr_pages--; pfn++) {
                struct page *page = pfn_to_page(pfn);

                count += page_count(page) != 1;
                __free_page(page);
        }
        WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
        unsigned cpu;
        mutex_lock(&pcp_batch_high_lock);
        for_each_possible_cpu(cpu)
                pageset_set_high_and_batch(zone,
                                per_cpu_ptr(zone->pageset, cpu));
        mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
        unsigned long flags;
        int cpu;
        struct per_cpu_pageset *pset;

        /* avoid races with drain_pages() */
        local_irq_save(flags);
        if (zone->pageset != &boot_pageset) {
                for_each_online_cpu(cpu) {
                        pset = per_cpu_ptr(zone->pageset, cpu);
                        drain_zonestat(zone, pset);
                }
                free_percpu(zone->pageset);
                zone->pageset = &boot_pageset;
        }
        local_irq_restore(flags);
}
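/*
 * Usage note (hedged): memory hotplug is the expected caller of
 * zone_pcp_update(). After onlining a block changes the zone's size,
 * the hotplug path re-derives the per-cpu pagelist sizes, roughly:
 *
 *      zone->managed_pages += onlined_pages;
 *      zone_pcp_update(zone);
 *
 * zone_pcp_reset() is the teardown counterpart, dropping the zone back
 * to the minimal boot_pageset once the zone has no memory left.
 */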

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *page;
        struct zone *zone;
        unsigned int order, i;
        unsigned long pfn;
        unsigned long flags;
        /* find the first valid pfn */
        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                if (pfn_valid(pfn))
                        break;
        if (pfn == end_pfn)
                return;
        zone = page_zone(pfn_to_page(pfn));
        spin_lock_irqsave(&zone->lock, flags);
        pfn = start_pfn;
        while (pfn < end_pfn) {
                if (!pfn_valid(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                /*
                 * The HWPoisoned page may not be in the buddy system,
                 * and page_count() is not 0.
                 */
                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
                        pfn++;
                        SetPageReserved(page);
                        continue;
                }

                BUG_ON(page_count(page));
                BUG_ON(!PageBuddy(page));
                order = page_order(page);
#ifdef CONFIG_DEBUG_VM
                printk(KERN_INFO "remove from free list %lx %d %lx\n",
                       pfn, 1 << order, end_pfn);
#endif
                list_del(&page->lru);
                rmv_page_order(page);
                zone->free_area[order].nr_free--;
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
        struct zone *zone = page_zone(page);
        unsigned long pfn = page_to_pfn(page);
        unsigned long flags;
        unsigned int order;

        spin_lock_irqsave(&zone->lock, flags);
        for (order = 0; order < MAX_ORDER; order++) {
                struct page *page_head = page - (pfn & ((1 << order) - 1));

                if (PageBuddy(page_head) && page_order(page_head) >= order)
                        break;
        }
        spin_unlock_irqrestore(&zone->lock, flags);

        return order < MAX_ORDER;
}
#endif