1 /* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17 #include <linux/stddef.h> 18 #include <linux/mm.h> 19 #include <linux/swap.h> 20 #include <linux/interrupt.h> 21 #include <linux/pagemap.h> 22 #include <linux/jiffies.h> 23 #include <linux/bootmem.h> 24 #include <linux/memblock.h> 25 #include <linux/compiler.h> 26 #include <linux/kernel.h> 27 #include <linux/kmemcheck.h> 28 #include <linux/module.h> 29 #include <linux/suspend.h> 30 #include <linux/pagevec.h> 31 #include <linux/blkdev.h> 32 #include <linux/slab.h> 33 #include <linux/oom.h> 34 #include <linux/notifier.h> 35 #include <linux/topology.h> 36 #include <linux/sysctl.h> 37 #include <linux/cpu.h> 38 #include <linux/cpuset.h> 39 #include <linux/memory_hotplug.h> 40 #include <linux/nodemask.h> 41 #include <linux/vmalloc.h> 42 #include <linux/mempolicy.h> 43 #include <linux/stop_machine.h> 44 #include <linux/sort.h> 45 #include <linux/pfn.h> 46 #include <linux/backing-dev.h> 47 #include <linux/fault-inject.h> 48 #include <linux/page-isolation.h> 49 #include <linux/page_cgroup.h> 50 #include <linux/debugobjects.h> 51 #include <linux/kmemleak.h> 52 #include <linux/memory.h> 53 #include <linux/compaction.h> 54 #include <trace/events/kmem.h> 55 #include <linux/ftrace_event.h> 56 #include <linux/memcontrol.h> 57 #include <linux/prefetch.h> 58 59 #include <asm/tlbflush.h> 60 #include <asm/div64.h> 61 #include "internal.h" 62 63 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 64 DEFINE_PER_CPU(int, numa_node); 65 EXPORT_PER_CPU_SYMBOL(numa_node); 66 #endif 67 68 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 69 /* 70 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 71 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 72 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 73 * defined in <linux/topology.h>. 74 */ 75 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 76 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 77 #endif 78 79 /* 80 * Array of node states. 81 */ 82 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 83 [N_POSSIBLE] = NODE_MASK_ALL, 84 [N_ONLINE] = { { [0] = 1UL } }, 85 #ifndef CONFIG_NUMA 86 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 87 #ifdef CONFIG_HIGHMEM 88 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 89 #endif 90 [N_CPU] = { { [0] = 1UL } }, 91 #endif /* NUMA */ 92 }; 93 EXPORT_SYMBOL(node_states); 94 95 unsigned long totalram_pages __read_mostly; 96 unsigned long totalreserve_pages __read_mostly; 97 int percpu_pagelist_fraction; 98 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 99 100 #ifdef CONFIG_PM_SLEEP 101 /* 102 * The following functions are used by the suspend/hibernate code to temporarily 103 * change gfp_allowed_mask in order to avoid using I/O during memory allocations 104 * while devices are suspended. 
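 *
 * (For reference: pm_restrict_gfp_mask() below clears GFP_IOFS, i.e.
 * __GFP_IO | __GFP_FS, out of gfp_allowed_mask so that no I/O can be
 * started from the allocator while devices are asleep, and
 * pm_restore_gfp_mask() later puts the saved mask back.)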
To avoid races with the suspend/hibernate code, 105 * they should always be called with pm_mutex held (gfp_allowed_mask also should 106 * only be modified with pm_mutex held, unless the suspend/hibernate code is 107 * guaranteed not to run in parallel with that modification). 108 */ 109 110 static gfp_t saved_gfp_mask; 111 112 void pm_restore_gfp_mask(void) 113 { 114 WARN_ON(!mutex_is_locked(&pm_mutex)); 115 if (saved_gfp_mask) { 116 gfp_allowed_mask = saved_gfp_mask; 117 saved_gfp_mask = 0; 118 } 119 } 120 121 void pm_restrict_gfp_mask(void) 122 { 123 WARN_ON(!mutex_is_locked(&pm_mutex)); 124 WARN_ON(saved_gfp_mask); 125 saved_gfp_mask = gfp_allowed_mask; 126 gfp_allowed_mask &= ~GFP_IOFS; 127 } 128 #endif /* CONFIG_PM_SLEEP */ 129 130 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 131 int pageblock_order __read_mostly; 132 #endif 133 134 static void __free_pages_ok(struct page *page, unsigned int order); 135 136 /* 137 * results with 256, 32 in the lowmem_reserve sysctl: 138 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 139 * 1G machine -> (16M dma, 784M normal, 224M high) 140 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 141 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 142 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 143 * 144 * TBD: should special case ZONE_DMA32 machines here - in those we normally 145 * don't need any ZONE_NORMAL reservation 146 */ 147 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 148 #ifdef CONFIG_ZONE_DMA 149 256, 150 #endif 151 #ifdef CONFIG_ZONE_DMA32 152 256, 153 #endif 154 #ifdef CONFIG_HIGHMEM 155 32, 156 #endif 157 32, 158 }; 159 160 EXPORT_SYMBOL(totalram_pages); 161 162 static char * const zone_names[MAX_NR_ZONES] = { 163 #ifdef CONFIG_ZONE_DMA 164 "DMA", 165 #endif 166 #ifdef CONFIG_ZONE_DMA32 167 "DMA32", 168 #endif 169 "Normal", 170 #ifdef CONFIG_HIGHMEM 171 "HighMem", 172 #endif 173 "Movable", 174 }; 175 176 int min_free_kbytes = 1024; 177 178 static unsigned long __meminitdata nr_kernel_pages; 179 static unsigned long __meminitdata nr_all_pages; 180 static unsigned long __meminitdata dma_reserve; 181 182 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 183 /* 184 * MAX_ACTIVE_REGIONS determines the maximum number of distinct 185 * ranges of memory (RAM) that may be registered with add_active_range(). 
186 * Ranges passed to add_active_range() will be merged if possible 187 * so the number of times add_active_range() can be called is 188 * related to the number of nodes and the number of holes 189 */ 190 #ifdef CONFIG_MAX_ACTIVE_REGIONS 191 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 192 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 193 #else 194 #if MAX_NUMNODES >= 32 195 /* If there can be many nodes, allow up to 50 holes per node */ 196 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 197 #else 198 /* By default, allow up to 256 distinct regions */ 199 #define MAX_ACTIVE_REGIONS 256 200 #endif 201 #endif 202 203 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; 204 static int __meminitdata nr_nodemap_entries; 205 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 206 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 207 static unsigned long __initdata required_kernelcore; 208 static unsigned long __initdata required_movablecore; 209 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 210 211 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 212 int movable_zone; 213 EXPORT_SYMBOL(movable_zone); 214 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 215 216 #if MAX_NUMNODES > 1 217 int nr_node_ids __read_mostly = MAX_NUMNODES; 218 int nr_online_nodes __read_mostly = 1; 219 EXPORT_SYMBOL(nr_node_ids); 220 EXPORT_SYMBOL(nr_online_nodes); 221 #endif 222 223 int page_group_by_mobility_disabled __read_mostly; 224 225 static void set_pageblock_migratetype(struct page *page, int migratetype) 226 { 227 228 if (unlikely(page_group_by_mobility_disabled)) 229 migratetype = MIGRATE_UNMOVABLE; 230 231 set_pageblock_flags_group(page, (unsigned long)migratetype, 232 PB_migrate, PB_migrate_end); 233 } 234 235 bool oom_killer_disabled __read_mostly; 236 237 #ifdef CONFIG_DEBUG_VM 238 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 239 { 240 int ret = 0; 241 unsigned seq; 242 unsigned long pfn = page_to_pfn(page); 243 244 do { 245 seq = zone_span_seqbegin(zone); 246 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 247 ret = 1; 248 else if (pfn < zone->zone_start_pfn) 249 ret = 1; 250 } while (zone_span_seqretry(zone, seq)); 251 252 return ret; 253 } 254 255 static int page_is_consistent(struct zone *zone, struct page *page) 256 { 257 if (!pfn_valid_within(page_to_pfn(page))) 258 return 0; 259 if (zone != page_zone(page)) 260 return 0; 261 262 return 1; 263 } 264 /* 265 * Temporary debugging check for pages not lying within a given zone. 266 */ 267 static int bad_range(struct zone *zone, struct page *page) 268 { 269 if (page_outside_zone_boundaries(zone, page)) 270 return 1; 271 if (!page_is_consistent(zone, page)) 272 return 1; 273 274 return 0; 275 } 276 #else 277 static inline int bad_range(struct zone *zone, struct page *page) 278 { 279 return 0; 280 } 281 #endif 282 283 static void bad_page(struct page *page) 284 { 285 static unsigned long resume; 286 static unsigned long nr_shown; 287 static unsigned long nr_unshown; 288 289 /* Don't complain about poisoned pages */ 290 if (PageHWPoison(page)) { 291 reset_page_mapcount(page); /* remove PageBuddy */ 292 return; 293 } 294 295 /* 296 * Allow a burst of 60 reports, then keep quiet for that minute; 297 * or allow a steady drip of one report per second. 
298 */ 299 if (nr_shown == 60) { 300 if (time_before(jiffies, resume)) { 301 nr_unshown++; 302 goto out; 303 } 304 if (nr_unshown) { 305 printk(KERN_ALERT 306 "BUG: Bad page state: %lu messages suppressed\n", 307 nr_unshown); 308 nr_unshown = 0; 309 } 310 nr_shown = 0; 311 } 312 if (nr_shown++ == 0) 313 resume = jiffies + 60 * HZ; 314 315 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", 316 current->comm, page_to_pfn(page)); 317 dump_page(page); 318 319 dump_stack(); 320 out: 321 /* Leave bad fields for debug, except PageBuddy could make trouble */ 322 reset_page_mapcount(page); /* remove PageBuddy */ 323 add_taint(TAINT_BAD_PAGE); 324 } 325 326 /* 327 * Higher-order pages are called "compound pages". They are structured thusly: 328 * 329 * The first PAGE_SIZE page is called the "head page". 330 * 331 * The remaining PAGE_SIZE pages are called "tail pages". 332 * 333 * All pages have PG_compound set. All pages have their ->private pointing at 334 * the head page (even the head page has this). 335 * 336 * The first tail page's ->lru.next holds the address of the compound page's 337 * put_page() function. Its ->lru.prev holds the order of allocation. 338 * This usage means that zero-order pages may not be compound. 339 */ 340 341 static void free_compound_page(struct page *page) 342 { 343 __free_pages_ok(page, compound_order(page)); 344 } 345 346 void prep_compound_page(struct page *page, unsigned long order) 347 { 348 int i; 349 int nr_pages = 1 << order; 350 351 set_compound_page_dtor(page, free_compound_page); 352 set_compound_order(page, order); 353 __SetPageHead(page); 354 for (i = 1; i < nr_pages; i++) { 355 struct page *p = page + i; 356 357 __SetPageTail(p); 358 p->first_page = page; 359 } 360 } 361 362 /* update __split_huge_page_refcount if you change this function */ 363 static int destroy_compound_page(struct page *page, unsigned long order) 364 { 365 int i; 366 int nr_pages = 1 << order; 367 int bad = 0; 368 369 if (unlikely(compound_order(page) != order) || 370 unlikely(!PageHead(page))) { 371 bad_page(page); 372 bad++; 373 } 374 375 __ClearPageHead(page); 376 377 for (i = 1; i < nr_pages; i++) { 378 struct page *p = page + i; 379 380 if (unlikely(!PageTail(p) || (p->first_page != page))) { 381 bad_page(page); 382 bad++; 383 } 384 __ClearPageTail(p); 385 } 386 387 return bad; 388 } 389 390 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 391 { 392 int i; 393 394 /* 395 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 396 * and __GFP_HIGHMEM from hard or soft interrupt context. 397 */ 398 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 399 for (i = 0; i < (1 << order); i++) 400 clear_highpage(page + i); 401 } 402 403 static inline void set_page_order(struct page *page, int order) 404 { 405 set_page_private(page, order); 406 __SetPageBuddy(page); 407 } 408 409 static inline void rmv_page_order(struct page *page) 410 { 411 __ClearPageBuddy(page); 412 set_page_private(page, 0); 413 } 414 415 /* 416 * Locate the struct page for both the matching buddy in our 417 * pair (buddy1) and the combined O(n+1) page they form (page). 
418 * 419 * 1) Any buddy B1 will have an order O twin B2 which satisfies 420 * the following equation: 421 * B2 = B1 ^ (1 << O) 422 * For example, if the starting buddy (buddy2) is #8, its order 423 * 1 buddy is #10: 424 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 425 * 426 * 2) Any buddy B will have an order O+1 parent P which 427 * satisfies the following equation: 428 * P = B & ~(1 << O) 429 * 430 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 431 */ 432 static inline unsigned long 433 __find_buddy_index(unsigned long page_idx, unsigned int order) 434 { 435 return page_idx ^ (1 << order); 436 } 437 438 /* 439 * This function checks whether a page is free && is the buddy. 440 * We can coalesce a page and its buddy if 441 * (a) the buddy is not in a hole && 442 * (b) the buddy is in the buddy system && 443 * (c) a page and its buddy have the same order && 444 * (d) a page and its buddy are in the same zone. 445 * 446 * For recording whether a page is in the buddy system, we set ->_mapcount to -2. 447 * Setting, clearing, and testing _mapcount == -2 is serialized by zone->lock. 448 * 449 * For recording a page's order, we use page_private(page). 450 */ 451 static inline int page_is_buddy(struct page *page, struct page *buddy, 452 int order) 453 { 454 if (!pfn_valid_within(page_to_pfn(buddy))) 455 return 0; 456 457 if (page_zone_id(page) != page_zone_id(buddy)) 458 return 0; 459 460 if (PageBuddy(buddy) && page_order(buddy) == order) { 461 VM_BUG_ON(page_count(buddy) != 0); 462 return 1; 463 } 464 return 0; 465 } 466 467 /* 468 * Freeing function for a buddy system allocator. 469 * 470 * The concept of a buddy system is to maintain a direct-mapped table 471 * (containing bit values) for memory blocks of various "orders". 472 * The bottom level table contains the map for the smallest allocatable 473 * units of memory (here, pages), and each level above it describes 474 * pairs of units from the levels below, hence, "buddies". 475 * At a high level, all that happens here is marking the table entry 476 * at the bottom level available, and propagating the changes upward 477 * as necessary, plus some accounting needed to play nicely with other 478 * parts of the VM system. 479 * At each level, we keep a list of pages, which are heads of contiguous 480 * free pages of length (1 << order) and marked with _mapcount -2. The page's 481 * order is recorded in the page_private(page) field. 482 * So when we are allocating or freeing one, we can derive the state of the 483 * other. That is, if we allocate a small block, and both were 484 * free, the remainder of the region must be split into blocks. 485 * If a block is freed, and its buddy is also free, then this 486 * triggers coalescing into a block of larger size.
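 *
 * A worked example of the merge walk in __free_one_page() below, with
 * small numbers assumed purely for illustration.  Freeing page_idx 12 at
 * order 0, when the buddy happens to be free at every step:
 *
 *	order 0: buddy_idx = 12 ^ (1 << 0) = 13,  combined_idx = 13 & 12 = 12
 *	order 1: buddy_idx = 12 ^ (1 << 1) = 14,  combined_idx = 14 & 12 = 12
 *	order 2: buddy_idx = 12 ^ (1 << 2) =  8,  combined_idx =  8 & 12 =  8
 *	order 3: buddy_idx =  8 ^ (1 << 3) =  0,  combined_idx =  0 &  8 =  0
 *
 * so a single freed order-0 page can end up recorded as the head of an
 * order-4 free block at index 0, provided each of those buddies really
 * was free and of matching order (which is what page_is_buddy() above
 * checks).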
487 * 488 * -- wli 489 */ 490 491 static inline void __free_one_page(struct page *page, 492 struct zone *zone, unsigned int order, 493 int migratetype) 494 { 495 unsigned long page_idx; 496 unsigned long combined_idx; 497 unsigned long uninitialized_var(buddy_idx); 498 struct page *buddy; 499 500 if (unlikely(PageCompound(page))) 501 if (unlikely(destroy_compound_page(page, order))) 502 return; 503 504 VM_BUG_ON(migratetype == -1); 505 506 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 507 508 VM_BUG_ON(page_idx & ((1 << order) - 1)); 509 VM_BUG_ON(bad_range(zone, page)); 510 511 while (order < MAX_ORDER-1) { 512 buddy_idx = __find_buddy_index(page_idx, order); 513 buddy = page + (buddy_idx - page_idx); 514 if (!page_is_buddy(page, buddy, order)) 515 break; 516 517 /* Our buddy is free, merge with it and move up one order. */ 518 list_del(&buddy->lru); 519 zone->free_area[order].nr_free--; 520 rmv_page_order(buddy); 521 combined_idx = buddy_idx & page_idx; 522 page = page + (combined_idx - page_idx); 523 page_idx = combined_idx; 524 order++; 525 } 526 set_page_order(page, order); 527 528 /* 529 * If this is not the largest possible page, check if the buddy 530 * of the next-highest order is free. If it is, it's possible 531 * that pages are being freed that will coalesce soon. In case, 532 * that is happening, add the free page to the tail of the list 533 * so it's less likely to be used soon and more likely to be merged 534 * as a higher order page 535 */ 536 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { 537 struct page *higher_page, *higher_buddy; 538 combined_idx = buddy_idx & page_idx; 539 higher_page = page + (combined_idx - page_idx); 540 buddy_idx = __find_buddy_index(combined_idx, order + 1); 541 higher_buddy = page + (buddy_idx - combined_idx); 542 if (page_is_buddy(higher_page, higher_buddy, order + 1)) { 543 list_add_tail(&page->lru, 544 &zone->free_area[order].free_list[migratetype]); 545 goto out; 546 } 547 } 548 549 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); 550 out: 551 zone->free_area[order].nr_free++; 552 } 553 554 /* 555 * free_page_mlock() -- clean up attempts to free and mlocked() page. 556 * Page should not be on lru, so no need to fix that up. 557 * free_pages_check() will verify... 558 */ 559 static inline void free_page_mlock(struct page *page) 560 { 561 __dec_zone_page_state(page, NR_MLOCK); 562 __count_vm_event(UNEVICTABLE_MLOCKFREED); 563 } 564 565 static inline int free_pages_check(struct page *page) 566 { 567 if (unlikely(page_mapcount(page) | 568 (page->mapping != NULL) | 569 (atomic_read(&page->_count) != 0) | 570 (page->flags & PAGE_FLAGS_CHECK_AT_FREE) | 571 (mem_cgroup_bad_page_check(page)))) { 572 bad_page(page); 573 return 1; 574 } 575 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) 576 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 577 return 0; 578 } 579 580 /* 581 * Frees a number of pages from the PCP lists 582 * Assumes all pages on list are in same zone, and of same order. 583 * count is the number of pages to free. 584 * 585 * If the zone was previously in an "all pages pinned" state then look to 586 * see if this freeing clears that state. 587 * 588 * And clear the zone's pages_scanned counter, to hold off the "all pages are 589 * pinned" detection logic. 
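 *
 * For scale (figures assumed purely for illustration): free_hot_cold_page()
 * below calls this with count = pcp->batch once pcp->count climbs past
 * pcp->high, while drain_pages() passes the whole pcp->count; so with,
 * say, batch = 31 and high = 6 * batch = 186, a busy CPU trickles pages
 * back 31 at a time and a drain empties its lists in one call.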
590 */ 591 static void free_pcppages_bulk(struct zone *zone, int count, 592 struct per_cpu_pages *pcp) 593 { 594 int migratetype = 0; 595 int batch_free = 0; 596 int to_free = count; 597 598 spin_lock(&zone->lock); 599 zone->all_unreclaimable = 0; 600 zone->pages_scanned = 0; 601 602 while (to_free) { 603 struct page *page; 604 struct list_head *list; 605 606 /* 607 * Remove pages from lists in a round-robin fashion. A 608 * batch_free count is maintained that is incremented when an 609 * empty list is encountered. This is so more pages are freed 610 * off fuller lists instead of spinning excessively around empty 611 * lists 612 */ 613 do { 614 batch_free++; 615 if (++migratetype == MIGRATE_PCPTYPES) 616 migratetype = 0; 617 list = &pcp->lists[migratetype]; 618 } while (list_empty(list)); 619 620 /* This is the only non-empty list. Free them all. */ 621 if (batch_free == MIGRATE_PCPTYPES) 622 batch_free = to_free; 623 624 do { 625 page = list_entry(list->prev, struct page, lru); 626 /* must delete as __free_one_page list manipulates */ 627 list_del(&page->lru); 628 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 629 __free_one_page(page, zone, 0, page_private(page)); 630 trace_mm_page_pcpu_drain(page, 0, page_private(page)); 631 } while (--to_free && --batch_free && !list_empty(list)); 632 } 633 __mod_zone_page_state(zone, NR_FREE_PAGES, count); 634 spin_unlock(&zone->lock); 635 } 636 637 static void free_one_page(struct zone *zone, struct page *page, int order, 638 int migratetype) 639 { 640 spin_lock(&zone->lock); 641 zone->all_unreclaimable = 0; 642 zone->pages_scanned = 0; 643 644 __free_one_page(page, zone, order, migratetype); 645 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); 646 spin_unlock(&zone->lock); 647 } 648 649 static bool free_pages_prepare(struct page *page, unsigned int order) 650 { 651 int i; 652 int bad = 0; 653 654 trace_mm_page_free_direct(page, order); 655 kmemcheck_free_shadow(page, order); 656 657 if (PageAnon(page)) 658 page->mapping = NULL; 659 for (i = 0; i < (1 << order); i++) 660 bad += free_pages_check(page + i); 661 if (bad) 662 return false; 663 664 if (!PageHighMem(page)) { 665 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 666 debug_check_no_obj_freed(page_address(page), 667 PAGE_SIZE << order); 668 } 669 arch_free_page(page, order); 670 kernel_map_pages(page, 1 << order, 0); 671 672 return true; 673 } 674 675 static void __free_pages_ok(struct page *page, unsigned int order) 676 { 677 unsigned long flags; 678 int wasMlocked = __TestClearPageMlocked(page); 679 680 if (!free_pages_prepare(page, order)) 681 return; 682 683 local_irq_save(flags); 684 if (unlikely(wasMlocked)) 685 free_page_mlock(page); 686 __count_vm_events(PGFREE, 1 << order); 687 free_one_page(page_zone(page), page, order, 688 get_pageblock_migratetype(page)); 689 local_irq_restore(flags); 690 } 691 692 /* 693 * permit the bootmem allocator to evade page validation on high-order frees 694 */ 695 void __meminit __free_pages_bootmem(struct page *page, unsigned int order) 696 { 697 if (order == 0) { 698 __ClearPageReserved(page); 699 set_page_count(page, 0); 700 set_page_refcounted(page); 701 __free_page(page); 702 } else { 703 int loop; 704 705 prefetchw(page); 706 for (loop = 0; loop < BITS_PER_LONG; loop++) { 707 struct page *p = &page[loop]; 708 709 if (loop + 1 < BITS_PER_LONG) 710 prefetchw(p + 1); 711 __ClearPageReserved(p); 712 set_page_count(p, 0); 713 } 714 715 set_page_refcounted(page); 716 __free_pages(page, order); 717 } 718 } 719 720 721 /* 722 
* The order of subdivision here is critical for the IO subsystem. 723 * Please do not alter this order without good reasons and regression 724 * testing. Specifically, as large blocks of memory are subdivided, 725 * the order in which smaller blocks are delivered depends on the order 726 * they're subdivided in this function. This is the primary factor 727 * influencing the order in which pages are delivered to the IO 728 * subsystem according to empirical testing, and this is also justified 729 * by considering the behavior of a buddy system containing a single 730 * large block of memory acted on by a series of small allocations. 731 * This behavior is a critical factor in sglist merging's success. 732 * 733 * -- wli 734 */ 735 static inline void expand(struct zone *zone, struct page *page, 736 int low, int high, struct free_area *area, 737 int migratetype) 738 { 739 unsigned long size = 1 << high; 740 741 while (high > low) { 742 area--; 743 high--; 744 size >>= 1; 745 VM_BUG_ON(bad_range(zone, &page[size])); 746 list_add(&page[size].lru, &area->free_list[migratetype]); 747 area->nr_free++; 748 set_page_order(&page[size], high); 749 } 750 } 751 752 /* 753 * This page is about to be returned from the page allocator 754 */ 755 static inline int check_new_page(struct page *page) 756 { 757 if (unlikely(page_mapcount(page) | 758 (page->mapping != NULL) | 759 (atomic_read(&page->_count) != 0) | 760 (page->flags & PAGE_FLAGS_CHECK_AT_PREP) | 761 (mem_cgroup_bad_page_check(page)))) { 762 bad_page(page); 763 return 1; 764 } 765 return 0; 766 } 767 768 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 769 { 770 int i; 771 772 for (i = 0; i < (1 << order); i++) { 773 struct page *p = page + i; 774 if (unlikely(check_new_page(p))) 775 return 1; 776 } 777 778 set_page_private(page, 0); 779 set_page_refcounted(page); 780 781 arch_alloc_page(page, order); 782 kernel_map_pages(page, 1 << order, 1); 783 784 if (gfp_flags & __GFP_ZERO) 785 prep_zero_page(page, order, gfp_flags); 786 787 if (order && (gfp_flags & __GFP_COMP)) 788 prep_compound_page(page, order); 789 790 return 0; 791 } 792 793 /* 794 * Go through the free lists for the given migratetype and remove 795 * the smallest available page from the freelists 796 */ 797 static inline 798 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 799 int migratetype) 800 { 801 unsigned int current_order; 802 struct free_area * area; 803 struct page *page; 804 805 /* Find a page of the appropriate size in the preferred list */ 806 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 807 area = &(zone->free_area[current_order]); 808 if (list_empty(&area->free_list[migratetype])) 809 continue; 810 811 page = list_entry(area->free_list[migratetype].next, 812 struct page, lru); 813 list_del(&page->lru); 814 rmv_page_order(page); 815 area->nr_free--; 816 expand(zone, page, order, current_order, area, migratetype); 817 return page; 818 } 819 820 return NULL; 821 } 822 823 824 /* 825 * This array describes the order lists are fallen back to when 826 * the free lists for the desirable migrate type are depleted 827 */ 828 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { 829 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 830 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 831 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, 832 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, 
/* Never used */ 833 }; 834 835 /* 836 * Move the free pages in a range to the free lists of the requested type. 837 * Note that start_page and end_pages are not aligned on a pageblock 838 * boundary. If alignment is required, use move_freepages_block() 839 */ 840 static int move_freepages(struct zone *zone, 841 struct page *start_page, struct page *end_page, 842 int migratetype) 843 { 844 struct page *page; 845 unsigned long order; 846 int pages_moved = 0; 847 848 #ifndef CONFIG_HOLES_IN_ZONE 849 /* 850 * page_zone is not safe to call in this context when 851 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant 852 * anyway as we check zone boundaries in move_freepages_block(). 853 * Remove at a later date when no bug reports exist related to 854 * grouping pages by mobility 855 */ 856 BUG_ON(page_zone(start_page) != page_zone(end_page)); 857 #endif 858 859 for (page = start_page; page <= end_page;) { 860 /* Make sure we are not inadvertently changing nodes */ 861 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); 862 863 if (!pfn_valid_within(page_to_pfn(page))) { 864 page++; 865 continue; 866 } 867 868 if (!PageBuddy(page)) { 869 page++; 870 continue; 871 } 872 873 order = page_order(page); 874 list_move(&page->lru, 875 &zone->free_area[order].free_list[migratetype]); 876 page += 1 << order; 877 pages_moved += 1 << order; 878 } 879 880 return pages_moved; 881 } 882 883 static int move_freepages_block(struct zone *zone, struct page *page, 884 int migratetype) 885 { 886 unsigned long start_pfn, end_pfn; 887 struct page *start_page, *end_page; 888 889 start_pfn = page_to_pfn(page); 890 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 891 start_page = pfn_to_page(start_pfn); 892 end_page = start_page + pageblock_nr_pages - 1; 893 end_pfn = start_pfn + pageblock_nr_pages - 1; 894 895 /* Do not cross zone boundaries */ 896 if (start_pfn < zone->zone_start_pfn) 897 start_page = page; 898 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) 899 return 0; 900 901 return move_freepages(zone, start_page, end_page, migratetype); 902 } 903 904 static void change_pageblock_range(struct page *pageblock_page, 905 int start_order, int migratetype) 906 { 907 int nr_pageblocks = 1 << (start_order - pageblock_order); 908 909 while (nr_pageblocks--) { 910 set_pageblock_migratetype(pageblock_page, migratetype); 911 pageblock_page += pageblock_nr_pages; 912 } 913 } 914 915 /* Remove an element from the buddy allocator from the fallback list */ 916 static inline struct page * 917 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) 918 { 919 struct free_area * area; 920 int current_order; 921 struct page *page; 922 int migratetype, i; 923 924 /* Find the largest possible block of pages in the other list */ 925 for (current_order = MAX_ORDER-1; current_order >= order; 926 --current_order) { 927 for (i = 0; i < MIGRATE_TYPES - 1; i++) { 928 migratetype = fallbacks[start_migratetype][i]; 929 930 /* MIGRATE_RESERVE handled later if necessary */ 931 if (migratetype == MIGRATE_RESERVE) 932 continue; 933 934 area = &(zone->free_area[current_order]); 935 if (list_empty(&area->free_list[migratetype])) 936 continue; 937 938 page = list_entry(area->free_list[migratetype].next, 939 struct page, lru); 940 area->nr_free--; 941 942 /* 943 * If breaking a large block of pages, move all free 944 * pages to the preferred allocation list. 
If falling 945 * back for a reclaimable kernel allocation, be more 946 * aggressive about taking ownership of free pages 947 */ 948 if (unlikely(current_order >= (pageblock_order >> 1)) || 949 start_migratetype == MIGRATE_RECLAIMABLE || 950 page_group_by_mobility_disabled) { 951 unsigned long pages; 952 pages = move_freepages_block(zone, page, 953 start_migratetype); 954 955 /* Claim the whole block if over half of it is free */ 956 if (pages >= (1 << (pageblock_order-1)) || 957 page_group_by_mobility_disabled) 958 set_pageblock_migratetype(page, 959 start_migratetype); 960 961 migratetype = start_migratetype; 962 } 963 964 /* Remove the page from the freelists */ 965 list_del(&page->lru); 966 rmv_page_order(page); 967 968 /* Take ownership for orders >= pageblock_order */ 969 if (current_order >= pageblock_order) 970 change_pageblock_range(page, current_order, 971 start_migratetype); 972 973 expand(zone, page, order, current_order, area, migratetype); 974 975 trace_mm_page_alloc_extfrag(page, order, current_order, 976 start_migratetype, migratetype); 977 978 return page; 979 } 980 } 981 982 return NULL; 983 } 984 985 /* 986 * Do the hard work of removing an element from the buddy allocator. 987 * Call me with the zone->lock already held. 988 */ 989 static struct page *__rmqueue(struct zone *zone, unsigned int order, 990 int migratetype) 991 { 992 struct page *page; 993 994 retry_reserve: 995 page = __rmqueue_smallest(zone, order, migratetype); 996 997 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { 998 page = __rmqueue_fallback(zone, order, migratetype); 999 1000 /* 1001 * Use MIGRATE_RESERVE rather than fail an allocation. goto 1002 * is used because __rmqueue_smallest is an inline function 1003 * and we want just one call site 1004 */ 1005 if (!page) { 1006 migratetype = MIGRATE_RESERVE; 1007 goto retry_reserve; 1008 } 1009 } 1010 1011 trace_mm_page_alloc_zone_locked(page, order, migratetype); 1012 return page; 1013 } 1014 1015 /* 1016 * Obtain a specified number of elements from the buddy allocator, all under 1017 * a single hold of the lock, for efficiency. Add them to the supplied list. 1018 * Returns the number of new pages which were placed at *list. 1019 */ 1020 static int rmqueue_bulk(struct zone *zone, unsigned int order, 1021 unsigned long count, struct list_head *list, 1022 int migratetype, int cold) 1023 { 1024 int i; 1025 1026 spin_lock(&zone->lock); 1027 for (i = 0; i < count; ++i) { 1028 struct page *page = __rmqueue(zone, order, migratetype); 1029 if (unlikely(page == NULL)) 1030 break; 1031 1032 /* 1033 * Split buddy pages returned by expand() are received here 1034 * in physical page order. The page is added to the callers and 1035 * list and the list head then moves forward. From the callers 1036 * perspective, the linked list is ordered by page number in 1037 * some conditions. This is useful for IO devices that can 1038 * merge IO requests if the physical pages are ordered 1039 * properly. 1040 */ 1041 if (likely(cold == 0)) 1042 list_add(&page->lru, list); 1043 else 1044 list_add_tail(&page->lru, list); 1045 set_page_private(page, migratetype); 1046 list = &page->lru; 1047 } 1048 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 1049 spin_unlock(&zone->lock); 1050 return i; 1051 } 1052 1053 #ifdef CONFIG_NUMA 1054 /* 1055 * Called from the vmstat counter updater to drain pagesets of this 1056 * currently executing processor on remote nodes after they have 1057 * expired. 
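 *
 * A small worked example (numbers assumed for illustration): if this CPU
 * holds count = 45 expired pages on a remote zone's pcp list and batch is
 * 31, one call below frees 31 of them back to that zone's buddy lists and
 * leaves count = 14; a later call, finding count below batch, frees the
 * remaining 14.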
1058 * 1059 * Note that this function must be called with the thread pinned to 1060 * a single processor. 1061 */ 1062 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 1063 { 1064 unsigned long flags; 1065 int to_drain; 1066 1067 local_irq_save(flags); 1068 if (pcp->count >= pcp->batch) 1069 to_drain = pcp->batch; 1070 else 1071 to_drain = pcp->count; 1072 free_pcppages_bulk(zone, to_drain, pcp); 1073 pcp->count -= to_drain; 1074 local_irq_restore(flags); 1075 } 1076 #endif 1077 1078 /* 1079 * Drain pages of the indicated processor. 1080 * 1081 * The processor must either be the current processor and the 1082 * thread pinned to the current processor or a processor that 1083 * is not online. 1084 */ 1085 static void drain_pages(unsigned int cpu) 1086 { 1087 unsigned long flags; 1088 struct zone *zone; 1089 1090 for_each_populated_zone(zone) { 1091 struct per_cpu_pageset *pset; 1092 struct per_cpu_pages *pcp; 1093 1094 local_irq_save(flags); 1095 pset = per_cpu_ptr(zone->pageset, cpu); 1096 1097 pcp = &pset->pcp; 1098 if (pcp->count) { 1099 free_pcppages_bulk(zone, pcp->count, pcp); 1100 pcp->count = 0; 1101 } 1102 local_irq_restore(flags); 1103 } 1104 } 1105 1106 /* 1107 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 1108 */ 1109 void drain_local_pages(void *arg) 1110 { 1111 drain_pages(smp_processor_id()); 1112 } 1113 1114 /* 1115 * Spill all the per-cpu pages from all CPUs back into the buddy allocator 1116 */ 1117 void drain_all_pages(void) 1118 { 1119 on_each_cpu(drain_local_pages, NULL, 1); 1120 } 1121 1122 #ifdef CONFIG_HIBERNATION 1123 1124 void mark_free_pages(struct zone *zone) 1125 { 1126 unsigned long pfn, max_zone_pfn; 1127 unsigned long flags; 1128 int order, t; 1129 struct list_head *curr; 1130 1131 if (!zone->spanned_pages) 1132 return; 1133 1134 spin_lock_irqsave(&zone->lock, flags); 1135 1136 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 1137 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1138 if (pfn_valid(pfn)) { 1139 struct page *page = pfn_to_page(pfn); 1140 1141 if (!swsusp_page_is_forbidden(page)) 1142 swsusp_unset_page_free(page); 1143 } 1144 1145 for_each_migratetype_order(order, t) { 1146 list_for_each(curr, &zone->free_area[order].free_list[t]) { 1147 unsigned long i; 1148 1149 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 1150 for (i = 0; i < (1UL << order); i++) 1151 swsusp_set_page_free(pfn_to_page(pfn + i)); 1152 } 1153 } 1154 spin_unlock_irqrestore(&zone->lock, flags); 1155 } 1156 #endif /* CONFIG_PM */ 1157 1158 /* 1159 * Free a 0-order page 1160 * cold == 1 ? free a cold page : free a hot page 1161 */ 1162 void free_hot_cold_page(struct page *page, int cold) 1163 { 1164 struct zone *zone = page_zone(page); 1165 struct per_cpu_pages *pcp; 1166 unsigned long flags; 1167 int migratetype; 1168 int wasMlocked = __TestClearPageMlocked(page); 1169 1170 if (!free_pages_prepare(page, 0)) 1171 return; 1172 1173 migratetype = get_pageblock_migratetype(page); 1174 set_page_private(page, migratetype); 1175 local_irq_save(flags); 1176 if (unlikely(wasMlocked)) 1177 free_page_mlock(page); 1178 __count_vm_event(PGFREE); 1179 1180 /* 1181 * We only track unmovable, reclaimable and movable on pcp lists. 1182 * Free ISOLATE pages back to the allocator because they are being 1183 * offlined but treat RESERVE as movable pages so we can get those 1184 * areas back if necessary. 
Otherwise, we may have to free 1185 * excessively into the page allocator 1186 */ 1187 if (migratetype >= MIGRATE_PCPTYPES) { 1188 if (unlikely(migratetype == MIGRATE_ISOLATE)) { 1189 free_one_page(zone, page, 0, migratetype); 1190 goto out; 1191 } 1192 migratetype = MIGRATE_MOVABLE; 1193 } 1194 1195 pcp = &this_cpu_ptr(zone->pageset)->pcp; 1196 if (cold) 1197 list_add_tail(&page->lru, &pcp->lists[migratetype]); 1198 else 1199 list_add(&page->lru, &pcp->lists[migratetype]); 1200 pcp->count++; 1201 if (pcp->count >= pcp->high) { 1202 free_pcppages_bulk(zone, pcp->batch, pcp); 1203 pcp->count -= pcp->batch; 1204 } 1205 1206 out: 1207 local_irq_restore(flags); 1208 } 1209 1210 /* 1211 * split_page takes a non-compound higher-order page, and splits it into 1212 * n (1<<order) sub-pages: page[0..n] 1213 * Each sub-page must be freed individually. 1214 * 1215 * Note: this is probably too low level an operation for use in drivers. 1216 * Please consult with lkml before using this in your driver. 1217 */ 1218 void split_page(struct page *page, unsigned int order) 1219 { 1220 int i; 1221 1222 VM_BUG_ON(PageCompound(page)); 1223 VM_BUG_ON(!page_count(page)); 1224 1225 #ifdef CONFIG_KMEMCHECK 1226 /* 1227 * Split shadow pages too, because free(page[0]) would 1228 * otherwise free the whole shadow. 1229 */ 1230 if (kmemcheck_page_is_tracked(page)) 1231 split_page(virt_to_page(page[0].shadow), order); 1232 #endif 1233 1234 for (i = 1; i < (1 << order); i++) 1235 set_page_refcounted(page + i); 1236 } 1237 1238 /* 1239 * Similar to split_page except the page is already free. As this is only 1240 * being used for migration, the migratetype of the block also changes. 1241 * As this is called with interrupts disabled, the caller is responsible 1242 * for calling arch_alloc_page() and kernel_map_page() after interrupts 1243 * are enabled. 1244 * 1245 * Note: this is probably too low level an operation for use in drivers. 1246 * Please consult with lkml before using this in your driver. 1247 */ 1248 int split_free_page(struct page *page) 1249 { 1250 unsigned int order; 1251 unsigned long watermark; 1252 struct zone *zone; 1253 1254 BUG_ON(!PageBuddy(page)); 1255 1256 zone = page_zone(page); 1257 order = page_order(page); 1258 1259 /* Obey watermarks as if the page was being allocated */ 1260 watermark = low_wmark_pages(zone) + (1 << order); 1261 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 1262 return 0; 1263 1264 /* Remove page from free list */ 1265 list_del(&page->lru); 1266 zone->free_area[order].nr_free--; 1267 rmv_page_order(page); 1268 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); 1269 1270 /* Split into individual pages */ 1271 set_page_refcounted(page); 1272 split_page(page, order); 1273 1274 if (order >= pageblock_order - 1) { 1275 struct page *endpage = page + (1 << order) - 1; 1276 for (; page < endpage; page += pageblock_nr_pages) 1277 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1278 } 1279 1280 return 1 << order; 1281 } 1282 1283 /* 1284 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1285 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1286 * or two. 
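 *
 * In rough outline (a simplified sketch of the function below, not the
 * exact code), buffered_rmqueue() does:
 *
 *	if (order == 0) {
 *		refill the per-cpu pcp list from the buddy lists via
 *		rmqueue_bulk() if it is empty, then take one page from the
 *		hot (head) or cold (tail) end of that list;
 *	} else {
 *		take zone->lock and pull a block directly from the buddy
 *		free lists with __rmqueue();
 *	}
 *	bump the PGALLOC / zone statistics and run the prep_new_page()
 *	checks, retrying from the top if a bad page is detected.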
1287 */ 1288 static inline 1289 struct page *buffered_rmqueue(struct zone *preferred_zone, 1290 struct zone *zone, int order, gfp_t gfp_flags, 1291 int migratetype) 1292 { 1293 unsigned long flags; 1294 struct page *page; 1295 int cold = !!(gfp_flags & __GFP_COLD); 1296 1297 again: 1298 if (likely(order == 0)) { 1299 struct per_cpu_pages *pcp; 1300 struct list_head *list; 1301 1302 local_irq_save(flags); 1303 pcp = &this_cpu_ptr(zone->pageset)->pcp; 1304 list = &pcp->lists[migratetype]; 1305 if (list_empty(list)) { 1306 pcp->count += rmqueue_bulk(zone, 0, 1307 pcp->batch, list, 1308 migratetype, cold); 1309 if (unlikely(list_empty(list))) 1310 goto failed; 1311 } 1312 1313 if (cold) 1314 page = list_entry(list->prev, struct page, lru); 1315 else 1316 page = list_entry(list->next, struct page, lru); 1317 1318 list_del(&page->lru); 1319 pcp->count--; 1320 } else { 1321 if (unlikely(gfp_flags & __GFP_NOFAIL)) { 1322 /* 1323 * __GFP_NOFAIL is not to be used in new code. 1324 * 1325 * All __GFP_NOFAIL callers should be fixed so that they 1326 * properly detect and handle allocation failures. 1327 * 1328 * We most definitely don't want callers attempting to 1329 * allocate greater than order-1 page units with 1330 * __GFP_NOFAIL. 1331 */ 1332 WARN_ON_ONCE(order > 1); 1333 } 1334 spin_lock_irqsave(&zone->lock, flags); 1335 page = __rmqueue(zone, order, migratetype); 1336 spin_unlock(&zone->lock); 1337 if (!page) 1338 goto failed; 1339 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); 1340 } 1341 1342 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1343 zone_statistics(preferred_zone, zone, gfp_flags); 1344 local_irq_restore(flags); 1345 1346 VM_BUG_ON(bad_range(zone, page)); 1347 if (prep_new_page(page, order, gfp_flags)) 1348 goto again; 1349 return page; 1350 1351 failed: 1352 local_irq_restore(flags); 1353 return NULL; 1354 } 1355 1356 /* The ALLOC_WMARK bits are used as an index to zone->watermark */ 1357 #define ALLOC_WMARK_MIN WMARK_MIN 1358 #define ALLOC_WMARK_LOW WMARK_LOW 1359 #define ALLOC_WMARK_HIGH WMARK_HIGH 1360 #define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ 1361 1362 /* Mask to get the watermark bits */ 1363 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) 1364 1365 #define ALLOC_HARDER 0x10 /* try to alloc harder */ 1366 #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 1367 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1368 1369 #ifdef CONFIG_FAIL_PAGE_ALLOC 1370 1371 static struct fail_page_alloc_attr { 1372 struct fault_attr attr; 1373 1374 u32 ignore_gfp_highmem; 1375 u32 ignore_gfp_wait; 1376 u32 min_order; 1377 1378 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1379 1380 struct dentry *ignore_gfp_highmem_file; 1381 struct dentry *ignore_gfp_wait_file; 1382 struct dentry *min_order_file; 1383 1384 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1385 1386 } fail_page_alloc = { 1387 .attr = FAULT_ATTR_INITIALIZER, 1388 .ignore_gfp_wait = 1, 1389 .ignore_gfp_highmem = 1, 1390 .min_order = 1, 1391 }; 1392 1393 static int __init setup_fail_page_alloc(char *str) 1394 { 1395 return setup_fault_attr(&fail_page_alloc.attr, str); 1396 } 1397 __setup("fail_page_alloc=", setup_fail_page_alloc); 1398 1399 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1400 { 1401 if (order < fail_page_alloc.min_order) 1402 return 0; 1403 if (gfp_mask & __GFP_NOFAIL) 1404 return 0; 1405 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 1406 return 0; 1407 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 1408 return 0; 
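	/*
	 * Example usage (illustrative; the generic fault_attr knobs such as
	 * probability/times/interval come from the fault-injection framework,
	 * not from this file): the debugfs files created below --
	 * "min-order", "ignore-gfp-wait" and "ignore-gfp-highmem" -- live
	 * under the "fail_page_alloc" directory, so a test might lower
	 * min-order to 0 so that even order-0 allocations become eligible to
	 * fail, or clear ignore-gfp-wait so that __GFP_WAIT allocations are
	 * hit as well.  The "fail_page_alloc=" boot parameter registered
	 * above feeds the same fault_attr via setup_fault_attr().
	 */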
1409 1410 return should_fail(&fail_page_alloc.attr, 1 << order); 1411 } 1412 1413 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1414 1415 static int __init fail_page_alloc_debugfs(void) 1416 { 1417 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 1418 struct dentry *dir; 1419 int err; 1420 1421 err = init_fault_attr_dentries(&fail_page_alloc.attr, 1422 "fail_page_alloc"); 1423 if (err) 1424 return err; 1425 dir = fail_page_alloc.attr.dentries.dir; 1426 1427 fail_page_alloc.ignore_gfp_wait_file = 1428 debugfs_create_bool("ignore-gfp-wait", mode, dir, 1429 &fail_page_alloc.ignore_gfp_wait); 1430 1431 fail_page_alloc.ignore_gfp_highmem_file = 1432 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 1433 &fail_page_alloc.ignore_gfp_highmem); 1434 fail_page_alloc.min_order_file = 1435 debugfs_create_u32("min-order", mode, dir, 1436 &fail_page_alloc.min_order); 1437 1438 if (!fail_page_alloc.ignore_gfp_wait_file || 1439 !fail_page_alloc.ignore_gfp_highmem_file || 1440 !fail_page_alloc.min_order_file) { 1441 err = -ENOMEM; 1442 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); 1443 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); 1444 debugfs_remove(fail_page_alloc.min_order_file); 1445 cleanup_fault_attr_dentries(&fail_page_alloc.attr); 1446 } 1447 1448 return err; 1449 } 1450 1451 late_initcall(fail_page_alloc_debugfs); 1452 1453 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1454 1455 #else /* CONFIG_FAIL_PAGE_ALLOC */ 1456 1457 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1458 { 1459 return 0; 1460 } 1461 1462 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 1463 1464 /* 1465 * Return true if free pages are above 'mark'. This takes into account the order 1466 * of the allocation. 1467 */ 1468 static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, 1469 int classzone_idx, int alloc_flags, long free_pages) 1470 { 1471 /* free_pages my go negative - that's OK */ 1472 long min = mark; 1473 int o; 1474 1475 free_pages -= (1 << order) + 1; 1476 if (alloc_flags & ALLOC_HIGH) 1477 min -= min / 2; 1478 if (alloc_flags & ALLOC_HARDER) 1479 min -= min / 4; 1480 1481 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 1482 return false; 1483 for (o = 0; o < order; o++) { 1484 /* At the next order, this order's pages become unavailable */ 1485 free_pages -= z->free_area[o].nr_free << o; 1486 1487 /* Require fewer higher order pages to be free */ 1488 min >>= 1; 1489 1490 if (free_pages <= min) 1491 return false; 1492 } 1493 return true; 1494 } 1495 1496 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, 1497 int classzone_idx, int alloc_flags) 1498 { 1499 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 1500 zone_page_state(z, NR_FREE_PAGES)); 1501 } 1502 1503 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, 1504 int classzone_idx, int alloc_flags) 1505 { 1506 long free_pages = zone_page_state(z, NR_FREE_PAGES); 1507 1508 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 1509 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 1510 1511 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 1512 free_pages); 1513 } 1514 1515 #ifdef CONFIG_NUMA 1516 /* 1517 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to 1518 * skip over zones that are not allowed by the cpuset, or that have 1519 * been recently (in last second) found to be nearly full. See further 1520 * comments in mmzone.h. 
Reduces cache footprint of zonelist scans 1521 * that have to skip over a lot of full or unallowed zones. 1522 * 1523 * If the zonelist cache is present in the passed in zonelist, then 1524 * returns a pointer to the allowed node mask (either the current 1525 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].) 1526 * 1527 * If the zonelist cache is not available for this zonelist, does 1528 * nothing and returns NULL. 1529 * 1530 * If the fullzones BITMAP in the zonelist cache is stale (more than 1531 * a second since last zap'd) then we zap it out (clear its bits.) 1532 * 1533 * We hold off even calling zlc_setup, until after we've checked the 1534 * first zone in the zonelist, on the theory that most allocations will 1535 * be satisfied from that first zone, so best to examine that zone as 1536 * quickly as we can. 1537 */ 1538 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1539 { 1540 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1541 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1542 1543 zlc = zonelist->zlcache_ptr; 1544 if (!zlc) 1545 return NULL; 1546 1547 if (time_after(jiffies, zlc->last_full_zap + HZ)) { 1548 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1549 zlc->last_full_zap = jiffies; 1550 } 1551 1552 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1553 &cpuset_current_mems_allowed : 1554 &node_states[N_HIGH_MEMORY]; 1555 return allowednodes; 1556 } 1557 1558 /* 1559 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1560 * if it is worth looking at further for free memory: 1561 * 1) Check that the zone isn't thought to be full (doesn't have its 1562 * bit set in the zonelist_cache fullzones BITMAP). 1563 * 2) Check that the zones node (obtained from the zonelist_cache 1564 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1565 * Return true (non-zero) if zone is worth looking at further, or 1566 * else return false (zero) if it is not. 1567 * 1568 * This check -ignores- the distinction between various watermarks, 1569 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is 1570 * found to be full for any variation of these watermarks, it will 1571 * be considered full for up to one second by all requests, unless 1572 * we are so low on memory on all allowed nodes that we are forced 1573 * into the second scan of the zonelist. 1574 * 1575 * In the second scan we ignore this zonelist cache and exactly 1576 * apply the watermarks to all zones, even it is slower to do so. 1577 * We are low on memory in the second scan, and should leave no stone 1578 * unturned looking for a free page. 1579 */ 1580 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1581 nodemask_t *allowednodes) 1582 { 1583 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1584 int i; /* index of *z in zonelist zones */ 1585 int n; /* node that zone *z is on */ 1586 1587 zlc = zonelist->zlcache_ptr; 1588 if (!zlc) 1589 return 1; 1590 1591 i = z - zonelist->_zonerefs; 1592 n = zlc->z_to_n[i]; 1593 1594 /* This zone is worth trying if it is allowed but not full */ 1595 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1596 } 1597 1598 /* 1599 * Given 'z' scanning a zonelist, set the corresponding bit in 1600 * zlc->fullzones, so that subsequent attempts to allocate a page 1601 * from that zone don't waste time re-examining it. 
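 *
 * A short illustration (zonelist layout assumed): if zoneref 'z' is the
 * third entry in zonelist->_zonerefs, then i = 2 and bit 2 of
 * zlc->fullzones gets set here; zlc_zone_worth_trying() above will then
 * skip that zone on later scans until zlc_setup() notices that a second
 * has passed (time_after(jiffies, zlc->last_full_zap + HZ)) and zeroes
 * the whole bitmap again.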
1602 */ 1603 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1604 { 1605 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1606 int i; /* index of *z in zonelist zones */ 1607 1608 zlc = zonelist->zlcache_ptr; 1609 if (!zlc) 1610 return; 1611 1612 i = z - zonelist->_zonerefs; 1613 1614 set_bit(i, zlc->fullzones); 1615 } 1616 1617 #else /* CONFIG_NUMA */ 1618 1619 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1620 { 1621 return NULL; 1622 } 1623 1624 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1625 nodemask_t *allowednodes) 1626 { 1627 return 1; 1628 } 1629 1630 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1631 { 1632 } 1633 #endif /* CONFIG_NUMA */ 1634 1635 /* 1636 * get_page_from_freelist goes through the zonelist trying to allocate 1637 * a page. 1638 */ 1639 static struct page * 1640 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 1641 struct zonelist *zonelist, int high_zoneidx, int alloc_flags, 1642 struct zone *preferred_zone, int migratetype) 1643 { 1644 struct zoneref *z; 1645 struct page *page = NULL; 1646 int classzone_idx; 1647 struct zone *zone; 1648 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1649 int zlc_active = 0; /* set if using zonelist_cache */ 1650 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1651 1652 classzone_idx = zone_idx(preferred_zone); 1653 zonelist_scan: 1654 /* 1655 * Scan zonelist, looking for a zone with enough free. 1656 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1657 */ 1658 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1659 high_zoneidx, nodemask) { 1660 if (NUMA_BUILD && zlc_active && 1661 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1662 continue; 1663 if ((alloc_flags & ALLOC_CPUSET) && 1664 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1665 goto try_next_zone; 1666 1667 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 1668 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1669 unsigned long mark; 1670 int ret; 1671 1672 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 1673 if (zone_watermark_ok(zone, order, mark, 1674 classzone_idx, alloc_flags)) 1675 goto try_this_zone; 1676 1677 if (zone_reclaim_mode == 0) 1678 goto this_zone_full; 1679 1680 ret = zone_reclaim(zone, gfp_mask, order); 1681 switch (ret) { 1682 case ZONE_RECLAIM_NOSCAN: 1683 /* did not scan */ 1684 goto try_next_zone; 1685 case ZONE_RECLAIM_FULL: 1686 /* scanned but unreclaimable */ 1687 goto this_zone_full; 1688 default: 1689 /* did we reclaim enough */ 1690 if (!zone_watermark_ok(zone, order, mark, 1691 classzone_idx, alloc_flags)) 1692 goto this_zone_full; 1693 } 1694 } 1695 1696 try_this_zone: 1697 page = buffered_rmqueue(preferred_zone, zone, order, 1698 gfp_mask, migratetype); 1699 if (page) 1700 break; 1701 this_zone_full: 1702 if (NUMA_BUILD) 1703 zlc_mark_zone_full(zonelist, z); 1704 try_next_zone: 1705 if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) { 1706 /* 1707 * we do zlc_setup after the first zone is tried but only 1708 * if there are multiple nodes make it worthwhile 1709 */ 1710 allowednodes = zlc_setup(zonelist, alloc_flags); 1711 zlc_active = 1; 1712 did_zlc_setup = 1; 1713 } 1714 } 1715 1716 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1717 /* Disable zlc cache for second zonelist scan */ 1718 zlc_active = 0; 1719 goto zonelist_scan; 1720 } 1721 return page; 1722 } 1723 1724 /* 1725 * Large machines with many 
possible nodes should not always dump per-node 1726 * meminfo in irq context. 1727 */ 1728 static inline bool should_suppress_show_mem(void) 1729 { 1730 bool ret = false; 1731 1732 #if NODES_SHIFT > 8 1733 ret = in_interrupt(); 1734 #endif 1735 return ret; 1736 } 1737 1738 static inline int 1739 should_alloc_retry(gfp_t gfp_mask, unsigned int order, 1740 unsigned long pages_reclaimed) 1741 { 1742 /* Do not loop if specifically requested */ 1743 if (gfp_mask & __GFP_NORETRY) 1744 return 0; 1745 1746 /* 1747 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 1748 * means __GFP_NOFAIL, but that may not be true in other 1749 * implementations. 1750 */ 1751 if (order <= PAGE_ALLOC_COSTLY_ORDER) 1752 return 1; 1753 1754 /* 1755 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 1756 * specified, then we retry until we no longer reclaim any pages 1757 * (above), or we've reclaimed an order of pages at least as 1758 * large as the allocation's order. In both cases, if the 1759 * allocation still fails, we stop retrying. 1760 */ 1761 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) 1762 return 1; 1763 1764 /* 1765 * Don't let big-order allocations loop unless the caller 1766 * explicitly requests that. 1767 */ 1768 if (gfp_mask & __GFP_NOFAIL) 1769 return 1; 1770 1771 return 0; 1772 } 1773 1774 static inline struct page * 1775 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 1776 struct zonelist *zonelist, enum zone_type high_zoneidx, 1777 nodemask_t *nodemask, struct zone *preferred_zone, 1778 int migratetype) 1779 { 1780 struct page *page; 1781 1782 /* Acquire the OOM killer lock for the zones in zonelist */ 1783 if (!try_set_zonelist_oom(zonelist, gfp_mask)) { 1784 schedule_timeout_uninterruptible(1); 1785 return NULL; 1786 } 1787 1788 /* 1789 * Go through the zonelist yet one more time, keep very high watermark 1790 * here, this is only to catch a parallel oom killing, we must fail if 1791 * we're still under heavy pressure. 1792 */ 1793 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 1794 order, zonelist, high_zoneidx, 1795 ALLOC_WMARK_HIGH|ALLOC_CPUSET, 1796 preferred_zone, migratetype); 1797 if (page) 1798 goto out; 1799 1800 if (!(gfp_mask & __GFP_NOFAIL)) { 1801 /* The OOM killer will not help higher order allocs */ 1802 if (order > PAGE_ALLOC_COSTLY_ORDER) 1803 goto out; 1804 /* The OOM killer does not needlessly kill tasks for lowmem */ 1805 if (high_zoneidx < ZONE_NORMAL) 1806 goto out; 1807 /* 1808 * GFP_THISNODE contains __GFP_NORETRY and we never hit this. 1809 * Sanity check for bare calls of __GFP_THISNODE, not real OOM. 1810 * The caller should handle page allocation failure by itself if 1811 * it specifies __GFP_THISNODE. 1812 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER. 
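 *
 * (For reference, GFP_THISNODE on NUMA builds is __GFP_THISNODE |
 * __GFP_NORETRY | __GFP_NOWARN -- see the comment in
 * __alloc_pages_slowpath() below -- so a full GFP_THISNODE request
 * carries __GFP_NORETRY and never reaches this OOM path at all; only a
 * bare __GFP_THISNODE can get here, and it is bounced back to the
 * caller.)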
1813 */ 1814 if (gfp_mask & __GFP_THISNODE) 1815 goto out; 1816 } 1817 /* Exhausted what can be done so it's blamo time */ 1818 out_of_memory(zonelist, gfp_mask, order, nodemask); 1819 1820 out: 1821 clear_zonelist_oom(zonelist, gfp_mask); 1822 return page; 1823 } 1824 1825 #ifdef CONFIG_COMPACTION 1826 /* Try memory compaction for high-order allocations before reclaim */ 1827 static struct page * 1828 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 1829 struct zonelist *zonelist, enum zone_type high_zoneidx, 1830 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 1831 int migratetype, unsigned long *did_some_progress, 1832 bool sync_migration) 1833 { 1834 struct page *page; 1835 1836 if (!order || compaction_deferred(preferred_zone)) 1837 return NULL; 1838 1839 current->flags |= PF_MEMALLOC; 1840 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, 1841 nodemask, sync_migration); 1842 current->flags &= ~PF_MEMALLOC; 1843 if (*did_some_progress != COMPACT_SKIPPED) { 1844 1845 /* Page migration frees to the PCP lists but we want merging */ 1846 drain_pages(get_cpu()); 1847 put_cpu(); 1848 1849 page = get_page_from_freelist(gfp_mask, nodemask, 1850 order, zonelist, high_zoneidx, 1851 alloc_flags, preferred_zone, 1852 migratetype); 1853 if (page) { 1854 preferred_zone->compact_considered = 0; 1855 preferred_zone->compact_defer_shift = 0; 1856 count_vm_event(COMPACTSUCCESS); 1857 return page; 1858 } 1859 1860 /* 1861 * It's bad if compaction run occurs and fails. 1862 * The most likely reason is that pages exist, 1863 * but not enough to satisfy watermarks. 1864 */ 1865 count_vm_event(COMPACTFAIL); 1866 defer_compaction(preferred_zone); 1867 1868 cond_resched(); 1869 } 1870 1871 return NULL; 1872 } 1873 #else 1874 static inline struct page * 1875 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 1876 struct zonelist *zonelist, enum zone_type high_zoneidx, 1877 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 1878 int migratetype, unsigned long *did_some_progress, 1879 bool sync_migration) 1880 { 1881 return NULL; 1882 } 1883 #endif /* CONFIG_COMPACTION */ 1884 1885 /* The really slow allocator path where we enter direct reclaim */ 1886 static inline struct page * 1887 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 1888 struct zonelist *zonelist, enum zone_type high_zoneidx, 1889 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 1890 int migratetype, unsigned long *did_some_progress) 1891 { 1892 struct page *page = NULL; 1893 struct reclaim_state reclaim_state; 1894 bool drained = false; 1895 1896 cond_resched(); 1897 1898 /* We now go into synchronous reclaim */ 1899 cpuset_memory_pressure_bump(); 1900 current->flags |= PF_MEMALLOC; 1901 lockdep_set_current_reclaim_state(gfp_mask); 1902 reclaim_state.reclaimed_slab = 0; 1903 current->reclaim_state = &reclaim_state; 1904 1905 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); 1906 1907 current->reclaim_state = NULL; 1908 lockdep_clear_current_reclaim_state(); 1909 current->flags &= ~PF_MEMALLOC; 1910 1911 cond_resched(); 1912 1913 if (unlikely(!(*did_some_progress))) 1914 return NULL; 1915 1916 retry: 1917 page = get_page_from_freelist(gfp_mask, nodemask, order, 1918 zonelist, high_zoneidx, 1919 alloc_flags, preferred_zone, 1920 migratetype); 1921 1922 /* 1923 * If an allocation failed after direct reclaim, it could be because 1924 * pages are pinned on the per-cpu lists. 
Drain them and try again 1925 */ 1926 if (!page && !drained) { 1927 drain_all_pages(); 1928 drained = true; 1929 goto retry; 1930 } 1931 1932 return page; 1933 } 1934 1935 /* 1936 * This is called in the allocator slow-path if the allocation request is of 1937 * sufficient urgency to ignore watermarks and take other desperate measures 1938 */ 1939 static inline struct page * 1940 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 1941 struct zonelist *zonelist, enum zone_type high_zoneidx, 1942 nodemask_t *nodemask, struct zone *preferred_zone, 1943 int migratetype) 1944 { 1945 struct page *page; 1946 1947 do { 1948 page = get_page_from_freelist(gfp_mask, nodemask, order, 1949 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS, 1950 preferred_zone, migratetype); 1951 1952 if (!page && gfp_mask & __GFP_NOFAIL) 1953 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 1954 } while (!page && (gfp_mask & __GFP_NOFAIL)); 1955 1956 return page; 1957 } 1958 1959 static inline 1960 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist, 1961 enum zone_type high_zoneidx, 1962 enum zone_type classzone_idx) 1963 { 1964 struct zoneref *z; 1965 struct zone *zone; 1966 1967 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 1968 wakeup_kswapd(zone, order, classzone_idx); 1969 } 1970 1971 static inline int 1972 gfp_to_alloc_flags(gfp_t gfp_mask) 1973 { 1974 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 1975 const gfp_t wait = gfp_mask & __GFP_WAIT; 1976 1977 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 1978 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 1979 1980 /* 1981 * The caller may dip into page reserves a bit more if the caller 1982 * cannot run direct reclaim, or if the caller has realtime scheduling 1983 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1984 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1985 */ 1986 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 1987 1988 if (!wait) { 1989 /* 1990 * Not worth trying to allocate harder for 1991 * __GFP_NOMEMALLOC even if it can't schedule. 1992 */ 1993 if (!(gfp_mask & __GFP_NOMEMALLOC)) 1994 alloc_flags |= ALLOC_HARDER; 1995 /* 1996 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1997 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1998 */ 1999 alloc_flags &= ~ALLOC_CPUSET; 2000 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2001 alloc_flags |= ALLOC_HARDER; 2002 2003 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 2004 if (!in_interrupt() && 2005 ((current->flags & PF_MEMALLOC) || 2006 unlikely(test_thread_flag(TIF_MEMDIE)))) 2007 alloc_flags |= ALLOC_NO_WATERMARKS; 2008 } 2009 2010 return alloc_flags; 2011 } 2012 2013 static inline struct page * 2014 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 2015 struct zonelist *zonelist, enum zone_type high_zoneidx, 2016 nodemask_t *nodemask, struct zone *preferred_zone, 2017 int migratetype) 2018 { 2019 const gfp_t wait = gfp_mask & __GFP_WAIT; 2020 struct page *page = NULL; 2021 int alloc_flags; 2022 unsigned long pages_reclaimed = 0; 2023 unsigned long did_some_progress; 2024 bool sync_migration = false; 2025 2026 /* 2027 * In the slowpath, we sanity check order to avoid ever trying to 2028 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 2029 * be using allocators in order of preference for an area that is 2030 * too large. 
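 * With the common MAX_ORDER of 11 and 4K pages, for example, the largest
 * buddy block is order 10 (1024 pages, 4MB); an order-11 or larger request
 * can never be satisfied from the free lists and is rejected immediately.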
2031 */ 2032 if (order >= MAX_ORDER) { 2033 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 2034 return NULL; 2035 } 2036 2037 /* 2038 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 2039 * __GFP_NOWARN set) should not cause reclaim since the subsystem 2040 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 2041 * using a larger set of nodes after it has established that the 2042 * allowed per node queues are empty and that nodes are 2043 * over allocated. 2044 */ 2045 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 2046 goto nopage; 2047 2048 restart: 2049 if (!(gfp_mask & __GFP_NO_KSWAPD)) 2050 wake_all_kswapd(order, zonelist, high_zoneidx, 2051 zone_idx(preferred_zone)); 2052 2053 /* 2054 * OK, we're below the kswapd watermark and have kicked background 2055 * reclaim. Now things get more complex, so set up alloc_flags according 2056 * to how we want to proceed. 2057 */ 2058 alloc_flags = gfp_to_alloc_flags(gfp_mask); 2059 2060 /* 2061 * Find the true preferred zone if the allocation is unconstrained by 2062 * cpusets. 2063 */ 2064 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) 2065 first_zones_zonelist(zonelist, high_zoneidx, NULL, 2066 &preferred_zone); 2067 2068 /* This is the last chance, in general, before the goto nopage. */ 2069 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 2070 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, 2071 preferred_zone, migratetype); 2072 if (page) 2073 goto got_pg; 2074 2075 rebalance: 2076 /* Allocate without watermarks if the context allows */ 2077 if (alloc_flags & ALLOC_NO_WATERMARKS) { 2078 page = __alloc_pages_high_priority(gfp_mask, order, 2079 zonelist, high_zoneidx, nodemask, 2080 preferred_zone, migratetype); 2081 if (page) 2082 goto got_pg; 2083 } 2084 2085 /* Atomic allocations - we can't balance anything */ 2086 if (!wait) 2087 goto nopage; 2088 2089 /* Avoid recursion of direct reclaim */ 2090 if (current->flags & PF_MEMALLOC) 2091 goto nopage; 2092 2093 /* Avoid allocations with no watermarks from looping endlessly */ 2094 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) 2095 goto nopage; 2096 2097 /* 2098 * Try direct compaction. The first pass is asynchronous. Subsequent 2099 * attempts after direct reclaim are synchronous 2100 */ 2101 page = __alloc_pages_direct_compact(gfp_mask, order, 2102 zonelist, high_zoneidx, 2103 nodemask, 2104 alloc_flags, preferred_zone, 2105 migratetype, &did_some_progress, 2106 sync_migration); 2107 if (page) 2108 goto got_pg; 2109 sync_migration = !(gfp_mask & __GFP_NO_KSWAPD); 2110 2111 /* Try direct reclaim and then allocating */ 2112 page = __alloc_pages_direct_reclaim(gfp_mask, order, 2113 zonelist, high_zoneidx, 2114 nodemask, 2115 alloc_flags, preferred_zone, 2116 migratetype, &did_some_progress); 2117 if (page) 2118 goto got_pg; 2119 2120 /* 2121 * If we failed to make any progress reclaiming, then we are 2122 * running out of options and have to consider going OOM 2123 */ 2124 if (!did_some_progress) { 2125 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 2126 if (oom_killer_disabled) 2127 goto nopage; 2128 page = __alloc_pages_may_oom(gfp_mask, order, 2129 zonelist, high_zoneidx, 2130 nodemask, preferred_zone, 2131 migratetype); 2132 if (page) 2133 goto got_pg; 2134 2135 if (!(gfp_mask & __GFP_NOFAIL)) { 2136 /* 2137 * The oom killer is not called for high-order 2138 * allocations that may fail, so if no progress 2139 * is being made, there are no other options and 2140 * retrying is unlikely to help. 
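 * An order-4 GFP_KERNEL request (64K with 4K pages) that reclaimed nothing
 * lands here, for instance: order 4 is above PAGE_ALLOC_COSTLY_ORDER (3),
 * so no task was killed on its behalf and restarting would only loop.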
2141 */ 2142 if (order > PAGE_ALLOC_COSTLY_ORDER) 2143 goto nopage; 2144 /* 2145 * The oom killer is not called for lowmem 2146 * allocations to prevent needlessly killing 2147 * innocent tasks. 2148 */ 2149 if (high_zoneidx < ZONE_NORMAL) 2150 goto nopage; 2151 } 2152 2153 goto restart; 2154 } 2155 } 2156 2157 /* Check if we should retry the allocation */ 2158 pages_reclaimed += did_some_progress; 2159 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { 2160 /* Wait for some write requests to complete then retry */ 2161 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2162 goto rebalance; 2163 } else { 2164 /* 2165 * High-order allocations do not necessarily loop after 2166 * direct reclaim and reclaim/compaction depends on compaction 2167 * being called after reclaim so call directly if necessary 2168 */ 2169 page = __alloc_pages_direct_compact(gfp_mask, order, 2170 zonelist, high_zoneidx, 2171 nodemask, 2172 alloc_flags, preferred_zone, 2173 migratetype, &did_some_progress, 2174 sync_migration); 2175 if (page) 2176 goto got_pg; 2177 } 2178 2179 nopage: 2180 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 2181 unsigned int filter = SHOW_MEM_FILTER_NODES; 2182 2183 /* 2184 * This documents exceptions given to allocations in certain 2185 * contexts that are allowed to allocate outside current's set 2186 * of allowed nodes. 2187 */ 2188 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2189 if (test_thread_flag(TIF_MEMDIE) || 2190 (current->flags & (PF_MEMALLOC | PF_EXITING))) 2191 filter &= ~SHOW_MEM_FILTER_NODES; 2192 if (in_interrupt() || !wait) 2193 filter &= ~SHOW_MEM_FILTER_NODES; 2194 2195 pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n", 2196 current->comm, order, gfp_mask); 2197 dump_stack(); 2198 if (!should_suppress_show_mem()) 2199 show_mem(filter); 2200 } 2201 return page; 2202 got_pg: 2203 if (kmemcheck_enabled) 2204 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 2205 return page; 2206 2207 } 2208 2209 /* 2210 * This is the 'heart' of the zoned buddy allocator. 2211 */ 2212 struct page * 2213 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 2214 struct zonelist *zonelist, nodemask_t *nodemask) 2215 { 2216 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 2217 struct zone *preferred_zone; 2218 struct page *page; 2219 int migratetype = allocflags_to_migratetype(gfp_mask); 2220 2221 gfp_mask &= gfp_allowed_mask; 2222 2223 lockdep_trace_alloc(gfp_mask); 2224 2225 might_sleep_if(gfp_mask & __GFP_WAIT); 2226 2227 if (should_fail_alloc_page(gfp_mask, order)) 2228 return NULL; 2229 2230 /* 2231 * Check the zones suitable for the gfp_mask contain at least one 2232 * valid zone. It's possible to have an empty zonelist as a result 2233 * of GFP_THISNODE and a memoryless node 2234 */ 2235 if (unlikely(!zonelist->_zonerefs->zone)) 2236 return NULL; 2237 2238 get_mems_allowed(); 2239 /* The preferred zone is used for statistics later */ 2240 first_zones_zonelist(zonelist, high_zoneidx, 2241 nodemask ? 
: &cpuset_current_mems_allowed, 2242 &preferred_zone); 2243 if (!preferred_zone) { 2244 put_mems_allowed(); 2245 return NULL; 2246 } 2247 2248 /* First allocation attempt */ 2249 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 2250 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET, 2251 preferred_zone, migratetype); 2252 if (unlikely(!page)) 2253 page = __alloc_pages_slowpath(gfp_mask, order, 2254 zonelist, high_zoneidx, nodemask, 2255 preferred_zone, migratetype); 2256 put_mems_allowed(); 2257 2258 trace_mm_page_alloc(page, order, gfp_mask, migratetype); 2259 return page; 2260 } 2261 EXPORT_SYMBOL(__alloc_pages_nodemask); 2262 2263 /* 2264 * Common helper functions. 2265 */ 2266 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 2267 { 2268 struct page *page; 2269 2270 /* 2271 * __get_free_pages() returns a 32-bit address, which cannot represent 2272 * a highmem page 2273 */ 2274 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 2275 2276 page = alloc_pages(gfp_mask, order); 2277 if (!page) 2278 return 0; 2279 return (unsigned long) page_address(page); 2280 } 2281 EXPORT_SYMBOL(__get_free_pages); 2282 2283 unsigned long get_zeroed_page(gfp_t gfp_mask) 2284 { 2285 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 2286 } 2287 EXPORT_SYMBOL(get_zeroed_page); 2288 2289 void __pagevec_free(struct pagevec *pvec) 2290 { 2291 int i = pagevec_count(pvec); 2292 2293 while (--i >= 0) { 2294 trace_mm_pagevec_free(pvec->pages[i], pvec->cold); 2295 free_hot_cold_page(pvec->pages[i], pvec->cold); 2296 } 2297 } 2298 2299 void __free_pages(struct page *page, unsigned int order) 2300 { 2301 if (put_page_testzero(page)) { 2302 if (order == 0) 2303 free_hot_cold_page(page, 0); 2304 else 2305 __free_pages_ok(page, order); 2306 } 2307 } 2308 2309 EXPORT_SYMBOL(__free_pages); 2310 2311 void free_pages(unsigned long addr, unsigned int order) 2312 { 2313 if (addr != 0) { 2314 VM_BUG_ON(!virt_addr_valid((void *)addr)); 2315 __free_pages(virt_to_page((void *)addr), order); 2316 } 2317 } 2318 2319 EXPORT_SYMBOL(free_pages); 2320 2321 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) 2322 { 2323 if (addr) { 2324 unsigned long alloc_end = addr + (PAGE_SIZE << order); 2325 unsigned long used = addr + PAGE_ALIGN(size); 2326 2327 split_page(virt_to_page((void *)addr), order); 2328 while (used < alloc_end) { 2329 free_page(used); 2330 used += PAGE_SIZE; 2331 } 2332 } 2333 return (void *)addr; 2334 } 2335 2336 /** 2337 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 2338 * @size: the number of bytes to allocate 2339 * @gfp_mask: GFP flags for the allocation 2340 * 2341 * This function is similar to alloc_pages(), except that it allocates the 2342 * minimum number of pages to satisfy the request. alloc_pages() can only 2343 * allocate memory in power-of-two pages. 2344 * 2345 * This function is also limited by MAX_ORDER. 2346 * 2347 * Memory allocated by this function must be released by free_pages_exact(). 2348 */ 2349 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 2350 { 2351 unsigned int order = get_order(size); 2352 unsigned long addr; 2353 2354 addr = __get_free_pages(gfp_mask, order); 2355 return make_alloc_exact(addr, order, size); 2356 } 2357 EXPORT_SYMBOL(alloc_pages_exact); 2358 2359 /** 2360 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 2361 * pages on a node. 
2362 * @nid: the preferred node ID where memory should be allocated 2363 * @size: the number of bytes to allocate 2364 * @gfp_mask: GFP flags for the allocation 2365 * 2366 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 2367 * back. 2368 * Note this is not alloc_pages_exact_node() which allocates on a specific node, 2369 * but is not exact. 2370 */ 2371 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 2372 { 2373 unsigned order = get_order(size); 2374 struct page *p = alloc_pages_node(nid, gfp_mask, order); 2375 if (!p) 2376 return NULL; 2377 return make_alloc_exact((unsigned long)page_address(p), order, size); 2378 } 2379 EXPORT_SYMBOL(alloc_pages_exact_nid); 2380 2381 /** 2382 * free_pages_exact - release memory allocated via alloc_pages_exact() 2383 * @virt: the value returned by alloc_pages_exact. 2384 * @size: size of allocation, same value as passed to alloc_pages_exact(). 2385 * 2386 * Release the memory allocated by a previous call to alloc_pages_exact. 2387 */ 2388 void free_pages_exact(void *virt, size_t size) 2389 { 2390 unsigned long addr = (unsigned long)virt; 2391 unsigned long end = addr + PAGE_ALIGN(size); 2392 2393 while (addr < end) { 2394 free_page(addr); 2395 addr += PAGE_SIZE; 2396 } 2397 } 2398 EXPORT_SYMBOL(free_pages_exact); 2399 2400 static unsigned int nr_free_zone_pages(int offset) 2401 { 2402 struct zoneref *z; 2403 struct zone *zone; 2404 2405 /* Just pick one node, since fallback list is circular */ 2406 unsigned int sum = 0; 2407 2408 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 2409 2410 for_each_zone_zonelist(zone, z, zonelist, offset) { 2411 unsigned long size = zone->present_pages; 2412 unsigned long high = high_wmark_pages(zone); 2413 if (size > high) 2414 sum += size - high; 2415 } 2416 2417 return sum; 2418 } 2419 2420 /* 2421 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 2422 */ 2423 unsigned int nr_free_buffer_pages(void) 2424 { 2425 return nr_free_zone_pages(gfp_zone(GFP_USER)); 2426 } 2427 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 2428 2429 /* 2430 * Amount of free RAM allocatable within all zones 2431 */ 2432 unsigned int nr_free_pagecache_pages(void) 2433 { 2434 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 2435 } 2436 2437 static inline void show_node(struct zone *zone) 2438 { 2439 if (NUMA_BUILD) 2440 printk("Node %d ", zone_to_nid(zone)); 2441 } 2442 2443 void si_meminfo(struct sysinfo *val) 2444 { 2445 val->totalram = totalram_pages; 2446 val->sharedram = 0; 2447 val->freeram = global_page_state(NR_FREE_PAGES); 2448 val->bufferram = nr_blockdev_pages(); 2449 val->totalhigh = totalhigh_pages; 2450 val->freehigh = nr_free_highpages(); 2451 val->mem_unit = PAGE_SIZE; 2452 } 2453 2454 EXPORT_SYMBOL(si_meminfo); 2455 2456 #ifdef CONFIG_NUMA 2457 void si_meminfo_node(struct sysinfo *val, int nid) 2458 { 2459 pg_data_t *pgdat = NODE_DATA(nid); 2460 2461 val->totalram = pgdat->node_present_pages; 2462 val->freeram = node_page_state(nid, NR_FREE_PAGES); 2463 #ifdef CONFIG_HIGHMEM 2464 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 2465 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 2466 NR_FREE_PAGES); 2467 #else 2468 val->totalhigh = 0; 2469 val->freehigh = 0; 2470 #endif 2471 val->mem_unit = PAGE_SIZE; 2472 } 2473 #endif 2474 2475 /* 2476 * Determine whether the zone's node should be displayed or not, depending on 2477 * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas(). 
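 * For example, __show_free_areas(SHOW_MEM_FILTER_NODES) skips zones whose
 * node is not in current's cpuset, while __show_free_areas(0) reports every
 * populated zone.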
2478 */ 2479 static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone) 2480 { 2481 bool ret = false; 2482 2483 if (!(flags & SHOW_MEM_FILTER_NODES)) 2484 goto out; 2485 2486 get_mems_allowed(); 2487 ret = !node_isset(zone->zone_pgdat->node_id, 2488 cpuset_current_mems_allowed); 2489 put_mems_allowed(); 2490 out: 2491 return ret; 2492 } 2493 2494 #define K(x) ((x) << (PAGE_SHIFT-10)) 2495 2496 /* 2497 * Show free area list (used inside shift_scroll-lock stuff) 2498 * We also calculate the percentage fragmentation. We do this by counting the 2499 * memory on each free list with the exception of the first item on the list. 2500 * Suppresses nodes that are not allowed by current's cpuset if 2501 * SHOW_MEM_FILTER_NODES is passed. 2502 */ 2503 void __show_free_areas(unsigned int filter) 2504 { 2505 int cpu; 2506 struct zone *zone; 2507 2508 for_each_populated_zone(zone) { 2509 if (skip_free_areas_zone(filter, zone)) 2510 continue; 2511 show_node(zone); 2512 printk("%s per-cpu:\n", zone->name); 2513 2514 for_each_online_cpu(cpu) { 2515 struct per_cpu_pageset *pageset; 2516 2517 pageset = per_cpu_ptr(zone->pageset, cpu); 2518 2519 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 2520 cpu, pageset->pcp.high, 2521 pageset->pcp.batch, pageset->pcp.count); 2522 } 2523 } 2524 2525 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 2526 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 2527 " unevictable:%lu" 2528 " dirty:%lu writeback:%lu unstable:%lu\n" 2529 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" 2530 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", 2531 global_page_state(NR_ACTIVE_ANON), 2532 global_page_state(NR_INACTIVE_ANON), 2533 global_page_state(NR_ISOLATED_ANON), 2534 global_page_state(NR_ACTIVE_FILE), 2535 global_page_state(NR_INACTIVE_FILE), 2536 global_page_state(NR_ISOLATED_FILE), 2537 global_page_state(NR_UNEVICTABLE), 2538 global_page_state(NR_FILE_DIRTY), 2539 global_page_state(NR_WRITEBACK), 2540 global_page_state(NR_UNSTABLE_NFS), 2541 global_page_state(NR_FREE_PAGES), 2542 global_page_state(NR_SLAB_RECLAIMABLE), 2543 global_page_state(NR_SLAB_UNRECLAIMABLE), 2544 global_page_state(NR_FILE_MAPPED), 2545 global_page_state(NR_SHMEM), 2546 global_page_state(NR_PAGETABLE), 2547 global_page_state(NR_BOUNCE)); 2548 2549 for_each_populated_zone(zone) { 2550 int i; 2551 2552 if (skip_free_areas_zone(filter, zone)) 2553 continue; 2554 show_node(zone); 2555 printk("%s" 2556 " free:%lukB" 2557 " min:%lukB" 2558 " low:%lukB" 2559 " high:%lukB" 2560 " active_anon:%lukB" 2561 " inactive_anon:%lukB" 2562 " active_file:%lukB" 2563 " inactive_file:%lukB" 2564 " unevictable:%lukB" 2565 " isolated(anon):%lukB" 2566 " isolated(file):%lukB" 2567 " present:%lukB" 2568 " mlocked:%lukB" 2569 " dirty:%lukB" 2570 " writeback:%lukB" 2571 " mapped:%lukB" 2572 " shmem:%lukB" 2573 " slab_reclaimable:%lukB" 2574 " slab_unreclaimable:%lukB" 2575 " kernel_stack:%lukB" 2576 " pagetables:%lukB" 2577 " unstable:%lukB" 2578 " bounce:%lukB" 2579 " writeback_tmp:%lukB" 2580 " pages_scanned:%lu" 2581 " all_unreclaimable? 
%s" 2582 "\n", 2583 zone->name, 2584 K(zone_page_state(zone, NR_FREE_PAGES)), 2585 K(min_wmark_pages(zone)), 2586 K(low_wmark_pages(zone)), 2587 K(high_wmark_pages(zone)), 2588 K(zone_page_state(zone, NR_ACTIVE_ANON)), 2589 K(zone_page_state(zone, NR_INACTIVE_ANON)), 2590 K(zone_page_state(zone, NR_ACTIVE_FILE)), 2591 K(zone_page_state(zone, NR_INACTIVE_FILE)), 2592 K(zone_page_state(zone, NR_UNEVICTABLE)), 2593 K(zone_page_state(zone, NR_ISOLATED_ANON)), 2594 K(zone_page_state(zone, NR_ISOLATED_FILE)), 2595 K(zone->present_pages), 2596 K(zone_page_state(zone, NR_MLOCK)), 2597 K(zone_page_state(zone, NR_FILE_DIRTY)), 2598 K(zone_page_state(zone, NR_WRITEBACK)), 2599 K(zone_page_state(zone, NR_FILE_MAPPED)), 2600 K(zone_page_state(zone, NR_SHMEM)), 2601 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 2602 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 2603 zone_page_state(zone, NR_KERNEL_STACK) * 2604 THREAD_SIZE / 1024, 2605 K(zone_page_state(zone, NR_PAGETABLE)), 2606 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 2607 K(zone_page_state(zone, NR_BOUNCE)), 2608 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 2609 zone->pages_scanned, 2610 (zone->all_unreclaimable ? "yes" : "no") 2611 ); 2612 printk("lowmem_reserve[]:"); 2613 for (i = 0; i < MAX_NR_ZONES; i++) 2614 printk(" %lu", zone->lowmem_reserve[i]); 2615 printk("\n"); 2616 } 2617 2618 for_each_populated_zone(zone) { 2619 unsigned long nr[MAX_ORDER], flags, order, total = 0; 2620 2621 if (skip_free_areas_zone(filter, zone)) 2622 continue; 2623 show_node(zone); 2624 printk("%s: ", zone->name); 2625 2626 spin_lock_irqsave(&zone->lock, flags); 2627 for (order = 0; order < MAX_ORDER; order++) { 2628 nr[order] = zone->free_area[order].nr_free; 2629 total += nr[order] << order; 2630 } 2631 spin_unlock_irqrestore(&zone->lock, flags); 2632 for (order = 0; order < MAX_ORDER; order++) 2633 printk("%lu*%lukB ", nr[order], K(1UL) << order); 2634 printk("= %lukB\n", K(total)); 2635 } 2636 2637 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 2638 2639 show_swap_cache_info(); 2640 } 2641 2642 void show_free_areas(void) 2643 { 2644 __show_free_areas(0); 2645 } 2646 2647 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 2648 { 2649 zoneref->zone = zone; 2650 zoneref->zone_idx = zone_idx(zone); 2651 } 2652 2653 /* 2654 * Builds allocation fallback zone lists. 2655 * 2656 * Add all populated zones of a node to the zonelist. 2657 */ 2658 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 2659 int nr_zones, enum zone_type zone_type) 2660 { 2661 struct zone *zone; 2662 2663 BUG_ON(zone_type >= MAX_NR_ZONES); 2664 zone_type++; 2665 2666 do { 2667 zone_type--; 2668 zone = pgdat->node_zones + zone_type; 2669 if (populated_zone(zone)) { 2670 zoneref_set_zone(zone, 2671 &zonelist->_zonerefs[nr_zones++]); 2672 check_highest_zone(zone_type); 2673 } 2674 2675 } while (zone_type); 2676 return nr_zones; 2677 } 2678 2679 2680 /* 2681 * zonelist_order: 2682 * 0 = automatic detection of better ordering. 2683 * 1 = order by ([node] distance, -zonetype) 2684 * 2 = order by (-zonetype, [node] distance) 2685 * 2686 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 2687 * the same zonelist. So only NUMA can configure this param. 2688 */ 2689 #define ZONELIST_ORDER_DEFAULT 0 2690 #define ZONELIST_ORDER_NODE 1 2691 #define ZONELIST_ORDER_ZONE 2 2692 2693 /* zonelist order in the kernel. 2694 * set_zonelist_order() will set this to NODE or ZONE. 
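 * As an example, on a box where node 0 has DMA32+Normal and node 1 has only
 * Normal, node 0's zonelist in Node order is
 *	node0/Normal, node0/DMA32, node1/Normal
 * whereas in Zone order it is
 *	node0/Normal, node1/Normal, node0/DMA32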
2695 */ 2696 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 2697 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 2698 2699 2700 #ifdef CONFIG_NUMA 2701 /* The value user specified ....changed by config */ 2702 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 2703 /* string for sysctl */ 2704 #define NUMA_ZONELIST_ORDER_LEN 16 2705 char numa_zonelist_order[16] = "default"; 2706 2707 /* 2708 * interface for configure zonelist ordering. 2709 * command line option "numa_zonelist_order" 2710 * = "[dD]efault - default, automatic configuration. 2711 * = "[nN]ode - order by node locality, then by zone within node 2712 * = "[zZ]one - order by zone, then by locality within zone 2713 */ 2714 2715 static int __parse_numa_zonelist_order(char *s) 2716 { 2717 if (*s == 'd' || *s == 'D') { 2718 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 2719 } else if (*s == 'n' || *s == 'N') { 2720 user_zonelist_order = ZONELIST_ORDER_NODE; 2721 } else if (*s == 'z' || *s == 'Z') { 2722 user_zonelist_order = ZONELIST_ORDER_ZONE; 2723 } else { 2724 printk(KERN_WARNING 2725 "Ignoring invalid numa_zonelist_order value: " 2726 "%s\n", s); 2727 return -EINVAL; 2728 } 2729 return 0; 2730 } 2731 2732 static __init int setup_numa_zonelist_order(char *s) 2733 { 2734 int ret; 2735 2736 if (!s) 2737 return 0; 2738 2739 ret = __parse_numa_zonelist_order(s); 2740 if (ret == 0) 2741 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); 2742 2743 return ret; 2744 } 2745 early_param("numa_zonelist_order", setup_numa_zonelist_order); 2746 2747 /* 2748 * sysctl handler for numa_zonelist_order 2749 */ 2750 int numa_zonelist_order_handler(ctl_table *table, int write, 2751 void __user *buffer, size_t *length, 2752 loff_t *ppos) 2753 { 2754 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 2755 int ret; 2756 static DEFINE_MUTEX(zl_order_mutex); 2757 2758 mutex_lock(&zl_order_mutex); 2759 if (write) 2760 strcpy(saved_string, (char*)table->data); 2761 ret = proc_dostring(table, write, buffer, length, ppos); 2762 if (ret) 2763 goto out; 2764 if (write) { 2765 int oldval = user_zonelist_order; 2766 if (__parse_numa_zonelist_order((char*)table->data)) { 2767 /* 2768 * bogus value. restore saved string 2769 */ 2770 strncpy((char*)table->data, saved_string, 2771 NUMA_ZONELIST_ORDER_LEN); 2772 user_zonelist_order = oldval; 2773 } else if (oldval != user_zonelist_order) { 2774 mutex_lock(&zonelists_mutex); 2775 build_all_zonelists(NULL); 2776 mutex_unlock(&zonelists_mutex); 2777 } 2778 } 2779 out: 2780 mutex_unlock(&zl_order_mutex); 2781 return ret; 2782 } 2783 2784 2785 #define MAX_NODE_LOAD (nr_online_nodes) 2786 static int node_load[MAX_NUMNODES]; 2787 2788 /** 2789 * find_next_best_node - find the next node that should appear in a given node's fallback list 2790 * @node: node whose fallback list we're appending 2791 * @used_node_mask: nodemask_t of already used nodes 2792 * 2793 * We use a number of factors to determine which is the next node that should 2794 * appear on a given node's fallback list. The node should not have appeared 2795 * already in @node's fallback list, and it should be the next closest node 2796 * according to the distance array (which contains arbitrary distance values 2797 * from each node to each node in the system), and should also prefer nodes 2798 * with no CPUs, since presumably they'll have very little allocation pressure 2799 * on them otherwise. 2800 * It returns -1 if no node is found. 
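 * All else being equal, a candidate without CPUs beats a CPU-bearing one at
 * the same distance: the latter is charged PENALTY_FOR_NODE_WITH_CPUS before
 * the (MAX_NODE_LOAD * MAX_NUMNODES) scaling, which dwarfs any node_load[]
 * difference.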
2801 */ 2802 static int find_next_best_node(int node, nodemask_t *used_node_mask) 2803 { 2804 int n, val; 2805 int min_val = INT_MAX; 2806 int best_node = -1; 2807 const struct cpumask *tmp = cpumask_of_node(0); 2808 2809 /* Use the local node if we haven't already */ 2810 if (!node_isset(node, *used_node_mask)) { 2811 node_set(node, *used_node_mask); 2812 return node; 2813 } 2814 2815 for_each_node_state(n, N_HIGH_MEMORY) { 2816 2817 /* Don't want a node to appear more than once */ 2818 if (node_isset(n, *used_node_mask)) 2819 continue; 2820 2821 /* Use the distance array to find the distance */ 2822 val = node_distance(node, n); 2823 2824 /* Penalize nodes under us ("prefer the next node") */ 2825 val += (n < node); 2826 2827 /* Give preference to headless and unused nodes */ 2828 tmp = cpumask_of_node(n); 2829 if (!cpumask_empty(tmp)) 2830 val += PENALTY_FOR_NODE_WITH_CPUS; 2831 2832 /* Slight preference for less loaded node */ 2833 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 2834 val += node_load[n]; 2835 2836 if (val < min_val) { 2837 min_val = val; 2838 best_node = n; 2839 } 2840 } 2841 2842 if (best_node >= 0) 2843 node_set(best_node, *used_node_mask); 2844 2845 return best_node; 2846 } 2847 2848 2849 /* 2850 * Build zonelists ordered by node and zones within node. 2851 * This results in maximum locality--normal zone overflows into local 2852 * DMA zone, if any--but risks exhausting DMA zone. 2853 */ 2854 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 2855 { 2856 int j; 2857 struct zonelist *zonelist; 2858 2859 zonelist = &pgdat->node_zonelists[0]; 2860 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 2861 ; 2862 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2863 MAX_NR_ZONES - 1); 2864 zonelist->_zonerefs[j].zone = NULL; 2865 zonelist->_zonerefs[j].zone_idx = 0; 2866 } 2867 2868 /* 2869 * Build gfp_thisnode zonelists 2870 */ 2871 static void build_thisnode_zonelists(pg_data_t *pgdat) 2872 { 2873 int j; 2874 struct zonelist *zonelist; 2875 2876 zonelist = &pgdat->node_zonelists[1]; 2877 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2878 zonelist->_zonerefs[j].zone = NULL; 2879 zonelist->_zonerefs[j].zone_idx = 0; 2880 } 2881 2882 /* 2883 * Build zonelists ordered by zone and nodes within zones. 2884 * This results in conserving DMA zone[s] until all Normal memory is 2885 * exhausted, but results in overflowing to remote node while memory 2886 * may still exist in local DMA zone. 2887 */ 2888 static int node_order[MAX_NUMNODES]; 2889 2890 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 2891 { 2892 int pos, j, node; 2893 int zone_type; /* needs to be signed */ 2894 struct zone *z; 2895 struct zonelist *zonelist; 2896 2897 zonelist = &pgdat->node_zonelists[0]; 2898 pos = 0; 2899 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 2900 for (j = 0; j < nr_nodes; j++) { 2901 node = node_order[j]; 2902 z = &NODE_DATA(node)->node_zones[zone_type]; 2903 if (populated_zone(z)) { 2904 zoneref_set_zone(z, 2905 &zonelist->_zonerefs[pos++]); 2906 check_highest_zone(zone_type); 2907 } 2908 } 2909 } 2910 zonelist->_zonerefs[pos].zone = NULL; 2911 zonelist->_zonerefs[pos].zone_idx = 0; 2912 } 2913 2914 static int default_zonelist_order(void) 2915 { 2916 int nid, zone_type; 2917 unsigned long low_kmem_size,total_size; 2918 struct zone *z; 2919 int average_size; 2920 /* 2921 * ZONE_DMA and ZONE_DMA32 can be very small area in the system. 
2922 * If they are really small and used heavily, the system can fall 2923 * into OOM very easily. 2924 * This function detect ZONE_DMA/DMA32 size and configures zone order. 2925 */ 2926 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */ 2927 low_kmem_size = 0; 2928 total_size = 0; 2929 for_each_online_node(nid) { 2930 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2931 z = &NODE_DATA(nid)->node_zones[zone_type]; 2932 if (populated_zone(z)) { 2933 if (zone_type < ZONE_NORMAL) 2934 low_kmem_size += z->present_pages; 2935 total_size += z->present_pages; 2936 } else if (zone_type == ZONE_NORMAL) { 2937 /* 2938 * If any node has only lowmem, then node order 2939 * is preferred to allow kernel allocations 2940 * locally; otherwise, they can easily infringe 2941 * on other nodes when there is an abundance of 2942 * lowmem available to allocate from. 2943 */ 2944 return ZONELIST_ORDER_NODE; 2945 } 2946 } 2947 } 2948 if (!low_kmem_size || /* there are no DMA area. */ 2949 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */ 2950 return ZONELIST_ORDER_NODE; 2951 /* 2952 * look into each node's config. 2953 * If there is a node whose DMA/DMA32 memory is very big area on 2954 * local memory, NODE_ORDER may be suitable. 2955 */ 2956 average_size = total_size / 2957 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); 2958 for_each_online_node(nid) { 2959 low_kmem_size = 0; 2960 total_size = 0; 2961 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2962 z = &NODE_DATA(nid)->node_zones[zone_type]; 2963 if (populated_zone(z)) { 2964 if (zone_type < ZONE_NORMAL) 2965 low_kmem_size += z->present_pages; 2966 total_size += z->present_pages; 2967 } 2968 } 2969 if (low_kmem_size && 2970 total_size > average_size && /* ignore small node */ 2971 low_kmem_size > total_size * 70/100) 2972 return ZONELIST_ORDER_NODE; 2973 } 2974 return ZONELIST_ORDER_ZONE; 2975 } 2976 2977 static void set_zonelist_order(void) 2978 { 2979 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 2980 current_zonelist_order = default_zonelist_order(); 2981 else 2982 current_zonelist_order = user_zonelist_order; 2983 } 2984 2985 static void build_zonelists(pg_data_t *pgdat) 2986 { 2987 int j, node, load; 2988 enum zone_type i; 2989 nodemask_t used_mask; 2990 int local_node, prev_node; 2991 struct zonelist *zonelist; 2992 int order = current_zonelist_order; 2993 2994 /* initialize zonelists */ 2995 for (i = 0; i < MAX_ZONELISTS; i++) { 2996 zonelist = pgdat->node_zonelists + i; 2997 zonelist->_zonerefs[0].zone = NULL; 2998 zonelist->_zonerefs[0].zone_idx = 0; 2999 } 3000 3001 /* NUMA-aware ordering of nodes */ 3002 local_node = pgdat->node_id; 3003 load = nr_online_nodes; 3004 prev_node = local_node; 3005 nodes_clear(used_mask); 3006 3007 memset(node_order, 0, sizeof(node_order)); 3008 j = 0; 3009 3010 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 3011 int distance = node_distance(local_node, node); 3012 3013 /* 3014 * If another node is sufficiently far away then it is better 3015 * to reclaim pages in a zone before going off node. 3016 */ 3017 if (distance > RECLAIM_DISTANCE) 3018 zone_reclaim_mode = 1; 3019 3020 /* 3021 * We don't want to pressure a particular node. 3022 * So adding penalty to the first node in same 3023 * distance group to make it round-robin. 
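 * E.g. when nodes 1 and 2 are equidistant from node 0, only the first one
 * found is charged in node_load[], so nodes building their lists later tend
 * to reach the uncharged member of the group first.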
3024 */ 3025 if (distance != node_distance(local_node, prev_node)) 3026 node_load[node] = load; 3027 3028 prev_node = node; 3029 load--; 3030 if (order == ZONELIST_ORDER_NODE) 3031 build_zonelists_in_node_order(pgdat, node); 3032 else 3033 node_order[j++] = node; /* remember order */ 3034 } 3035 3036 if (order == ZONELIST_ORDER_ZONE) { 3037 /* calculate node order -- i.e., DMA last! */ 3038 build_zonelists_in_zone_order(pgdat, j); 3039 } 3040 3041 build_thisnode_zonelists(pgdat); 3042 } 3043 3044 /* Construct the zonelist performance cache - see further mmzone.h */ 3045 static void build_zonelist_cache(pg_data_t *pgdat) 3046 { 3047 struct zonelist *zonelist; 3048 struct zonelist_cache *zlc; 3049 struct zoneref *z; 3050 3051 zonelist = &pgdat->node_zonelists[0]; 3052 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 3053 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 3054 for (z = zonelist->_zonerefs; z->zone; z++) 3055 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 3056 } 3057 3058 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3059 /* 3060 * Return node id of node used for "local" allocations. 3061 * I.e., first node id of first zone in arg node's generic zonelist. 3062 * Used for initializing percpu 'numa_mem', which is used primarily 3063 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 3064 */ 3065 int local_memory_node(int node) 3066 { 3067 struct zone *zone; 3068 3069 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 3070 gfp_zone(GFP_KERNEL), 3071 NULL, 3072 &zone); 3073 return zone->node; 3074 } 3075 #endif 3076 3077 #else /* CONFIG_NUMA */ 3078 3079 static void set_zonelist_order(void) 3080 { 3081 current_zonelist_order = ZONELIST_ORDER_ZONE; 3082 } 3083 3084 static void build_zonelists(pg_data_t *pgdat) 3085 { 3086 int node, local_node; 3087 enum zone_type j; 3088 struct zonelist *zonelist; 3089 3090 local_node = pgdat->node_id; 3091 3092 zonelist = &pgdat->node_zonelists[0]; 3093 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 3094 3095 /* 3096 * Now we build the zonelist so that it contains the zones 3097 * of all the other nodes. 3098 * We don't want to pressure a particular node, so when 3099 * building the zones for node N, we make sure that the 3100 * zones coming right after the local ones are those from 3101 * node N+1 (modulo N) 3102 */ 3103 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 3104 if (!node_online(node)) 3105 continue; 3106 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 3107 MAX_NR_ZONES - 1); 3108 } 3109 for (node = 0; node < local_node; node++) { 3110 if (!node_online(node)) 3111 continue; 3112 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 3113 MAX_NR_ZONES - 1); 3114 } 3115 3116 zonelist->_zonerefs[j].zone = NULL; 3117 zonelist->_zonerefs[j].zone_idx = 0; 3118 } 3119 3120 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 3121 static void build_zonelist_cache(pg_data_t *pgdat) 3122 { 3123 pgdat->node_zonelists[0].zlcache_ptr = NULL; 3124 } 3125 3126 #endif /* CONFIG_NUMA */ 3127 3128 /* 3129 * Boot pageset table. One per cpu which is going to be used for all 3130 * zones and all nodes. The parameters will be set in such a way 3131 * that an item put on a list will immediately be handed over to 3132 * the buddy list. This is safe since pageset manipulation is done 3133 * with interrupts disabled. 3134 * 3135 * The boot_pagesets must be kept even after bootup is complete for 3136 * unused processors and/or zones. 
They do play a role for bootstrapping 3137 * hotplugged processors. 3138 * 3139 * zoneinfo_show() and maybe other functions do 3140 * not check if the processor is online before following the pageset pointer. 3141 * Other parts of the kernel may not check if the zone is available. 3142 */ 3143 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 3144 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 3145 static void setup_zone_pageset(struct zone *zone); 3146 3147 /* 3148 * Global mutex to protect against size modification of zonelists 3149 * as well as to serialize pageset setup for the new populated zone. 3150 */ 3151 DEFINE_MUTEX(zonelists_mutex); 3152 3153 /* return values int ....just for stop_machine() */ 3154 static __init_refok int __build_all_zonelists(void *data) 3155 { 3156 int nid; 3157 int cpu; 3158 3159 #ifdef CONFIG_NUMA 3160 memset(node_load, 0, sizeof(node_load)); 3161 #endif 3162 for_each_online_node(nid) { 3163 pg_data_t *pgdat = NODE_DATA(nid); 3164 3165 build_zonelists(pgdat); 3166 build_zonelist_cache(pgdat); 3167 } 3168 3169 /* 3170 * Initialize the boot_pagesets that are going to be used 3171 * for bootstrapping processors. The real pagesets for 3172 * each zone will be allocated later when the per cpu 3173 * allocator is available. 3174 * 3175 * boot_pagesets are used also for bootstrapping offline 3176 * cpus if the system is already booted because the pagesets 3177 * are needed to initialize allocators on a specific cpu too. 3178 * F.e. the percpu allocator needs the page allocator which 3179 * needs the percpu allocator in order to allocate its pagesets 3180 * (a chicken-egg dilemma). 3181 */ 3182 for_each_possible_cpu(cpu) { 3183 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 3184 3185 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3186 /* 3187 * We now know the "local memory node" for each node-- 3188 * i.e., the node of the first zone in the generic zonelist. 3189 * Set up numa_mem percpu variable for on-line cpus. During 3190 * boot, only the boot cpu should be on-line; we'll init the 3191 * secondary cpus' numa_mem as they come on-line. During 3192 * node/memory hotplug, we'll fixup all on-line cpus. 3193 */ 3194 if (cpu_online(cpu)) 3195 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 3196 #endif 3197 } 3198 3199 return 0; 3200 } 3201 3202 /* 3203 * Called with zonelists_mutex held always 3204 * unless system_state == SYSTEM_BOOTING. 3205 */ 3206 void __ref build_all_zonelists(void *data) 3207 { 3208 set_zonelist_order(); 3209 3210 if (system_state == SYSTEM_BOOTING) { 3211 __build_all_zonelists(NULL); 3212 mminit_verify_zonelist(); 3213 cpuset_init_current_mems_allowed(); 3214 } else { 3215 /* we have to stop all cpus to guarantee there is no user 3216 of zonelist */ 3217 #ifdef CONFIG_MEMORY_HOTPLUG 3218 if (data) 3219 setup_zone_pageset((struct zone *)data); 3220 #endif 3221 stop_machine(__build_all_zonelists, NULL, NULL); 3222 /* cpuset refresh routine should be here */ 3223 } 3224 vm_total_pages = nr_free_pagecache_pages(); 3225 /* 3226 * Disable grouping by mobility if the number of pages in the 3227 * system is too low to allow the mechanism to work. It would be 3228 * more accurate, but expensive to check per-zone. 
This check is 3229 * made on memory-hotadd so a system can start with mobility 3230 * disabled and enable it later 3231 */ 3232 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 3233 page_group_by_mobility_disabled = 1; 3234 else 3235 page_group_by_mobility_disabled = 0; 3236 3237 printk("Built %i zonelists in %s order, mobility grouping %s. " 3238 "Total pages: %ld\n", 3239 nr_online_nodes, 3240 zonelist_order_name[current_zonelist_order], 3241 page_group_by_mobility_disabled ? "off" : "on", 3242 vm_total_pages); 3243 #ifdef CONFIG_NUMA 3244 printk("Policy zone: %s\n", zone_names[policy_zone]); 3245 #endif 3246 } 3247 3248 /* 3249 * Helper functions to size the waitqueue hash table. 3250 * Essentially these want to choose hash table sizes sufficiently 3251 * large so that collisions trying to wait on pages are rare. 3252 * But in fact, the number of active page waitqueues on typical 3253 * systems is ridiculously low, less than 200. So this is even 3254 * conservative, even though it seems large. 3255 * 3256 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 3257 * waitqueues, i.e. the size of the waitq table given the number of pages. 3258 */ 3259 #define PAGES_PER_WAITQUEUE 256 3260 3261 #ifndef CONFIG_MEMORY_HOTPLUG 3262 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 3263 { 3264 unsigned long size = 1; 3265 3266 pages /= PAGES_PER_WAITQUEUE; 3267 3268 while (size < pages) 3269 size <<= 1; 3270 3271 /* 3272 * Once we have dozens or even hundreds of threads sleeping 3273 * on IO we've got bigger problems than wait queue collision. 3274 * Limit the size of the wait table to a reasonable size. 3275 */ 3276 size = min(size, 4096UL); 3277 3278 return max(size, 4UL); 3279 } 3280 #else 3281 /* 3282 * A zone's size might be changed by hot-add, so it is not possible to determine 3283 * a suitable size for its wait_table. So we use the maximum size now. 3284 * 3285 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 3286 * 3287 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 3288 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 3289 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 3290 * 3291 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 3292 * or more by the traditional way. (See above). It equals: 3293 * 3294 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 3295 * ia64(16K page size) : = ( 8G + 4M)byte. 3296 * powerpc (64K page size) : = (32G +16M)byte. 3297 */ 3298 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 3299 { 3300 return 4096UL; 3301 } 3302 #endif 3303 3304 /* 3305 * This is an integer logarithm so that shifts can be used later 3306 * to extract the more random high bits from the multiplicative 3307 * hash function before the remainder is taken. 3308 */ 3309 static inline unsigned long wait_table_bits(unsigned long size) 3310 { 3311 return ffz(~size); 3312 } 3313 3314 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 3315 3316 /* 3317 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 3318 * of blocks reserved is based on min_wmark_pages(zone). The memory within 3319 * the reserve will tend to store contiguous free pages. 
Setting min_free_kbytes 3320 * higher will lead to a bigger reserve which will get freed as contiguous 3321 * blocks as reclaim kicks in 3322 */ 3323 static void setup_zone_migrate_reserve(struct zone *zone) 3324 { 3325 unsigned long start_pfn, pfn, end_pfn; 3326 struct page *page; 3327 unsigned long block_migratetype; 3328 int reserve; 3329 3330 /* Get the start pfn, end pfn and the number of blocks to reserve */ 3331 start_pfn = zone->zone_start_pfn; 3332 end_pfn = start_pfn + zone->spanned_pages; 3333 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> 3334 pageblock_order; 3335 3336 /* 3337 * Reserve blocks are generally in place to help high-order atomic 3338 * allocations that are short-lived. A min_free_kbytes value that 3339 * would result in more than 2 reserve blocks for atomic allocations 3340 * is assumed to be in place to help anti-fragmentation for the 3341 * future allocation of hugepages at runtime. 3342 */ 3343 reserve = min(2, reserve); 3344 3345 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 3346 if (!pfn_valid(pfn)) 3347 continue; 3348 page = pfn_to_page(pfn); 3349 3350 /* Watch out for overlapping nodes */ 3351 if (page_to_nid(page) != zone_to_nid(zone)) 3352 continue; 3353 3354 /* Blocks with reserved pages will never free, skip them. */ 3355 if (PageReserved(page)) 3356 continue; 3357 3358 block_migratetype = get_pageblock_migratetype(page); 3359 3360 /* If this block is reserved, account for it */ 3361 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { 3362 reserve--; 3363 continue; 3364 } 3365 3366 /* Suitable for reserving if this block is movable */ 3367 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { 3368 set_pageblock_migratetype(page, MIGRATE_RESERVE); 3369 move_freepages_block(zone, page, MIGRATE_RESERVE); 3370 reserve--; 3371 continue; 3372 } 3373 3374 /* 3375 * If the reserve is met and this is a previous reserved block, 3376 * take it back 3377 */ 3378 if (block_migratetype == MIGRATE_RESERVE) { 3379 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 3380 move_freepages_block(zone, page, MIGRATE_MOVABLE); 3381 } 3382 } 3383 } 3384 3385 /* 3386 * Initially all pages are reserved - free ones are freed 3387 * up by free_all_bootmem() once the early boot process is 3388 * done. Non-atomic initialization, single-pass. 3389 */ 3390 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 3391 unsigned long start_pfn, enum memmap_context context) 3392 { 3393 struct page *page; 3394 unsigned long end_pfn = start_pfn + size; 3395 unsigned long pfn; 3396 struct zone *z; 3397 3398 if (highest_memmap_pfn < end_pfn - 1) 3399 highest_memmap_pfn = end_pfn - 1; 3400 3401 z = &NODE_DATA(nid)->node_zones[zone]; 3402 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 3403 /* 3404 * There can be holes in boot-time mem_map[]s 3405 * handed to this function. They do not 3406 * exist on hotplugged memory. 3407 */ 3408 if (context == MEMMAP_EARLY) { 3409 if (!early_pfn_valid(pfn)) 3410 continue; 3411 if (!early_pfn_in_nid(pfn, nid)) 3412 continue; 3413 } 3414 page = pfn_to_page(pfn); 3415 set_page_links(page, zone, nid, pfn); 3416 mminit_verify_page_links(page, zone, nid, pfn); 3417 init_page_count(page); 3418 reset_page_mapcount(page); 3419 SetPageReserved(page); 3420 /* 3421 * Mark the block movable so that blocks are reserved for 3422 * movable at startup. 
This will force kernel allocations 3423 * to reserve their blocks rather than leaking throughout 3424 * the address space during boot when many long-lived 3425 * kernel allocations are made. Later some blocks near 3426 * the start are marked MIGRATE_RESERVE by 3427 * setup_zone_migrate_reserve() 3428 * 3429 * bitmap is created for zone's valid pfn range. but memmap 3430 * can be created for invalid pages (for alignment) 3431 * check here not to call set_pageblock_migratetype() against 3432 * pfn out of zone. 3433 */ 3434 if ((z->zone_start_pfn <= pfn) 3435 && (pfn < z->zone_start_pfn + z->spanned_pages) 3436 && !(pfn & (pageblock_nr_pages - 1))) 3437 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 3438 3439 INIT_LIST_HEAD(&page->lru); 3440 #ifdef WANT_PAGE_VIRTUAL 3441 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 3442 if (!is_highmem_idx(zone)) 3443 set_page_address(page, __va(pfn << PAGE_SHIFT)); 3444 #endif 3445 } 3446 } 3447 3448 static void __meminit zone_init_free_lists(struct zone *zone) 3449 { 3450 int order, t; 3451 for_each_migratetype_order(order, t) { 3452 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 3453 zone->free_area[order].nr_free = 0; 3454 } 3455 } 3456 3457 #ifndef __HAVE_ARCH_MEMMAP_INIT 3458 #define memmap_init(size, nid, zone, start_pfn) \ 3459 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 3460 #endif 3461 3462 static int zone_batchsize(struct zone *zone) 3463 { 3464 #ifdef CONFIG_MMU 3465 int batch; 3466 3467 /* 3468 * The per-cpu-pages pools are set to around 1000th of the 3469 * size of the zone. But no more than 1/2 of a meg. 3470 * 3471 * OK, so we don't know how big the cache is. So guess. 3472 */ 3473 batch = zone->present_pages / 1024; 3474 if (batch * PAGE_SIZE > 512 * 1024) 3475 batch = (512 * 1024) / PAGE_SIZE; 3476 batch /= 4; /* We effectively *= 4 below */ 3477 if (batch < 1) 3478 batch = 1; 3479 3480 /* 3481 * Clamp the batch to a 2^n - 1 value. Having a power 3482 * of 2 value was found to be more likely to have 3483 * suboptimal cache aliasing properties in some cases. 3484 * 3485 * For example if 2 tasks are alternately allocating 3486 * batches of pages, one task can end up with a lot 3487 * of pages of one half of the possible page colors 3488 * and the other with pages of the other colors. 3489 */ 3490 batch = rounddown_pow_of_two(batch + batch/2) - 1; 3491 3492 return batch; 3493 3494 #else 3495 /* The deferral and batching of frees should be suppressed under NOMMU 3496 * conditions. 3497 * 3498 * The problem is that NOMMU needs to be able to allocate large chunks 3499 * of contiguous memory as there's no hardware page translation to 3500 * assemble apparent contiguous memory from discontiguous pages. 3501 * 3502 * Queueing large contiguous runs of pages for batching, however, 3503 * causes the pages to actually be freed in smaller chunks. As there 3504 * can be a significant delay between the individual batches being 3505 * recycled, this leads to the once large chunks of space being 3506 * fragmented and becoming unavailable for high-order allocations. 
3507 */ 3508 return 0; 3509 #endif 3510 } 3511 3512 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 3513 { 3514 struct per_cpu_pages *pcp; 3515 int migratetype; 3516 3517 memset(p, 0, sizeof(*p)); 3518 3519 pcp = &p->pcp; 3520 pcp->count = 0; 3521 pcp->high = 6 * batch; 3522 pcp->batch = max(1UL, 1 * batch); 3523 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 3524 INIT_LIST_HEAD(&pcp->lists[migratetype]); 3525 } 3526 3527 /* 3528 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 3529 * to the value high for the pageset p. 3530 */ 3531 3532 static void setup_pagelist_highmark(struct per_cpu_pageset *p, 3533 unsigned long high) 3534 { 3535 struct per_cpu_pages *pcp; 3536 3537 pcp = &p->pcp; 3538 pcp->high = high; 3539 pcp->batch = max(1UL, high/4); 3540 if ((high/4) > (PAGE_SHIFT * 8)) 3541 pcp->batch = PAGE_SHIFT * 8; 3542 } 3543 3544 static __meminit void setup_zone_pageset(struct zone *zone) 3545 { 3546 int cpu; 3547 3548 zone->pageset = alloc_percpu(struct per_cpu_pageset); 3549 3550 for_each_possible_cpu(cpu) { 3551 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 3552 3553 setup_pageset(pcp, zone_batchsize(zone)); 3554 3555 if (percpu_pagelist_fraction) 3556 setup_pagelist_highmark(pcp, 3557 (zone->present_pages / 3558 percpu_pagelist_fraction)); 3559 } 3560 } 3561 3562 /* 3563 * Allocate per cpu pagesets and initialize them. 3564 * Before this call only boot pagesets were available. 3565 */ 3566 void __init setup_per_cpu_pageset(void) 3567 { 3568 struct zone *zone; 3569 3570 for_each_populated_zone(zone) 3571 setup_zone_pageset(zone); 3572 } 3573 3574 static noinline __init_refok 3575 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 3576 { 3577 int i; 3578 struct pglist_data *pgdat = zone->zone_pgdat; 3579 size_t alloc_size; 3580 3581 /* 3582 * The per-page waitqueue mechanism uses hashed waitqueues 3583 * per zone. 3584 */ 3585 zone->wait_table_hash_nr_entries = 3586 wait_table_hash_nr_entries(zone_size_pages); 3587 zone->wait_table_bits = 3588 wait_table_bits(zone->wait_table_hash_nr_entries); 3589 alloc_size = zone->wait_table_hash_nr_entries 3590 * sizeof(wait_queue_head_t); 3591 3592 if (!slab_is_available()) { 3593 zone->wait_table = (wait_queue_head_t *) 3594 alloc_bootmem_node_nopanic(pgdat, alloc_size); 3595 } else { 3596 /* 3597 * This case means that a zone whose size was 0 gets new memory 3598 * via memory hot-add. 3599 * But it may be the case that a new node was hot-added. In 3600 * this case vmalloc() will not be able to use this new node's 3601 * memory - this wait_table must be initialized to use this new 3602 * node itself as well. 3603 * To use this new node's memory, further consideration will be 3604 * necessary. 
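 * (For scale: with the !MEMORY_HOTPLUG sizing above, a 1GB zone of 4K pages
 * -- 262144 pages -- gets 262144 / PAGES_PER_WAITQUEUE = 1024 entries, while
 * the hotplug variant always allocates the 4096-entry maximum.)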
3605 */ 3606 zone->wait_table = vmalloc(alloc_size); 3607 } 3608 if (!zone->wait_table) 3609 return -ENOMEM; 3610 3611 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 3612 init_waitqueue_head(zone->wait_table + i); 3613 3614 return 0; 3615 } 3616 3617 static int __zone_pcp_update(void *data) 3618 { 3619 struct zone *zone = data; 3620 int cpu; 3621 unsigned long batch = zone_batchsize(zone), flags; 3622 3623 for_each_possible_cpu(cpu) { 3624 struct per_cpu_pageset *pset; 3625 struct per_cpu_pages *pcp; 3626 3627 pset = per_cpu_ptr(zone->pageset, cpu); 3628 pcp = &pset->pcp; 3629 3630 local_irq_save(flags); 3631 free_pcppages_bulk(zone, pcp->count, pcp); 3632 setup_pageset(pset, batch); 3633 local_irq_restore(flags); 3634 } 3635 return 0; 3636 } 3637 3638 void zone_pcp_update(struct zone *zone) 3639 { 3640 stop_machine(__zone_pcp_update, zone, NULL); 3641 } 3642 3643 static __meminit void zone_pcp_init(struct zone *zone) 3644 { 3645 /* 3646 * per cpu subsystem is not up at this point. The following code 3647 * relies on the ability of the linker to provide the 3648 * offset of a (static) per cpu variable into the per cpu area. 3649 */ 3650 zone->pageset = &boot_pageset; 3651 3652 if (zone->present_pages) 3653 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 3654 zone->name, zone->present_pages, 3655 zone_batchsize(zone)); 3656 } 3657 3658 __meminit int init_currently_empty_zone(struct zone *zone, 3659 unsigned long zone_start_pfn, 3660 unsigned long size, 3661 enum memmap_context context) 3662 { 3663 struct pglist_data *pgdat = zone->zone_pgdat; 3664 int ret; 3665 ret = zone_wait_table_init(zone, size); 3666 if (ret) 3667 return ret; 3668 pgdat->nr_zones = zone_idx(zone) + 1; 3669 3670 zone->zone_start_pfn = zone_start_pfn; 3671 3672 mminit_dprintk(MMINIT_TRACE, "memmap_init", 3673 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 3674 pgdat->node_id, 3675 (unsigned long)zone_idx(zone), 3676 zone_start_pfn, (zone_start_pfn + size)); 3677 3678 zone_init_free_lists(zone); 3679 3680 return 0; 3681 } 3682 3683 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3684 /* 3685 * Basic iterator support. Return the first range of PFNs for a node 3686 * Note: nid == MAX_NUMNODES returns first region regardless of node 3687 */ 3688 static int __meminit first_active_region_index_in_nid(int nid) 3689 { 3690 int i; 3691 3692 for (i = 0; i < nr_nodemap_entries; i++) 3693 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 3694 return i; 3695 3696 return -1; 3697 } 3698 3699 /* 3700 * Basic iterator support. Return the next active range of PFNs for a node 3701 * Note: nid == MAX_NUMNODES returns next region regardless of node 3702 */ 3703 static int __meminit next_active_region_index_in_nid(int index, int nid) 3704 { 3705 for (index = index + 1; index < nr_nodemap_entries; index++) 3706 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 3707 return index; 3708 3709 return -1; 3710 } 3711 3712 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 3713 /* 3714 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
3715 * Architectures may implement their own version but if add_active_range() 3716 * was used and there are no special requirements, this is a convenient 3717 * alternative 3718 */ 3719 int __meminit __early_pfn_to_nid(unsigned long pfn) 3720 { 3721 int i; 3722 3723 for (i = 0; i < nr_nodemap_entries; i++) { 3724 unsigned long start_pfn = early_node_map[i].start_pfn; 3725 unsigned long end_pfn = early_node_map[i].end_pfn; 3726 3727 if (start_pfn <= pfn && pfn < end_pfn) 3728 return early_node_map[i].nid; 3729 } 3730 /* This is a memory hole */ 3731 return -1; 3732 } 3733 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 3734 3735 int __meminit early_pfn_to_nid(unsigned long pfn) 3736 { 3737 int nid; 3738 3739 nid = __early_pfn_to_nid(pfn); 3740 if (nid >= 0) 3741 return nid; 3742 /* just returns 0 */ 3743 return 0; 3744 } 3745 3746 #ifdef CONFIG_NODES_SPAN_OTHER_NODES 3747 bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 3748 { 3749 int nid; 3750 3751 nid = __early_pfn_to_nid(pfn); 3752 if (nid >= 0 && nid != node) 3753 return false; 3754 return true; 3755 } 3756 #endif 3757 3758 /* Basic iterator support to walk early_node_map[] */ 3759 #define for_each_active_range_index_in_nid(i, nid) \ 3760 for (i = first_active_region_index_in_nid(nid); i != -1; \ 3761 i = next_active_region_index_in_nid(i, nid)) 3762 3763 /** 3764 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 3765 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 3766 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 3767 * 3768 * If an architecture guarantees that all ranges registered with 3769 * add_active_ranges() contain no holes and may be freed, this 3770 * this function may be used instead of calling free_bootmem() manually. 3771 */ 3772 void __init free_bootmem_with_active_regions(int nid, 3773 unsigned long max_low_pfn) 3774 { 3775 int i; 3776 3777 for_each_active_range_index_in_nid(i, nid) { 3778 unsigned long size_pages = 0; 3779 unsigned long end_pfn = early_node_map[i].end_pfn; 3780 3781 if (early_node_map[i].start_pfn >= max_low_pfn) 3782 continue; 3783 3784 if (end_pfn > max_low_pfn) 3785 end_pfn = max_low_pfn; 3786 3787 size_pages = end_pfn - early_node_map[i].start_pfn; 3788 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 3789 PFN_PHYS(early_node_map[i].start_pfn), 3790 size_pages << PAGE_SHIFT); 3791 } 3792 } 3793 3794 #ifdef CONFIG_HAVE_MEMBLOCK 3795 /* 3796 * Basic iterator support. Return the last range of PFNs for a node 3797 * Note: nid == MAX_NUMNODES returns last region regardless of node 3798 */ 3799 static int __meminit last_active_region_index_in_nid(int nid) 3800 { 3801 int i; 3802 3803 for (i = nr_nodemap_entries - 1; i >= 0; i--) 3804 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 3805 return i; 3806 3807 return -1; 3808 } 3809 3810 /* 3811 * Basic iterator support. 
Return the previous active range of PFNs for a node 3812 * Note: nid == MAX_NUMNODES returns previous region regardless of node 3813 */ 3814 static int __meminit previous_active_region_index_in_nid(int index, int nid) 3815 { 3816 for (index = index - 1; index >= 0; index--) 3817 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 3818 return index; 3819 3820 return -1; 3821 } 3822 3823 #define for_each_active_range_index_in_nid_reverse(i, nid) \ 3824 for (i = last_active_region_index_in_nid(nid); i != -1; \ 3825 i = previous_active_region_index_in_nid(i, nid)) 3826 3827 u64 __init find_memory_core_early(int nid, u64 size, u64 align, 3828 u64 goal, u64 limit) 3829 { 3830 int i; 3831 3832 /* Need to go over early_node_map to find out good range for node */ 3833 for_each_active_range_index_in_nid_reverse(i, nid) { 3834 u64 addr; 3835 u64 ei_start, ei_last; 3836 u64 final_start, final_end; 3837 3838 ei_last = early_node_map[i].end_pfn; 3839 ei_last <<= PAGE_SHIFT; 3840 ei_start = early_node_map[i].start_pfn; 3841 ei_start <<= PAGE_SHIFT; 3842 3843 final_start = max(ei_start, goal); 3844 final_end = min(ei_last, limit); 3845 3846 if (final_start >= final_end) 3847 continue; 3848 3849 addr = memblock_find_in_range(final_start, final_end, size, align); 3850 3851 if (addr == MEMBLOCK_ERROR) 3852 continue; 3853 3854 return addr; 3855 } 3856 3857 return MEMBLOCK_ERROR; 3858 } 3859 #endif 3860 3861 int __init add_from_early_node_map(struct range *range, int az, 3862 int nr_range, int nid) 3863 { 3864 int i; 3865 u64 start, end; 3866 3867 /* need to go over early_node_map to find out good range for node */ 3868 for_each_active_range_index_in_nid(i, nid) { 3869 start = early_node_map[i].start_pfn; 3870 end = early_node_map[i].end_pfn; 3871 nr_range = add_range(range, az, nr_range, start, end); 3872 } 3873 return nr_range; 3874 } 3875 3876 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) 3877 { 3878 int i; 3879 int ret; 3880 3881 for_each_active_range_index_in_nid(i, nid) { 3882 ret = work_fn(early_node_map[i].start_pfn, 3883 early_node_map[i].end_pfn, data); 3884 if (ret) 3885 break; 3886 } 3887 } 3888 /** 3889 * sparse_memory_present_with_active_regions - Call memory_present for each active range 3890 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 3891 * 3892 * If an architecture guarantees that all ranges registered with 3893 * add_active_range() contain no holes and may be freed, this 3894 * function may be used instead of calling memory_present() manually. 3895 */ 3896 void __init sparse_memory_present_with_active_regions(int nid) 3897 { 3898 int i; 3899 3900 for_each_active_range_index_in_nid(i, nid) 3901 memory_present(early_node_map[i].nid, 3902 early_node_map[i].start_pfn, 3903 early_node_map[i].end_pfn); 3904 } 3905 3906 /** 3907 * get_pfn_range_for_nid - Return the start and end page frames for a node 3908 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 3909 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 3910 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 3911 * 3912 * It returns the start and end page frame of a node based on information 3913 * provided by an arch calling add_active_range(). If called for a node 3914 * with no available memory, the start and end PFNs will 3915 * be 0.
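*
* For example (illustrative values only), a node whose registered ranges are
* [0x1000, 0x2000) and [0x3000, 0x4000) yields *start_pfn == 0x1000 and
* *end_pfn == 0x4000; any hole between the ranges is still spanned.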
3916 */ 3917 void __meminit get_pfn_range_for_nid(unsigned int nid, 3918 unsigned long *start_pfn, unsigned long *end_pfn) 3919 { 3920 int i; 3921 *start_pfn = -1UL; 3922 *end_pfn = 0; 3923 3924 for_each_active_range_index_in_nid(i, nid) { 3925 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 3926 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 3927 } 3928 3929 if (*start_pfn == -1UL) 3930 *start_pfn = 0; 3931 } 3932 3933 /* 3934 * This finds a zone that can be used for ZONE_MOVABLE pages. The 3935 * assumption is made that zones within a node are ordered in 3936 * monotonically increasing memory addresses so that the "highest" populated zone is used 3937 */ 3938 static void __init find_usable_zone_for_movable(void) 3939 { 3940 int zone_index; 3941 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 3942 if (zone_index == ZONE_MOVABLE) 3943 continue; 3944 3945 if (arch_zone_highest_possible_pfn[zone_index] > 3946 arch_zone_lowest_possible_pfn[zone_index]) 3947 break; 3948 } 3949 3950 VM_BUG_ON(zone_index == -1); 3951 movable_zone = zone_index; 3952 } 3953 3954 /* 3955 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 3956 * because its size is independent of the architecture. Unlike the other zones, 3957 * the starting point for ZONE_MOVABLE is not fixed. It may be different 3958 * in each node depending on the size of each node and how evenly kernelcore 3959 * is distributed. This helper function adjusts the zone ranges 3960 * provided by the architecture for a given node by using the end of the 3961 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 3962 * zones within a node are in order of monotonically increasing memory addresses 3963 */ 3964 static void __meminit adjust_zone_range_for_zone_movable(int nid, 3965 unsigned long zone_type, 3966 unsigned long node_start_pfn, 3967 unsigned long node_end_pfn, 3968 unsigned long *zone_start_pfn, 3969 unsigned long *zone_end_pfn) 3970 { 3971 /* Only adjust if ZONE_MOVABLE is on this node */ 3972 if (zone_movable_pfn[nid]) { 3973 /* Size ZONE_MOVABLE */ 3974 if (zone_type == ZONE_MOVABLE) { 3975 *zone_start_pfn = zone_movable_pfn[nid]; 3976 *zone_end_pfn = min(node_end_pfn, 3977 arch_zone_highest_possible_pfn[movable_zone]); 3978 3979 /* Adjust for ZONE_MOVABLE starting within this range */ 3980 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 3981 *zone_end_pfn > zone_movable_pfn[nid]) { 3982 *zone_end_pfn = zone_movable_pfn[nid]; 3983 3984 /* Check if this whole range is within ZONE_MOVABLE */ 3985 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 3986 *zone_start_pfn = *zone_end_pfn; 3987 } 3988 } 3989 3990 /* 3991 * Return the number of pages a zone spans in a node, including holes 3992 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 3993 */ 3994 static unsigned long __meminit zone_spanned_pages_in_node(int nid, 3995 unsigned long zone_type, 3996 unsigned long *ignored) 3997 { 3998 unsigned long node_start_pfn, node_end_pfn; 3999 unsigned long zone_start_pfn, zone_end_pfn; 4000 4001 /* Get the start and end of the node and zone */ 4002 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 4003 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 4004 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 4005 adjust_zone_range_for_zone_movable(nid, zone_type, 4006 node_start_pfn, node_end_pfn, 4007 &zone_start_pfn, &zone_end_pfn); 4008 4009 /* Check that this node has pages within the zone's required range */ 4010 if
(zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 4011 return 0; 4012 4013 /* Move the zone boundaries inside the node if necessary */ 4014 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 4015 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 4016 4017 /* Return the spanned pages */ 4018 return zone_end_pfn - zone_start_pfn; 4019 } 4020 4021 /* 4022 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 4023 * then all holes in the requested range will be accounted for. 4024 */ 4025 unsigned long __meminit __absent_pages_in_range(int nid, 4026 unsigned long range_start_pfn, 4027 unsigned long range_end_pfn) 4028 { 4029 int i = 0; 4030 unsigned long prev_end_pfn = 0, hole_pages = 0; 4031 unsigned long start_pfn; 4032 4033 /* Find the end_pfn of the first active range of pfns in the node */ 4034 i = first_active_region_index_in_nid(nid); 4035 if (i == -1) 4036 return 0; 4037 4038 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 4039 4040 /* Account for ranges before physical memory on this node */ 4041 if (early_node_map[i].start_pfn > range_start_pfn) 4042 hole_pages = prev_end_pfn - range_start_pfn; 4043 4044 /* Find all holes for the zone within the node */ 4045 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 4046 4047 /* No need to continue if prev_end_pfn is outside the zone */ 4048 if (prev_end_pfn >= range_end_pfn) 4049 break; 4050 4051 /* Make sure the end of the zone is not within the hole */ 4052 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 4053 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 4054 4055 /* Update the hole size count and move on */ 4056 if (start_pfn > range_start_pfn) { 4057 BUG_ON(prev_end_pfn > start_pfn); 4058 hole_pages += start_pfn - prev_end_pfn; 4059 } 4060 prev_end_pfn = early_node_map[i].end_pfn; 4061 } 4062 4063 /* Account for ranges past physical memory on this node */ 4064 if (range_end_pfn > prev_end_pfn) 4065 hole_pages += range_end_pfn - 4066 max(range_start_pfn, prev_end_pfn); 4067 4068 return hole_pages; 4069 } 4070 4071 /** 4072 * absent_pages_in_range - Return number of page frames in holes within a range 4073 * @start_pfn: The start PFN to start searching for holes 4074 * @end_pfn: The end PFN to stop searching for holes 4075 * 4076 * It returns the number of page frames in memory holes within a range.
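*
* For example (illustrative values only), with registered ranges
* [0x1000, 0x2000) and [0x3000, 0x4000), absent_pages_in_range(0x1000, 0x4000)
* returns 0x1000 pages for the hole between the two ranges.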
4077 */ 4078 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 4079 unsigned long end_pfn) 4080 { 4081 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 4082 } 4083 4084 /* Return the number of page frames in holes in a zone on a node */ 4085 static unsigned long __meminit zone_absent_pages_in_node(int nid, 4086 unsigned long zone_type, 4087 unsigned long *ignored) 4088 { 4089 unsigned long node_start_pfn, node_end_pfn; 4090 unsigned long zone_start_pfn, zone_end_pfn; 4091 4092 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 4093 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 4094 node_start_pfn); 4095 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 4096 node_end_pfn); 4097 4098 adjust_zone_range_for_zone_movable(nid, zone_type, 4099 node_start_pfn, node_end_pfn, 4100 &zone_start_pfn, &zone_end_pfn); 4101 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 4102 } 4103 4104 #else 4105 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 4106 unsigned long zone_type, 4107 unsigned long *zones_size) 4108 { 4109 return zones_size[zone_type]; 4110 } 4111 4112 static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 4113 unsigned long zone_type, 4114 unsigned long *zholes_size) 4115 { 4116 if (!zholes_size) 4117 return 0; 4118 4119 return zholes_size[zone_type]; 4120 } 4121 4122 #endif 4123 4124 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 4125 unsigned long *zones_size, unsigned long *zholes_size) 4126 { 4127 unsigned long realtotalpages, totalpages = 0; 4128 enum zone_type i; 4129 4130 for (i = 0; i < MAX_NR_ZONES; i++) 4131 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 4132 zones_size); 4133 pgdat->node_spanned_pages = totalpages; 4134 4135 realtotalpages = totalpages; 4136 for (i = 0; i < MAX_NR_ZONES; i++) 4137 realtotalpages -= 4138 zone_absent_pages_in_node(pgdat->node_id, i, 4139 zholes_size); 4140 pgdat->node_present_pages = realtotalpages; 4141 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 4142 realtotalpages); 4143 } 4144 4145 #ifndef CONFIG_SPARSEMEM 4146 /* 4147 * Calculate the size of zone->pageblock_flags rounded to an unsigned long 4148 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding 4149 * up. Then use NR_PAGEBLOCK_BITS bits per pageblock, finally 4150 * round what is now in bits up to the nearest long in bits, then return it in 4151 * bytes. 4152 */ 4153 static unsigned long __init usemap_size(unsigned long zonesize) 4154 { 4155 unsigned long usemapsize; 4156 4157 usemapsize = roundup(zonesize, pageblock_nr_pages); 4158 usemapsize = usemapsize >> pageblock_order; 4159 usemapsize *= NR_PAGEBLOCK_BITS; 4160 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 4161 4162 return usemapsize / 8; 4163 } 4164 4165 static void __init setup_usemap(struct pglist_data *pgdat, 4166 struct zone *zone, unsigned long zonesize) 4167 { 4168 unsigned long usemapsize = usemap_size(zonesize); 4169 zone->pageblock_flags = NULL; 4170 if (usemapsize) 4171 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, 4172 usemapsize); 4173 } 4174 #else 4175 static inline void setup_usemap(struct pglist_data *pgdat, 4176 struct zone *zone, unsigned long zonesize) {} 4177 #endif /* CONFIG_SPARSEMEM */ 4178 4179 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 4180 4181 /* Return a sensible default order for the pageblock size.
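* If huge pages are larger than base pages (HPAGE_SHIFT > PAGE_SHIFT), size
* pageblocks to one huge page; e.g., assuming HPAGE_SHIFT == 24 and
* PAGE_SHIFT == 14 (illustrative values), HUGETLB_PAGE_ORDER works out to 10.
* Otherwise fall back to MAX_ORDER-1.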
*/ 4182 static inline int pageblock_default_order(void) 4183 { 4184 if (HPAGE_SHIFT > PAGE_SHIFT) 4185 return HUGETLB_PAGE_ORDER; 4186 4187 return MAX_ORDER-1; 4188 } 4189 4190 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 4191 static inline void __init set_pageblock_order(unsigned int order) 4192 { 4193 /* Check that pageblock_nr_pages has not already been setup */ 4194 if (pageblock_order) 4195 return; 4196 4197 /* 4198 * Assume the largest contiguous order of interest is a huge page. 4199 * This value may be variable depending on boot parameters on IA64 4200 */ 4201 pageblock_order = order; 4202 } 4203 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4204 4205 /* 4206 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 4207 * and pageblock_default_order() are unused as pageblock_order is set 4208 * at compile-time. See include/linux/pageblock-flags.h for the values of 4209 * pageblock_order based on the kernel config 4210 */ 4211 static inline int pageblock_default_order(unsigned int order) 4212 { 4213 return MAX_ORDER-1; 4214 } 4215 #define set_pageblock_order(x) do {} while (0) 4216 4217 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4218 4219 /* 4220 * Set up the zone data structures: 4221 * - mark all pages reserved 4222 * - mark all memory queues empty 4223 * - clear the memory bitmaps 4224 */ 4225 static void __paginginit free_area_init_core(struct pglist_data *pgdat, 4226 unsigned long *zones_size, unsigned long *zholes_size) 4227 { 4228 enum zone_type j; 4229 int nid = pgdat->node_id; 4230 unsigned long zone_start_pfn = pgdat->node_start_pfn; 4231 int ret; 4232 4233 pgdat_resize_init(pgdat); 4234 pgdat->nr_zones = 0; 4235 init_waitqueue_head(&pgdat->kswapd_wait); 4236 pgdat->kswapd_max_order = 0; 4237 pgdat_page_cgroup_init(pgdat); 4238 4239 for (j = 0; j < MAX_NR_ZONES; j++) { 4240 struct zone *zone = pgdat->node_zones + j; 4241 unsigned long size, realsize, memmap_pages; 4242 enum lru_list l; 4243 4244 size = zone_spanned_pages_in_node(nid, j, zones_size); 4245 realsize = size - zone_absent_pages_in_node(nid, j, 4246 zholes_size); 4247 4248 /* 4249 * Adjust realsize so that it accounts for how much memory 4250 * is used by this zone for memmap. 
This affects the watermark 4251 * and per-cpu initialisations 4252 */ 4253 memmap_pages = 4254 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; 4255 if (realsize >= memmap_pages) { 4256 realsize -= memmap_pages; 4257 if (memmap_pages) 4258 printk(KERN_DEBUG 4259 " %s zone: %lu pages used for memmap\n", 4260 zone_names[j], memmap_pages); 4261 } else 4262 printk(KERN_WARNING 4263 " %s zone: %lu pages exceeds realsize %lu\n", 4264 zone_names[j], memmap_pages, realsize); 4265 4266 /* Account for reserved pages */ 4267 if (j == 0 && realsize > dma_reserve) { 4268 realsize -= dma_reserve; 4269 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 4270 zone_names[0], dma_reserve); 4271 } 4272 4273 if (!is_highmem_idx(j)) 4274 nr_kernel_pages += realsize; 4275 nr_all_pages += realsize; 4276 4277 zone->spanned_pages = size; 4278 zone->present_pages = realsize; 4279 #ifdef CONFIG_NUMA 4280 zone->node = nid; 4281 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 4282 / 100; 4283 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 4284 #endif 4285 zone->name = zone_names[j]; 4286 spin_lock_init(&zone->lock); 4287 spin_lock_init(&zone->lru_lock); 4288 zone_seqlock_init(zone); 4289 zone->zone_pgdat = pgdat; 4290 4291 zone_pcp_init(zone); 4292 for_each_lru(l) { 4293 INIT_LIST_HEAD(&zone->lru[l].list); 4294 zone->reclaim_stat.nr_saved_scan[l] = 0; 4295 } 4296 zone->reclaim_stat.recent_rotated[0] = 0; 4297 zone->reclaim_stat.recent_rotated[1] = 0; 4298 zone->reclaim_stat.recent_scanned[0] = 0; 4299 zone->reclaim_stat.recent_scanned[1] = 0; 4300 zap_zone_vm_stats(zone); 4301 zone->flags = 0; 4302 if (!size) 4303 continue; 4304 4305 set_pageblock_order(pageblock_default_order()); 4306 setup_usemap(pgdat, zone, size); 4307 ret = init_currently_empty_zone(zone, zone_start_pfn, 4308 size, MEMMAP_EARLY); 4309 BUG_ON(ret); 4310 memmap_init(size, nid, j, zone_start_pfn); 4311 zone_start_pfn += size; 4312 } 4313 } 4314 4315 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 4316 { 4317 /* Skip empty nodes */ 4318 if (!pgdat->node_spanned_pages) 4319 return; 4320 4321 #ifdef CONFIG_FLAT_NODE_MEM_MAP 4322 /* ia64 gets its own node_mem_map, before this, without bootmem */ 4323 if (!pgdat->node_mem_map) { 4324 unsigned long size, start, end; 4325 struct page *map; 4326 4327 /* 4328 * The zone's endpoints aren't required to be MAX_ORDER 4329 * aligned but the node_mem_map endpoints must be in order 4330 * for the buddy allocator to function correctly. 
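* For example, assuming the default MAX_ORDER of 11 and 4 KiB pages,
* MAX_ORDER_NR_PAGES is 1024, so the map below is extended to start and
* end on 4 MiB boundaries.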
4331 */ 4332 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 4333 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 4334 end = ALIGN(end, MAX_ORDER_NR_PAGES); 4335 size = (end - start) * sizeof(struct page); 4336 map = alloc_remap(pgdat->node_id, size); 4337 if (!map) 4338 map = alloc_bootmem_node_nopanic(pgdat, size); 4339 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 4340 } 4341 #ifndef CONFIG_NEED_MULTIPLE_NODES 4342 /* 4343 * With no DISCONTIG, the global mem_map is just set as node 0's 4344 */ 4345 if (pgdat == NODE_DATA(0)) { 4346 mem_map = NODE_DATA(0)->node_mem_map; 4347 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 4348 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 4349 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 4350 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 4351 } 4352 #endif 4353 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 4354 } 4355 4356 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 4357 unsigned long node_start_pfn, unsigned long *zholes_size) 4358 { 4359 pg_data_t *pgdat = NODE_DATA(nid); 4360 4361 pgdat->node_id = nid; 4362 pgdat->node_start_pfn = node_start_pfn; 4363 calculate_node_totalpages(pgdat, zones_size, zholes_size); 4364 4365 alloc_node_mem_map(pgdat); 4366 #ifdef CONFIG_FLAT_NODE_MEM_MAP 4367 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 4368 nid, (unsigned long)pgdat, 4369 (unsigned long)pgdat->node_mem_map); 4370 #endif 4371 4372 free_area_init_core(pgdat, zones_size, zholes_size); 4373 } 4374 4375 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 4376 4377 #if MAX_NUMNODES > 1 4378 /* 4379 * Figure out the number of possible node ids. 4380 */ 4381 static void __init setup_nr_node_ids(void) 4382 { 4383 unsigned int node; 4384 unsigned int highest = 0; 4385 4386 for_each_node_mask(node, node_possible_map) 4387 highest = node; 4388 nr_node_ids = highest + 1; 4389 } 4390 #else 4391 static inline void setup_nr_node_ids(void) 4392 { 4393 } 4394 #endif 4395 4396 /** 4397 * add_active_range - Register a range of PFNs backed by physical memory 4398 * @nid: The node ID the range resides on 4399 * @start_pfn: The start PFN of the available physical memory 4400 * @end_pfn: The end PFN of the available physical memory 4401 * 4402 * These ranges are stored in an early_node_map[] and later used by 4403 * free_area_init_nodes() to calculate zone sizes and holes. If the 4404 * range spans a memory hole, it is up to the architecture to ensure 4405 * the memory is not freed by the bootmem allocator. If possible 4406 * the range being registered will be merged with existing ranges. 
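*
* For example (illustrative values only), registering [0x1000, 0x2000) and
* then [0x1800, 0x3000) on the same node leaves a single merged range
* [0x1000, 0x3000) in early_node_map[].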
4407 */ 4408 void __init add_active_range(unsigned int nid, unsigned long start_pfn, 4409 unsigned long end_pfn) 4410 { 4411 int i; 4412 4413 mminit_dprintk(MMINIT_TRACE, "memory_register", 4414 "Entering add_active_range(%d, %#lx, %#lx) " 4415 "%d entries of %d used\n", 4416 nid, start_pfn, end_pfn, 4417 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 4418 4419 mminit_validate_memmodel_limits(&start_pfn, &end_pfn); 4420 4421 /* Merge with existing active regions if possible */ 4422 for (i = 0; i < nr_nodemap_entries; i++) { 4423 if (early_node_map[i].nid != nid) 4424 continue; 4425 4426 /* Skip if an existing region covers this new one */ 4427 if (start_pfn >= early_node_map[i].start_pfn && 4428 end_pfn <= early_node_map[i].end_pfn) 4429 return; 4430 4431 /* Merge forward if suitable */ 4432 if (start_pfn <= early_node_map[i].end_pfn && 4433 end_pfn > early_node_map[i].end_pfn) { 4434 early_node_map[i].end_pfn = end_pfn; 4435 return; 4436 } 4437 4438 /* Merge backward if suitable */ 4439 if (start_pfn < early_node_map[i].start_pfn && 4440 end_pfn >= early_node_map[i].start_pfn) { 4441 early_node_map[i].start_pfn = start_pfn; 4442 return; 4443 } 4444 } 4445 4446 /* Check that early_node_map is large enough */ 4447 if (i >= MAX_ACTIVE_REGIONS) { 4448 printk(KERN_CRIT "More than %d memory regions, truncating\n", 4449 MAX_ACTIVE_REGIONS); 4450 return; 4451 } 4452 4453 early_node_map[i].nid = nid; 4454 early_node_map[i].start_pfn = start_pfn; 4455 early_node_map[i].end_pfn = end_pfn; 4456 nr_nodemap_entries = i + 1; 4457 } 4458 4459 /** 4460 * remove_active_range - Shrink an existing registered range of PFNs 4461 * @nid: The node id the range is on that should be shrunk 4462 * @start_pfn: The start PFN of the range to remove 4463 * @end_pfn: The end PFN of the range to remove 4464 * 4465 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node. 4466 * The map is kept near the end of the physical page range that has already been 4467 * registered. This function allows an arch to shrink an existing registered 4468 * range.
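*
* For example (illustrative values only), removing [0x1800, 0x1c00) from a
* registered range [0x1000, 0x2000) shrinks it to [0x1000, 0x1800) and
* re-registers [0x1c00, 0x2000) as a separate range.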
4469 */ 4470 void __init remove_active_range(unsigned int nid, unsigned long start_pfn, 4471 unsigned long end_pfn) 4472 { 4473 int i, j; 4474 int removed = 0; 4475 4476 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n", 4477 nid, start_pfn, end_pfn); 4478 4479 /* Find the old active region end and shrink */ 4480 for_each_active_range_index_in_nid(i, nid) { 4481 if (early_node_map[i].start_pfn >= start_pfn && 4482 early_node_map[i].end_pfn <= end_pfn) { 4483 /* clear it */ 4484 early_node_map[i].start_pfn = 0; 4485 early_node_map[i].end_pfn = 0; 4486 removed = 1; 4487 continue; 4488 } 4489 if (early_node_map[i].start_pfn < start_pfn && 4490 early_node_map[i].end_pfn > start_pfn) { 4491 unsigned long temp_end_pfn = early_node_map[i].end_pfn; 4492 early_node_map[i].end_pfn = start_pfn; 4493 if (temp_end_pfn > end_pfn) 4494 add_active_range(nid, end_pfn, temp_end_pfn); 4495 continue; 4496 } 4497 if (early_node_map[i].start_pfn >= start_pfn && 4498 early_node_map[i].end_pfn > end_pfn && 4499 early_node_map[i].start_pfn < end_pfn) { 4500 early_node_map[i].start_pfn = end_pfn; 4501 continue; 4502 } 4503 } 4504 4505 if (!removed) 4506 return; 4507 4508 /* remove the blank ones */ 4509 for (i = nr_nodemap_entries - 1; i > 0; i--) { 4510 if (early_node_map[i].nid != nid) 4511 continue; 4512 if (early_node_map[i].end_pfn) 4513 continue; 4514 /* we found it, get rid of it */ 4515 for (j = i; j < nr_nodemap_entries - 1; j++) 4516 memcpy(&early_node_map[j], &early_node_map[j+1], 4517 sizeof(early_node_map[j])); 4518 j = nr_nodemap_entries - 1; 4519 memset(&early_node_map[j], 0, sizeof(early_node_map[j])); 4520 nr_nodemap_entries--; 4521 } 4522 } 4523 4524 /** 4525 * remove_all_active_ranges - Remove all currently registered regions 4526 * 4527 * During discovery, it may be found that a table like SRAT is invalid 4528 * and an alternative discovery method must be used. This function removes 4529 * all currently registered regions. 4530 */ 4531 void __init remove_all_active_ranges(void) 4532 { 4533 memset(early_node_map, 0, sizeof(early_node_map)); 4534 nr_nodemap_entries = 0; 4535 } 4536 4537 /* Compare two active node_active_regions */ 4538 static int __init cmp_node_active_region(const void *a, const void *b) 4539 { 4540 struct node_active_region *arange = (struct node_active_region *)a; 4541 struct node_active_region *brange = (struct node_active_region *)b; 4542 4543 /* Done this way to avoid overflows */ 4544 if (arange->start_pfn > brange->start_pfn) 4545 return 1; 4546 if (arange->start_pfn < brange->start_pfn) 4547 return -1; 4548 4549 return 0; 4550 } 4551 4552 /* sort the node_map by start_pfn */ 4553 void __init sort_node_map(void) 4554 { 4555 sort(early_node_map, (size_t)nr_nodemap_entries, 4556 sizeof(struct node_active_region), 4557 cmp_node_active_region, NULL); 4558 } 4559 4560 /* Find the lowest pfn for a node */ 4561 static unsigned long __init find_min_pfn_for_node(int nid) 4562 { 4563 int i; 4564 unsigned long min_pfn = ULONG_MAX; 4565 4566 /* Assuming a sorted map, the first range found has the starting pfn */ 4567 for_each_active_range_index_in_nid(i, nid) 4568 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 4569 4570 if (min_pfn == ULONG_MAX) { 4571 printk(KERN_WARNING 4572 "Could not find start_pfn for node %d\n", nid); 4573 return 0; 4574 } 4575 4576 return min_pfn; 4577 } 4578 4579 /** 4580 * find_min_pfn_with_active_regions - Find the minimum PFN registered 4581 * 4582 * It returns the minimum PFN based on information provided via 4583 * add_active_range(). 
4584 */ 4585 unsigned long __init find_min_pfn_with_active_regions(void) 4586 { 4587 return find_min_pfn_for_node(MAX_NUMNODES); 4588 } 4589 4590 /* 4591 * early_calculate_totalpages() 4592 * Sum pages in active regions for movable zone. 4593 * Populate N_HIGH_MEMORY for calculating usable_nodes. 4594 */ 4595 static unsigned long __init early_calculate_totalpages(void) 4596 { 4597 int i; 4598 unsigned long totalpages = 0; 4599 4600 for (i = 0; i < nr_nodemap_entries; i++) { 4601 unsigned long pages = early_node_map[i].end_pfn - 4602 early_node_map[i].start_pfn; 4603 totalpages += pages; 4604 if (pages) 4605 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); 4606 } 4607 return totalpages; 4608 } 4609 4610 /* 4611 * Find the PFN the Movable zone begins in each node. Kernel memory 4612 * is spread evenly between nodes as long as the nodes have enough 4613 * memory. When they don't, some nodes will have more kernelcore than 4614 * others 4615 */ 4616 static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) 4617 { 4618 int i, nid; 4619 unsigned long usable_startpfn; 4620 unsigned long kernelcore_node, kernelcore_remaining; 4621 /* save the state before borrow the nodemask */ 4622 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY]; 4623 unsigned long totalpages = early_calculate_totalpages(); 4624 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); 4625 4626 /* 4627 * If movablecore was specified, calculate what size of 4628 * kernelcore that corresponds so that memory usable for 4629 * any allocation type is evenly spread. If both kernelcore 4630 * and movablecore are specified, then the value of kernelcore 4631 * will be used for required_kernelcore if it's greater than 4632 * what movablecore would have allowed. 4633 */ 4634 if (required_movablecore) { 4635 unsigned long corepages; 4636 4637 /* 4638 * Round-up so that ZONE_MOVABLE is at least as large as what 4639 * was requested by the user 4640 */ 4641 required_movablecore = 4642 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 4643 corepages = totalpages - required_movablecore; 4644 4645 required_kernelcore = max(required_kernelcore, corepages); 4646 } 4647 4648 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 4649 if (!required_kernelcore) 4650 goto out; 4651 4652 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 4653 find_usable_zone_for_movable(); 4654 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 4655 4656 restart: 4657 /* Spread kernelcore memory as evenly as possible throughout nodes */ 4658 kernelcore_node = required_kernelcore / usable_nodes; 4659 for_each_node_state(nid, N_HIGH_MEMORY) { 4660 /* 4661 * Recalculate kernelcore_node if the division per node 4662 * now exceeds what is necessary to satisfy the requested 4663 * amount of memory for the kernel 4664 */ 4665 if (required_kernelcore < kernelcore_node) 4666 kernelcore_node = required_kernelcore / usable_nodes; 4667 4668 /* 4669 * As the map is walked, we track how much memory is usable 4670 * by the kernel using kernelcore_remaining. 
When it is 4671 * 0, the rest of the node is usable by ZONE_MOVABLE 4672 */ 4673 kernelcore_remaining = kernelcore_node; 4674 4675 /* Go through each range of PFNs within this node */ 4676 for_each_active_range_index_in_nid(i, nid) { 4677 unsigned long start_pfn, end_pfn; 4678 unsigned long size_pages; 4679 4680 start_pfn = max(early_node_map[i].start_pfn, 4681 zone_movable_pfn[nid]); 4682 end_pfn = early_node_map[i].end_pfn; 4683 if (start_pfn >= end_pfn) 4684 continue; 4685 4686 /* Account for what is only usable for kernelcore */ 4687 if (start_pfn < usable_startpfn) { 4688 unsigned long kernel_pages; 4689 kernel_pages = min(end_pfn, usable_startpfn) 4690 - start_pfn; 4691 4692 kernelcore_remaining -= min(kernel_pages, 4693 kernelcore_remaining); 4694 required_kernelcore -= min(kernel_pages, 4695 required_kernelcore); 4696 4697 /* Continue if range is now fully accounted */ 4698 if (end_pfn <= usable_startpfn) { 4699 4700 /* 4701 * Push zone_movable_pfn to the end so 4702 * that if we have to rebalance 4703 * kernelcore across nodes, we will 4704 * not double account here 4705 */ 4706 zone_movable_pfn[nid] = end_pfn; 4707 continue; 4708 } 4709 start_pfn = usable_startpfn; 4710 } 4711 4712 /* 4713 * The usable PFN range for ZONE_MOVABLE is from 4714 * start_pfn->end_pfn. Calculate size_pages as the 4715 * number of pages used as kernelcore 4716 */ 4717 size_pages = end_pfn - start_pfn; 4718 if (size_pages > kernelcore_remaining) 4719 size_pages = kernelcore_remaining; 4720 zone_movable_pfn[nid] = start_pfn + size_pages; 4721 4722 /* 4723 * Some kernelcore has been met, update counts and 4724 * break if the kernelcore for this node has been 4725 * satisfied 4726 */ 4727 required_kernelcore -= min(required_kernelcore, 4728 size_pages); 4729 kernelcore_remaining -= size_pages; 4730 if (!kernelcore_remaining) 4731 break; 4732 } 4733 } 4734 4735 /* 4736 * If there is still required_kernelcore, we do another pass with one 4737 * less node in the count. This will push zone_movable_pfn[nid] further 4738 * along on the nodes that still have memory until kernelcore is 4739 * satisfied 4740 */ 4741 usable_nodes--; 4742 if (usable_nodes && required_kernelcore > usable_nodes) 4743 goto restart; 4744 4745 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 4746 for (nid = 0; nid < MAX_NUMNODES; nid++) 4747 zone_movable_pfn[nid] = 4748 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 4749 4750 out: 4751 /* restore the node_state */ 4752 node_states[N_HIGH_MEMORY] = saved_node_state; 4753 } 4754 4755 /* Any regular memory on that node? */ 4756 static void check_for_regular_memory(pg_data_t *pgdat) 4757 { 4758 #ifdef CONFIG_HIGHMEM 4759 enum zone_type zone_type; 4760 4761 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) { 4762 struct zone *zone = &pgdat->node_zones[zone_type]; 4763 if (zone->present_pages) 4764 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY); 4765 } 4766 #endif 4767 } 4768 4769 /** 4770 * free_area_init_nodes - Initialise all pg_data_t and zone data 4771 * @max_zone_pfn: an array of max PFNs for each zone 4772 * 4773 * This will call free_area_init_node() for each active node in the system. 4774 * Using the page ranges provided by add_active_range(), the size of each 4775 * zone in each node and their holes is calculated. If the maximum PFNs 4776 * of two adjacent zones match, it is assumed that the higher zone is empty. 4777 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 4778 * that arch_max_dma32_pfn has no pages.
It is also assumed that a zone 4779 * starts where the previous one ended. For example, ZONE_DMA32 starts 4780 * at arch_max_dma_pfn. 4781 */ 4782 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 4783 { 4784 unsigned long nid; 4785 int i; 4786 4787 /* Sort early_node_map as initialisation assumes it is sorted */ 4788 sort_node_map(); 4789 4790 /* Record where the zone boundaries are */ 4791 memset(arch_zone_lowest_possible_pfn, 0, 4792 sizeof(arch_zone_lowest_possible_pfn)); 4793 memset(arch_zone_highest_possible_pfn, 0, 4794 sizeof(arch_zone_highest_possible_pfn)); 4795 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 4796 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 4797 for (i = 1; i < MAX_NR_ZONES; i++) { 4798 if (i == ZONE_MOVABLE) 4799 continue; 4800 arch_zone_lowest_possible_pfn[i] = 4801 arch_zone_highest_possible_pfn[i-1]; 4802 arch_zone_highest_possible_pfn[i] = 4803 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 4804 } 4805 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 4806 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 4807 4808 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 4809 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 4810 find_zone_movable_pfns_for_nodes(zone_movable_pfn); 4811 4812 /* Print out the zone ranges */ 4813 printk("Zone PFN ranges:\n"); 4814 for (i = 0; i < MAX_NR_ZONES; i++) { 4815 if (i == ZONE_MOVABLE) 4816 continue; 4817 printk(" %-8s ", zone_names[i]); 4818 if (arch_zone_lowest_possible_pfn[i] == 4819 arch_zone_highest_possible_pfn[i]) 4820 printk("empty\n"); 4821 else 4822 printk("%0#10lx -> %0#10lx\n", 4823 arch_zone_lowest_possible_pfn[i], 4824 arch_zone_highest_possible_pfn[i]); 4825 } 4826 4827 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 4828 printk("Movable zone start PFN for each node\n"); 4829 for (i = 0; i < MAX_NUMNODES; i++) { 4830 if (zone_movable_pfn[i]) 4831 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); 4832 } 4833 4834 /* Print out the early_node_map[] */ 4835 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 4836 for (i = 0; i < nr_nodemap_entries; i++) 4837 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid, 4838 early_node_map[i].start_pfn, 4839 early_node_map[i].end_pfn); 4840 4841 /* Initialise every node */ 4842 mminit_verify_pageflags_layout(); 4843 setup_nr_node_ids(); 4844 for_each_online_node(nid) { 4845 pg_data_t *pgdat = NODE_DATA(nid); 4846 free_area_init_node(nid, NULL, 4847 find_min_pfn_for_node(nid), NULL); 4848 4849 /* Any memory on that node */ 4850 if (pgdat->node_present_pages) 4851 node_set_state(nid, N_HIGH_MEMORY); 4852 check_for_regular_memory(pgdat); 4853 } 4854 } 4855 4856 static int __init cmdline_parse_core(char *p, unsigned long *core) 4857 { 4858 unsigned long long coremem; 4859 if (!p) 4860 return -EINVAL; 4861 4862 coremem = memparse(p, &p); 4863 *core = coremem >> PAGE_SHIFT; 4864 4865 /* Paranoid check that UL is enough for the coremem value */ 4866 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 4867 4868 return 0; 4869 } 4870 4871 /* 4872 * kernelcore=size sets the amount of memory for use for allocations that 4873 * cannot be reclaimed or migrated. 4874 */ 4875 static int __init cmdline_parse_kernelcore(char *p) 4876 { 4877 return cmdline_parse_core(p, &required_kernelcore); 4878 } 4879 4880 /* 4881 * movablecore=size sets the amount of memory for use for allocations that 4882 * can be reclaimed or migrated. 
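* The size is parsed with memparse(), so suffixes such as K, M and G are
* accepted (e.g. movablecore=512M).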
4883 */ 4884 static int __init cmdline_parse_movablecore(char *p) 4885 { 4886 return cmdline_parse_core(p, &required_movablecore); 4887 } 4888 4889 early_param("kernelcore", cmdline_parse_kernelcore); 4890 early_param("movablecore", cmdline_parse_movablecore); 4891 4892 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 4893 4894 /** 4895 * set_dma_reserve - set the specified number of pages reserved in the first zone 4896 * @new_dma_reserve: The number of pages to mark reserved 4897 * 4898 * The per-cpu batchsize and zone watermarks are determined by present_pages. 4899 * In the DMA zone, a significant percentage may be consumed by kernel image 4900 * and other unfreeable allocations which can skew the watermarks badly. This 4901 * function may optionally be used to account for unfreeable pages in the 4902 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 4903 * smaller per-cpu batchsize. 4904 */ 4905 void __init set_dma_reserve(unsigned long new_dma_reserve) 4906 { 4907 dma_reserve = new_dma_reserve; 4908 } 4909 4910 void __init free_area_init(unsigned long *zones_size) 4911 { 4912 free_area_init_node(0, zones_size, 4913 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 4914 } 4915 4916 static int page_alloc_cpu_notify(struct notifier_block *self, 4917 unsigned long action, void *hcpu) 4918 { 4919 int cpu = (unsigned long)hcpu; 4920 4921 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 4922 drain_pages(cpu); 4923 4924 /* 4925 * Spill the event counters of the dead processor 4926 * into the current processor's event counters. 4927 * This artificially elevates the count of the current 4928 * processor. 4929 */ 4930 vm_events_fold_cpu(cpu); 4931 4932 /* 4933 * Zero the differential counters of the dead processor 4934 * so that the vm statistics are consistent. 4935 * 4936 * This is only okay since the processor is dead and cannot 4937 * race with what we are doing. 4938 */ 4939 refresh_cpu_vm_stats(cpu); 4940 } 4941 return NOTIFY_OK; 4942 } 4943 4944 void __init page_alloc_init(void) 4945 { 4946 hotcpu_notifier(page_alloc_cpu_notify, 0); 4947 } 4948 4949 /* 4950 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 4951 * or min_free_kbytes changes. 4952 */ 4953 static void calculate_totalreserve_pages(void) 4954 { 4955 struct pglist_data *pgdat; 4956 unsigned long reserve_pages = 0; 4957 enum zone_type i, j; 4958 4959 for_each_online_pgdat(pgdat) { 4960 for (i = 0; i < MAX_NR_ZONES; i++) { 4961 struct zone *zone = pgdat->node_zones + i; 4962 unsigned long max = 0; 4963 4964 /* Find valid and maximum lowmem_reserve in the zone */ 4965 for (j = i; j < MAX_NR_ZONES; j++) { 4966 if (zone->lowmem_reserve[j] > max) 4967 max = zone->lowmem_reserve[j]; 4968 } 4969 4970 /* we treat the high watermark as reserved pages. */ 4971 max += high_wmark_pages(zone); 4972 4973 if (max > zone->present_pages) 4974 max = zone->present_pages; 4975 reserve_pages += max; 4976 } 4977 } 4978 totalreserve_pages = reserve_pages; 4979 } 4980 4981 /* 4982 * setup_per_zone_lowmem_reserve - called whenever 4983 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 4984 * has a correct lowmem_reserve value, so that an adequate number of 4985 * pages is left in the zone after a successful __alloc_pages().
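*
* In effect, the loop below sets zone idx's lowmem_reserve[j] to the sum of
* present_pages over zones idx+1 through j divided by
* sysctl_lowmem_reserve_ratio[idx]; a smaller ratio therefore means a larger
* reserve.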
4986 */ 4987 static void setup_per_zone_lowmem_reserve(void) 4988 { 4989 struct pglist_data *pgdat; 4990 enum zone_type j, idx; 4991 4992 for_each_online_pgdat(pgdat) { 4993 for (j = 0; j < MAX_NR_ZONES; j++) { 4994 struct zone *zone = pgdat->node_zones + j; 4995 unsigned long present_pages = zone->present_pages; 4996 4997 zone->lowmem_reserve[j] = 0; 4998 4999 idx = j; 5000 while (idx) { 5001 struct zone *lower_zone; 5002 5003 idx--; 5004 5005 if (sysctl_lowmem_reserve_ratio[idx] < 1) 5006 sysctl_lowmem_reserve_ratio[idx] = 1; 5007 5008 lower_zone = pgdat->node_zones + idx; 5009 lower_zone->lowmem_reserve[j] = present_pages / 5010 sysctl_lowmem_reserve_ratio[idx]; 5011 present_pages += lower_zone->present_pages; 5012 } 5013 } 5014 } 5015 5016 /* update totalreserve_pages */ 5017 calculate_totalreserve_pages(); 5018 } 5019 5020 /** 5021 * setup_per_zone_wmarks - called when min_free_kbytes changes 5022 * or when memory is hot-{added|removed} 5023 * 5024 * Ensures that the watermark[min,low,high] values for each zone are set 5025 * correctly with respect to min_free_kbytes. 5026 */ 5027 void setup_per_zone_wmarks(void) 5028 { 5029 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5030 unsigned long lowmem_pages = 0; 5031 struct zone *zone; 5032 unsigned long flags; 5033 5034 /* Calculate total number of !ZONE_HIGHMEM pages */ 5035 for_each_zone(zone) { 5036 if (!is_highmem(zone)) 5037 lowmem_pages += zone->present_pages; 5038 } 5039 5040 for_each_zone(zone) { 5041 u64 tmp; 5042 5043 spin_lock_irqsave(&zone->lock, flags); 5044 tmp = (u64)pages_min * zone->present_pages; 5045 do_div(tmp, lowmem_pages); 5046 if (is_highmem(zone)) { 5047 /* 5048 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 5049 * need highmem pages, so cap pages_min to a small 5050 * value here. 5051 * 5052 * The (WMARK_HIGH-WMARK_LOW) and (WMARK_LOW-WMARK_MIN) 5053 * deltas control async page reclaim, and so should 5054 * not be capped for highmem. 5055 */ 5056 int min_pages; 5057 5058 min_pages = zone->present_pages / 1024; 5059 if (min_pages < SWAP_CLUSTER_MAX) 5060 min_pages = SWAP_CLUSTER_MAX; 5061 if (min_pages > 128) 5062 min_pages = 128; 5063 zone->watermark[WMARK_MIN] = min_pages; 5064 } else { 5065 /* 5066 * If it's a lowmem zone, reserve a number of pages 5067 * proportionate to the zone's size. 5068 */ 5069 zone->watermark[WMARK_MIN] = tmp; 5070 } 5071 5072 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); 5073 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); 5074 setup_zone_migrate_reserve(zone); 5075 spin_unlock_irqrestore(&zone->lock, flags); 5076 } 5077 5078 /* update totalreserve_pages */ 5079 calculate_totalreserve_pages(); 5080 } 5081 5082 /* 5083 * The inactive anon list should be small enough that the VM never has to 5084 * do too much work, but large enough that each inactive page has a chance 5085 * to be referenced again before it is swapped out. 5086 * 5087 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to 5088 * INACTIVE_ANON pages on this zone's LRU, maintained by the 5089 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of 5090 * the anonymous pages are kept on the inactive list.
5091 * 5092 * total target max 5093 * memory ratio inactive anon 5094 * ------------------------------------- 5095 * 10MB 1 5MB 5096 * 100MB 1 50MB 5097 * 1GB 3 250MB 5098 * 10GB 10 0.9GB 5099 * 100GB 31 3GB 5100 * 1TB 101 10GB 5101 * 10TB 320 32GB 5102 */ 5103 void calculate_zone_inactive_ratio(struct zone *zone) 5104 { 5105 unsigned int gb, ratio; 5106 5107 /* Zone size in gigabytes */ 5108 gb = zone->present_pages >> (30 - PAGE_SHIFT); 5109 if (gb) 5110 ratio = int_sqrt(10 * gb); 5111 else 5112 ratio = 1; 5113 5114 zone->inactive_ratio = ratio; 5115 } 5116 5117 static void __init setup_per_zone_inactive_ratio(void) 5118 { 5119 struct zone *zone; 5120 5121 for_each_zone(zone) 5122 calculate_zone_inactive_ratio(zone); 5123 } 5124 5125 /* 5126 * Initialise min_free_kbytes. 5127 * 5128 * For small machines we want it small (128k min). For large machines 5129 * we want it large (64MB max). But it is not linear, because network 5130 * bandwidth does not increase linearly with machine size. We use 5131 * 5132 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 5133 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 5134 * 5135 * which yields 5136 * 5137 * 16MB: 512k 5138 * 32MB: 724k 5139 * 64MB: 1024k 5140 * 128MB: 1448k 5141 * 256MB: 2048k 5142 * 512MB: 2896k 5143 * 1024MB: 4096k 5144 * 2048MB: 5792k 5145 * 4096MB: 8192k 5146 * 8192MB: 11584k 5147 * 16384MB: 16384k 5148 */ 5149 static int __init init_per_zone_wmark_min(void) 5150 { 5151 unsigned long lowmem_kbytes; 5152 5153 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 5154 5155 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 5156 if (min_free_kbytes < 128) 5157 min_free_kbytes = 128; 5158 if (min_free_kbytes > 65536) 5159 min_free_kbytes = 65536; 5160 setup_per_zone_wmarks(); 5161 setup_per_zone_lowmem_reserve(); 5162 setup_per_zone_inactive_ratio(); 5163 return 0; 5164 } 5165 module_init(init_per_zone_wmark_min) 5166 5167 /* 5168 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 5169 * that we can call two helper functions whenever min_free_kbytes 5170 * changes. 5171 */ 5172 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 5173 void __user *buffer, size_t *length, loff_t *ppos) 5174 { 5175 proc_dointvec(table, write, buffer, length, ppos); 5176 if (write) 5177 setup_per_zone_wmarks(); 5178 return 0; 5179 } 5180 5181 #ifdef CONFIG_NUMA 5182 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 5183 void __user *buffer, size_t *length, loff_t *ppos) 5184 { 5185 struct zone *zone; 5186 int rc; 5187 5188 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5189 if (rc) 5190 return rc; 5191 5192 for_each_zone(zone) 5193 zone->min_unmapped_pages = (zone->present_pages * 5194 sysctl_min_unmapped_ratio) / 100; 5195 return 0; 5196 } 5197 5198 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 5199 void __user *buffer, size_t *length, loff_t *ppos) 5200 { 5201 struct zone *zone; 5202 int rc; 5203 5204 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5205 if (rc) 5206 return rc; 5207 5208 for_each_zone(zone) 5209 zone->min_slab_pages = (zone->present_pages * 5210 sysctl_min_slab_ratio) / 100; 5211 return 0; 5212 } 5213 #endif 5214 5215 /* 5216 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 5217 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 5218 * whenever sysctl_lowmem_reserve_ratio changes. 
5219 * 5220 * The reserve ratio obviously has absolutely no relation to the 5221 * minimum watermarks. The lowmem reserve ratio only makes sense 5222 * as a function of the boot-time zone sizes. 5223 */ 5224 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 5225 void __user *buffer, size_t *length, loff_t *ppos) 5226 { 5227 proc_dointvec_minmax(table, write, buffer, length, ppos); 5228 setup_per_zone_lowmem_reserve(); 5229 return 0; 5230 } 5231 5232 /* 5233 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 5234 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 5235 * can have before it gets flushed back to the buddy allocator. 5236 */ 5237 5238 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 5239 void __user *buffer, size_t *length, loff_t *ppos) 5240 { 5241 struct zone *zone; 5242 unsigned int cpu; 5243 int ret; 5244 5245 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5246 if (!write || (ret == -EINVAL)) 5247 return ret; 5248 for_each_populated_zone(zone) { 5249 for_each_possible_cpu(cpu) { 5250 unsigned long high; 5251 high = zone->present_pages / percpu_pagelist_fraction; 5252 setup_pagelist_highmark( 5253 per_cpu_ptr(zone->pageset, cpu), high); 5254 } 5255 } 5256 return 0; 5257 } 5258 5259 int hashdist = HASHDIST_DEFAULT; 5260 5261 #ifdef CONFIG_NUMA 5262 static int __init set_hashdist(char *str) 5263 { 5264 if (!str) 5265 return 0; 5266 hashdist = simple_strtoul(str, &str, 0); 5267 return 1; 5268 } 5269 __setup("hashdist=", set_hashdist); 5270 #endif 5271 5272 /* 5273 * allocate a large system hash table from bootmem 5274 * - it is assumed that the hash table must contain an exact power-of-2 5275 * quantity of entries 5276 * - limit is the number of hash buckets, not the total allocation size 5277 */ 5278 void *__init alloc_large_system_hash(const char *tablename, 5279 unsigned long bucketsize, 5280 unsigned long numentries, 5281 int scale, 5282 int flags, 5283 unsigned int *_hash_shift, 5284 unsigned int *_hash_mask, 5285 unsigned long limit) 5286 { 5287 unsigned long long max = limit; 5288 unsigned long log2qty, size; 5289 void *table = NULL; 5290 5291 /* allow the kernel cmdline to have a say */ 5292 if (!numentries) { 5293 /* round applicable memory size up to nearest megabyte */ 5294 numentries = nr_kernel_pages; 5295 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 5296 numentries >>= 20 - PAGE_SHIFT; 5297 numentries <<= 20 - PAGE_SHIFT; 5298 5299 /* limit to 1 bucket per 2^scale bytes of low memory */ 5300 if (scale > PAGE_SHIFT) 5301 numentries >>= (scale - PAGE_SHIFT); 5302 else 5303 numentries <<= (PAGE_SHIFT - scale); 5304 5305 /* Make sure we've got at least a 0-order allocation..
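* e.g., assuming 16-byte buckets and 4 KiB pages, this bumps numentries
* to at least 256 so that the table fills one whole page.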
*/ 5306 if (unlikely(flags & HASH_SMALL)) { 5307 /* Makes no sense without HASH_EARLY */ 5308 WARN_ON(!(flags & HASH_EARLY)); 5309 if (!(numentries >> *_hash_shift)) { 5310 numentries = 1UL << *_hash_shift; 5311 BUG_ON(!numentries); 5312 } 5313 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 5314 numentries = PAGE_SIZE / bucketsize; 5315 } 5316 numentries = roundup_pow_of_two(numentries); 5317 5318 /* limit allocation size to 1/16 total memory by default */ 5319 if (max == 0) { 5320 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 5321 do_div(max, bucketsize); 5322 } 5323 5324 if (numentries > max) 5325 numentries = max; 5326 5327 log2qty = ilog2(numentries); 5328 5329 do { 5330 size = bucketsize << log2qty; 5331 if (flags & HASH_EARLY) 5332 table = alloc_bootmem_nopanic(size); 5333 else if (hashdist) 5334 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 5335 else { 5336 /* 5337 * If bucketsize is not a power of two, we may free 5338 * some pages at the end of the hash table; 5339 * alloc_pages_exact() does this automatically 5340 */ 5341 if (get_order(size) < MAX_ORDER) { 5342 table = alloc_pages_exact(size, GFP_ATOMIC); 5343 kmemleak_alloc(table, size, 1, GFP_ATOMIC); 5344 } 5345 } 5346 } while (!table && size > PAGE_SIZE && --log2qty); 5347 5348 if (!table) 5349 panic("Failed to allocate %s hash table\n", tablename); 5350 5351 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", 5352 tablename, 5353 (1UL << log2qty), 5354 ilog2(size) - PAGE_SHIFT, 5355 size); 5356 5357 if (_hash_shift) 5358 *_hash_shift = log2qty; 5359 if (_hash_mask) 5360 *_hash_mask = (1 << log2qty) - 1; 5361 5362 return table; 5363 } 5364 5365 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 5366 static inline unsigned long *get_pageblock_bitmap(struct zone *zone, 5367 unsigned long pfn) 5368 { 5369 #ifdef CONFIG_SPARSEMEM 5370 return __pfn_to_section(pfn)->pageblock_flags; 5371 #else 5372 return zone->pageblock_flags; 5373 #endif /* CONFIG_SPARSEMEM */ 5374 } 5375 5376 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) 5377 { 5378 #ifdef CONFIG_SPARSEMEM 5379 pfn &= (PAGES_PER_SECTION-1); 5380 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 5381 #else 5382 pfn = pfn - zone->zone_start_pfn; 5383 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 5384 #endif /* CONFIG_SPARSEMEM */ 5385 } 5386 5387 /** 5388 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 5389 * @page: The page within the block of interest 5390 * @start_bitidx: The first bit of interest to retrieve 5391 * @end_bitidx: The last bit of interest 5392 * returns pageblock_bits flags 5393 */ 5394 unsigned long get_pageblock_flags_group(struct page *page, 5395 int start_bitidx, int end_bitidx) 5396 { 5397 struct zone *zone; 5398 unsigned long *bitmap; 5399 unsigned long pfn, bitidx; 5400 unsigned long flags = 0; 5401 unsigned long value = 1; 5402 5403 zone = page_zone(page); 5404 pfn = page_to_pfn(page); 5405 bitmap = get_pageblock_bitmap(zone, pfn); 5406 bitidx = pfn_to_bitidx(zone, pfn); 5407 5408 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 5409 if (test_bit(bitidx + start_bitidx, bitmap)) 5410 flags |= value; 5411 5412 return flags; 5413 } 5414 5415 /** 5416 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages 5417 * @page: The page within the block of interest 5418 * @start_bitidx: The first bit of interest 5419 * @end_bitidx: The last
bit of interest 5420 * @flags: The flags to set 5421 */ 5422 void set_pageblock_flags_group(struct page *page, unsigned long flags, 5423 int start_bitidx, int end_bitidx) 5424 { 5425 struct zone *zone; 5426 unsigned long *bitmap; 5427 unsigned long pfn, bitidx; 5428 unsigned long value = 1; 5429 5430 zone = page_zone(page); 5431 pfn = page_to_pfn(page); 5432 bitmap = get_pageblock_bitmap(zone, pfn); 5433 bitidx = pfn_to_bitidx(zone, pfn); 5434 VM_BUG_ON(pfn < zone->zone_start_pfn); 5435 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); 5436 5437 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 5438 if (flags & value) 5439 __set_bit(bitidx + start_bitidx, bitmap); 5440 else 5441 __clear_bit(bitidx + start_bitidx, bitmap); 5442 } 5443 5444 /* 5445 * This is designed as a sub-function; please see page_isolation.c as well. 5446 * It sets/clears a pageblock's type to ISOLATE. 5447 * The page allocator never allocates memory from an ISOLATE block. 5448 */ 5449 5450 static int 5451 __count_immobile_pages(struct zone *zone, struct page *page, int count) 5452 { 5453 unsigned long pfn, iter, found; 5454 /* 5455 * To avoid noisy data, lru_add_drain_all() should be called first. 5456 * A ZONE_MOVABLE zone never contains immobile pages 5457 */ 5458 if (zone_idx(zone) == ZONE_MOVABLE) 5459 return true; 5460 5461 if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE) 5462 return true; 5463 5464 pfn = page_to_pfn(page); 5465 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { 5466 unsigned long check = pfn + iter; 5467 5468 if (!pfn_valid_within(check)) 5469 continue; 5470 5471 page = pfn_to_page(check); 5472 if (!page_count(page)) { 5473 if (PageBuddy(page)) 5474 iter += (1 << page_order(page)) - 1; 5475 continue; 5476 } 5477 if (!PageLRU(page)) 5478 found++; 5479 /* 5480 * If there are RECLAIMABLE pages, we need to check them. 5481 * But for now, memory offlining itself doesn't call shrink_slab() 5482 * and this still needs to be fixed. 5483 */ 5484 /* 5485 * If the page is not RAM, page_count() should be 0; 5486 * no further checks are needed. This is a _used_ non-movable page. 5487 * 5488 * The problematic thing here is PG_reserved pages. PG_reserved 5489 * is set on both a memory hole page and a _used_ kernel 5490 * page at boot. 5491 */ 5492 if (found > count) 5493 return false; 5494 } 5495 return true; 5496 } 5497 5498 bool is_pageblock_removable_nolock(struct page *page) 5499 { 5500 struct zone *zone = page_zone(page); 5501 return __count_immobile_pages(zone, page, 0); 5502 } 5503 5504 int set_migratetype_isolate(struct page *page) 5505 { 5506 struct zone *zone; 5507 unsigned long flags, pfn; 5508 struct memory_isolate_notify arg; 5509 int notifier_ret; 5510 int ret = -EBUSY; 5511 int zone_idx; 5512 5513 zone = page_zone(page); 5514 zone_idx = zone_idx(zone); 5515 5516 spin_lock_irqsave(&zone->lock, flags); 5517 5518 pfn = page_to_pfn(page); 5519 arg.start_pfn = pfn; 5520 arg.nr_pages = pageblock_nr_pages; 5521 arg.pages_found = 0; 5522 5523 /* 5524 * It may be possible to isolate a pageblock even if the 5525 * migratetype is not MIGRATE_MOVABLE. The memory isolation 5526 * notifier chain is used by balloon drivers to return the 5527 * number of pages in a range that are held by the balloon 5528 * driver to shrink memory. If all the pages are accounted for 5529 * by balloons, are free, or on the LRU, isolation can continue.
5530 * Later, for example, when the memory hotplug notifier runs, these 5531 * pages reported as "can be isolated" should be isolated (freed) 5532 * by the balloon driver through the memory notifier chain. 5533 */ 5534 notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg); 5535 notifier_ret = notifier_to_errno(notifier_ret); 5536 if (notifier_ret) 5537 goto out; 5538 /* 5539 * FIXME: For now, memory hotplug doesn't call shrink_slab() by itself. 5540 * We just check MOVABLE pages. 5541 */ 5542 if (__count_immobile_pages(zone, page, arg.pages_found)) 5543 ret = 0; 5544 5545 /* 5546 * Immobile means "not-on-LRU" pages. If the immobile count is larger than 5547 * the removable-by-driver pages reported by the notifier, we'll fail. 5548 */ 5549 5550 out: 5551 if (!ret) { 5552 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 5553 move_freepages_block(zone, page, MIGRATE_ISOLATE); 5554 } 5555 5556 spin_unlock_irqrestore(&zone->lock, flags); 5557 if (!ret) 5558 drain_all_pages(); 5559 return ret; 5560 } 5561 5562 void unset_migratetype_isolate(struct page *page) 5563 { 5564 struct zone *zone; 5565 unsigned long flags; 5566 zone = page_zone(page); 5567 spin_lock_irqsave(&zone->lock, flags); 5568 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 5569 goto out; 5570 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 5571 move_freepages_block(zone, page, MIGRATE_MOVABLE); 5572 out: 5573 spin_unlock_irqrestore(&zone->lock, flags); 5574 } 5575 5576 #ifdef CONFIG_MEMORY_HOTREMOVE 5577 /* 5578 * All pages in the range must be isolated before calling this. 5579 */ 5580 void 5581 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 5582 { 5583 struct page *page; 5584 struct zone *zone; 5585 int order, i; 5586 unsigned long pfn; 5587 unsigned long flags; 5588 /* find the first valid pfn */ 5589 for (pfn = start_pfn; pfn < end_pfn; pfn++) 5590 if (pfn_valid(pfn)) 5591 break; 5592 if (pfn == end_pfn) 5593 return; 5594 zone = page_zone(pfn_to_page(pfn)); 5595 spin_lock_irqsave(&zone->lock, flags); 5596 pfn = start_pfn; 5597 while (pfn < end_pfn) { 5598 if (!pfn_valid(pfn)) { 5599 pfn++; 5600 continue; 5601 } 5602 page = pfn_to_page(pfn); 5603 BUG_ON(page_count(page)); 5604 BUG_ON(!PageBuddy(page)); 5605 order = page_order(page); 5606 #ifdef CONFIG_DEBUG_VM 5607 printk(KERN_INFO "remove from free list %lx %d %lx\n", 5608 pfn, 1 << order, end_pfn); 5609 #endif 5610 list_del(&page->lru); 5611 rmv_page_order(page); 5612 zone->free_area[order].nr_free--; 5613 __mod_zone_page_state(zone, NR_FREE_PAGES, 5614 - (1UL << order)); 5615 for (i = 0; i < (1 << order); i++) 5616 SetPageReserved((page+i)); 5617 pfn += (1 << order); 5618 } 5619 spin_unlock_irqrestore(&zone->lock, flags); 5620 } 5621 #endif 5622 5623 #ifdef CONFIG_MEMORY_FAILURE 5624 bool is_free_buddy_page(struct page *page) 5625 { 5626 struct zone *zone = page_zone(page); 5627 unsigned long pfn = page_to_pfn(page); 5628 unsigned long flags; 5629 int order; 5630 5631 spin_lock_irqsave(&zone->lock, flags); 5632 for (order = 0; order < MAX_ORDER; order++) { 5633 struct page *page_head = page - (pfn & ((1 << order) - 1)); 5634 5635 if (PageBuddy(page_head) && page_order(page_head) >= order) 5636 break; 5637 } 5638 spin_unlock_irqrestore(&zone->lock, flags); 5639 5640 return order < MAX_ORDER; 5641 } 5642 #endif 5643 5644 static struct trace_print_flags pageflag_names[] = { 5645 {1UL << PG_locked, "locked" }, 5646 {1UL << PG_error, "error" }, 5647 {1UL << PG_referenced, "referenced" }, 5648 {1UL << PG_uptodate, "uptodate" }, 5649 {1UL << PG_dirty, "dirty" },
5650 {1UL << PG_lru, "lru" }, 5651 {1UL << PG_active, "active" }, 5652 {1UL << PG_slab, "slab" }, 5653 {1UL << PG_owner_priv_1, "owner_priv_1" }, 5654 {1UL << PG_arch_1, "arch_1" }, 5655 {1UL << PG_reserved, "reserved" }, 5656 {1UL << PG_private, "private" }, 5657 {1UL << PG_private_2, "private_2" }, 5658 {1UL << PG_writeback, "writeback" }, 5659 #ifdef CONFIG_PAGEFLAGS_EXTENDED 5660 {1UL << PG_head, "head" }, 5661 {1UL << PG_tail, "tail" }, 5662 #else 5663 {1UL << PG_compound, "compound" }, 5664 #endif 5665 {1UL << PG_swapcache, "swapcache" }, 5666 {1UL << PG_mappedtodisk, "mappedtodisk" }, 5667 {1UL << PG_reclaim, "reclaim" }, 5668 {1UL << PG_swapbacked, "swapbacked" }, 5669 {1UL << PG_unevictable, "unevictable" }, 5670 #ifdef CONFIG_MMU 5671 {1UL << PG_mlocked, "mlocked" }, 5672 #endif 5673 #ifdef CONFIG_ARCH_USES_PG_UNCACHED 5674 {1UL << PG_uncached, "uncached" }, 5675 #endif 5676 #ifdef CONFIG_MEMORY_FAILURE 5677 {1UL << PG_hwpoison, "hwpoison" }, 5678 #endif 5679 {-1UL, NULL }, 5680 }; 5681 5682 static void dump_page_flags(unsigned long flags) 5683 { 5684 const char *delim = ""; 5685 unsigned long mask; 5686 int i; 5687 5688 printk(KERN_ALERT "page flags: %#lx(", flags); 5689 5690 /* remove zone id */ 5691 flags &= (1UL << NR_PAGEFLAGS) - 1; 5692 5693 for (i = 0; pageflag_names[i].name && flags; i++) { 5694 5695 mask = pageflag_names[i].mask; 5696 if ((flags & mask) != mask) 5697 continue; 5698 5699 flags &= ~mask; 5700 printk("%s%s", delim, pageflag_names[i].name); 5701 delim = "|"; 5702 } 5703 5704 /* check for left over flags */ 5705 if (flags) 5706 printk("%s%#lx", delim, flags); 5707 5708 printk(")\n"); 5709 } 5710 5711 void dump_page(struct page *page) 5712 { 5713 printk(KERN_ALERT 5714 "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", 5715 page, atomic_read(&page->_count), page_mapcount(page), 5716 page->mapping, page->index); 5717 dump_page_flags(page->flags); 5718 mem_cgroup_print_bad_page(page); 5719 } 5720