1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/kasan.h> 26 #include <linux/kmsan.h> 27 #include <linux/module.h> 28 #include <linux/suspend.h> 29 #include <linux/ratelimit.h> 30 #include <linux/oom.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/pagevec.h> 36 #include <linux/memory_hotplug.h> 37 #include <linux/nodemask.h> 38 #include <linux/vmstat.h> 39 #include <linux/fault-inject.h> 40 #include <linux/compaction.h> 41 #include <trace/events/kmem.h> 42 #include <trace/events/oom.h> 43 #include <linux/prefetch.h> 44 #include <linux/mm_inline.h> 45 #include <linux/mmu_notifier.h> 46 #include <linux/migrate.h> 47 #include <linux/sched/mm.h> 48 #include <linux/page_owner.h> 49 #include <linux/page_table_check.h> 50 #include <linux/memcontrol.h> 51 #include <linux/ftrace.h> 52 #include <linux/lockdep.h> 53 #include <linux/psi.h> 54 #include <linux/khugepaged.h> 55 #include <linux/delayacct.h> 56 #include <linux/cacheinfo.h> 57 #include <linux/pgalloc_tag.h> 58 #include <asm/div64.h> 59 #include "internal.h" 60 #include "shuffle.h" 61 #include "page_reporting.h" 62 63 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 64 typedef int __bitwise fpi_t; 65 66 /* No special request */ 67 #define FPI_NONE ((__force fpi_t)0) 68 69 /* 70 * Skip free page reporting notification for the (possibly merged) page. 71 * This does not hinder free page reporting from grabbing the page, 72 * reporting it and marking it "reported" - it only skips notifying 73 * the free page reporting infrastructure about a newly freed page. For 74 * example, used when temporarily pulling a page from a freelist and 75 * putting it back unmodified. 76 */ 77 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 78 79 /* 80 * Place the (possibly merged) page to the tail of the freelist. Will ignore 81 * page shuffling (relevant code - e.g., memory onlining - is expected to 82 * shuffle the whole zone). 83 * 84 * Note: No code should rely on this flag for correctness - it's purely 85 * to allow for optimizations when handing back either fresh pages 86 * (memory onlining) or untouched pages (page isolation, free page 87 * reporting). 88 */ 89 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 90 91 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 92 static DEFINE_MUTEX(pcp_batch_high_lock); 93 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 94 95 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 96 /* 97 * On SMP, spin_trylock is sufficient protection. 98 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 
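 *
 * A minimal sketch of the expected calling pattern around these helpers
 * (mirroring the allocation/freeing fast paths further down in this file;
 * the field and variable names here are illustrative, not prescriptive):
 *
 *	struct per_cpu_pages *pcp;
 *	unsigned long UP_flags;
 *
 *	pcp_trylock_prepare(UP_flags);
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (pcp) {
 *		... add to or remove from pcp->lists, adjust pcp->count ...
 *		pcp_spin_unlock(pcp);
 *	}
 *	pcp_trylock_finish(UP_flags);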
99 */ 100 #define pcp_trylock_prepare(flags) do { } while (0) 101 #define pcp_trylock_finish(flag) do { } while (0) 102 #else 103 104 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ 105 #define pcp_trylock_prepare(flags) local_irq_save(flags) 106 #define pcp_trylock_finish(flags) local_irq_restore(flags) 107 #endif 108 109 /* 110 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid 111 * a migration causing the wrong PCP to be locked and remote memory being 112 * potentially allocated, pin the task to the CPU for the lookup+lock. 113 * preempt_disable is used on !RT because it is faster than migrate_disable. 114 * migrate_disable is used on RT because otherwise RT spinlock usage is 115 * interfered with and a high priority task cannot preempt the allocator. 116 */ 117 #ifndef CONFIG_PREEMPT_RT 118 #define pcpu_task_pin() preempt_disable() 119 #define pcpu_task_unpin() preempt_enable() 120 #else 121 #define pcpu_task_pin() migrate_disable() 122 #define pcpu_task_unpin() migrate_enable() 123 #endif 124 125 /* 126 * Generic helper to lookup and a per-cpu variable with an embedded spinlock. 127 * Return value should be used with equivalent unlock helper. 128 */ 129 #define pcpu_spin_lock(type, member, ptr) \ 130 ({ \ 131 type *_ret; \ 132 pcpu_task_pin(); \ 133 _ret = this_cpu_ptr(ptr); \ 134 spin_lock(&_ret->member); \ 135 _ret; \ 136 }) 137 138 #define pcpu_spin_trylock(type, member, ptr) \ 139 ({ \ 140 type *_ret; \ 141 pcpu_task_pin(); \ 142 _ret = this_cpu_ptr(ptr); \ 143 if (!spin_trylock(&_ret->member)) { \ 144 pcpu_task_unpin(); \ 145 _ret = NULL; \ 146 } \ 147 _ret; \ 148 }) 149 150 #define pcpu_spin_unlock(member, ptr) \ 151 ({ \ 152 spin_unlock(&ptr->member); \ 153 pcpu_task_unpin(); \ 154 }) 155 156 /* struct per_cpu_pages specific helpers. */ 157 #define pcp_spin_lock(ptr) \ 158 pcpu_spin_lock(struct per_cpu_pages, lock, ptr) 159 160 #define pcp_spin_trylock(ptr) \ 161 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr) 162 163 #define pcp_spin_unlock(ptr) \ 164 pcpu_spin_unlock(lock, ptr) 165 166 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 167 DEFINE_PER_CPU(int, numa_node); 168 EXPORT_PER_CPU_SYMBOL(numa_node); 169 #endif 170 171 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 172 173 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 174 /* 175 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 176 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 177 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 178 * defined in <linux/topology.h>. 179 */ 180 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 181 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 182 #endif 183 184 static DEFINE_MUTEX(pcpu_drain_mutex); 185 186 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 187 volatile unsigned long latent_entropy __latent_entropy; 188 EXPORT_SYMBOL(latent_entropy); 189 #endif 190 191 /* 192 * Array of node states. 
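 *
 * These masks are normally consumed through the nodemask.h helpers rather
 * than read directly, e.g. (illustrative):
 *
 *	if (node_state(nid, N_MEMORY))
 *		... nid has memory ...
 *	for_each_node_state(nid, N_ONLINE)
 *		... visit each online node ...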
193 */ 194 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 195 [N_POSSIBLE] = NODE_MASK_ALL, 196 [N_ONLINE] = { { [0] = 1UL } }, 197 #ifndef CONFIG_NUMA 198 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 199 #ifdef CONFIG_HIGHMEM 200 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 201 #endif 202 [N_MEMORY] = { { [0] = 1UL } }, 203 [N_CPU] = { { [0] = 1UL } }, 204 #endif /* NUMA */ 205 }; 206 EXPORT_SYMBOL(node_states); 207 208 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 209 210 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 211 unsigned int pageblock_order __read_mostly; 212 #endif 213 214 static void __free_pages_ok(struct page *page, unsigned int order, 215 fpi_t fpi_flags); 216 217 /* 218 * results with 256, 32 in the lowmem_reserve sysctl: 219 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 220 * 1G machine -> (16M dma, 784M normal, 224M high) 221 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 222 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 223 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 224 * 225 * TBD: should special case ZONE_DMA32 machines here - in those we normally 226 * don't need any ZONE_NORMAL reservation 227 */ 228 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 229 #ifdef CONFIG_ZONE_DMA 230 [ZONE_DMA] = 256, 231 #endif 232 #ifdef CONFIG_ZONE_DMA32 233 [ZONE_DMA32] = 256, 234 #endif 235 [ZONE_NORMAL] = 32, 236 #ifdef CONFIG_HIGHMEM 237 [ZONE_HIGHMEM] = 0, 238 #endif 239 [ZONE_MOVABLE] = 0, 240 }; 241 242 char * const zone_names[MAX_NR_ZONES] = { 243 #ifdef CONFIG_ZONE_DMA 244 "DMA", 245 #endif 246 #ifdef CONFIG_ZONE_DMA32 247 "DMA32", 248 #endif 249 "Normal", 250 #ifdef CONFIG_HIGHMEM 251 "HighMem", 252 #endif 253 "Movable", 254 #ifdef CONFIG_ZONE_DEVICE 255 "Device", 256 #endif 257 }; 258 259 const char * const migratetype_names[MIGRATE_TYPES] = { 260 "Unmovable", 261 "Movable", 262 "Reclaimable", 263 "HighAtomic", 264 #ifdef CONFIG_CMA 265 "CMA", 266 #endif 267 #ifdef CONFIG_MEMORY_ISOLATION 268 "Isolate", 269 #endif 270 }; 271 272 int min_free_kbytes = 1024; 273 int user_min_free_kbytes = -1; 274 static int watermark_boost_factor __read_mostly = 15000; 275 static int watermark_scale_factor = 10; 276 277 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 278 int movable_zone; 279 EXPORT_SYMBOL(movable_zone); 280 281 #if MAX_NUMNODES > 1 282 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 283 unsigned int nr_online_nodes __read_mostly = 1; 284 EXPORT_SYMBOL(nr_node_ids); 285 EXPORT_SYMBOL(nr_online_nodes); 286 #endif 287 288 static bool page_contains_unaccepted(struct page *page, unsigned int order); 289 static bool cond_accept_memory(struct zone *zone, unsigned int order); 290 static bool __free_unaccepted(struct page *page); 291 292 int page_group_by_mobility_disabled __read_mostly; 293 294 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 295 /* 296 * During boot we initialize deferred pages on-demand, as needed, but once 297 * page_alloc_init_late() has finished, the deferred pages are all initialized, 298 * and we can permanently disable that path. 299 */ 300 DEFINE_STATIC_KEY_TRUE(deferred_pages); 301 302 static inline bool deferred_pages_enabled(void) 303 { 304 return static_branch_unlikely(&deferred_pages); 305 } 306 307 /* 308 * deferred_grow_zone() is __init, but it is called from 309 * get_page_from_freelist() during early boot until deferred_pages permanently 310 * disables this call. 
This is why we have a __ref wrapper to avoid the warning,
 * and to ensure that the function body still gets unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
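	 *
	 * A worked example of the lookup above (hypothetical numbers,
	 * assuming pageblock_order == 9, NR_PAGEBLOCK_BITS == 4 and
	 * BITS_PER_LONG == 64): a pfn offset of 0x2600 lies in pageblock
	 * 0x2600 >> 9 = 19, so bitidx = 19 * 4 = 76, word_bitidx = 76 / 64
	 * = 1 and the flags sit at bit offset 76 & 63 = 12 of that word.
	 * The whole word is read in one go below, so a racing update can
	 * only make us observe the old or the new value of those bits,
	 * never a torn mix.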
374 */ 375 word = READ_ONCE(bitmap[word_bitidx]); 376 return (word >> bitidx) & mask; 377 } 378 379 static __always_inline int get_pfnblock_migratetype(const struct page *page, 380 unsigned long pfn) 381 { 382 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 383 } 384 385 /** 386 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 387 * @page: The page within the block of interest 388 * @flags: The flags to set 389 * @pfn: The target page frame number 390 * @mask: mask of bits that the caller is interested in 391 */ 392 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 393 unsigned long pfn, 394 unsigned long mask) 395 { 396 unsigned long *bitmap; 397 unsigned long bitidx, word_bitidx; 398 unsigned long word; 399 400 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 401 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 402 403 bitmap = get_pageblock_bitmap(page, pfn); 404 bitidx = pfn_to_bitidx(page, pfn); 405 word_bitidx = bitidx / BITS_PER_LONG; 406 bitidx &= (BITS_PER_LONG-1); 407 408 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 409 410 mask <<= bitidx; 411 flags <<= bitidx; 412 413 word = READ_ONCE(bitmap[word_bitidx]); 414 do { 415 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); 416 } 417 418 void set_pageblock_migratetype(struct page *page, int migratetype) 419 { 420 if (unlikely(page_group_by_mobility_disabled && 421 migratetype < MIGRATE_PCPTYPES)) 422 migratetype = MIGRATE_UNMOVABLE; 423 424 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 425 page_to_pfn(page), MIGRATETYPE_MASK); 426 } 427 428 #ifdef CONFIG_DEBUG_VM 429 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 430 { 431 int ret; 432 unsigned seq; 433 unsigned long pfn = page_to_pfn(page); 434 unsigned long sp, start_pfn; 435 436 do { 437 seq = zone_span_seqbegin(zone); 438 start_pfn = zone->zone_start_pfn; 439 sp = zone->spanned_pages; 440 ret = !zone_spans_pfn(zone, pfn); 441 } while (zone_span_seqretry(zone, seq)); 442 443 if (ret) 444 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 445 pfn, zone_to_nid(zone), zone->name, 446 start_pfn, start_pfn + sp); 447 448 return ret; 449 } 450 451 /* 452 * Temporary debugging check for pages not lying within a given zone. 453 */ 454 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 455 { 456 if (page_outside_zone_boundaries(zone, page)) 457 return true; 458 if (zone != page_zone(page)) 459 return true; 460 461 return false; 462 } 463 #else 464 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 465 { 466 return false; 467 } 468 #endif 469 470 static void bad_page(struct page *page, const char *reason) 471 { 472 static unsigned long resume; 473 static unsigned long nr_shown; 474 static unsigned long nr_unshown; 475 476 /* 477 * Allow a burst of 60 reports, then keep quiet for that minute; 478 * or allow a steady drip of one report per second. 
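	 *
	 * For example: if 100 bad pages are hit in quick succession, the
	 * first 60 are reported in full and the remaining 40 are only
	 * counted; the first report after the minute expires prints
	 * "40 messages suppressed" and starts a new burst window.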
 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	if (PageBuddy(page))
		__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	bool __maybe_unused movable;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != HPAGE_PMD_ORDER);

		movable = migratetype == MIGRATE_MOVABLE;

		return NR_LOWORDER_PCP_LISTS + movable;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex >= NR_LOWORDER_PCP_LISTS)
		order = HPAGE_PMD_ORDER;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == HPAGE_PMD_ORDER)
		return true;
#endif
	return false;
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock
	 * unless compaction is also requesting movable pages.
609 * This might let an unmovable request use a reclaimable pageblock 610 * and vice-versa but no more than normal fallback logic which can 611 * have trouble finding a high-order free page. 612 */ 613 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 614 capc->cc->migratetype != MIGRATE_MOVABLE) 615 return false; 616 617 capc->page = page; 618 return true; 619 } 620 621 #else 622 static inline struct capture_control *task_capc(struct zone *zone) 623 { 624 return NULL; 625 } 626 627 static inline bool 628 compaction_capture(struct capture_control *capc, struct page *page, 629 int order, int migratetype) 630 { 631 return false; 632 } 633 #endif /* CONFIG_COMPACTION */ 634 635 static inline void account_freepages(struct zone *zone, int nr_pages, 636 int migratetype) 637 { 638 lockdep_assert_held(&zone->lock); 639 640 if (is_migrate_isolate(migratetype)) 641 return; 642 643 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 644 645 if (is_migrate_cma(migratetype)) 646 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 647 else if (is_migrate_highatomic(migratetype)) 648 WRITE_ONCE(zone->nr_free_highatomic, 649 zone->nr_free_highatomic + nr_pages); 650 } 651 652 /* Used for pages not on another list */ 653 static inline void __add_to_free_list(struct page *page, struct zone *zone, 654 unsigned int order, int migratetype, 655 bool tail) 656 { 657 struct free_area *area = &zone->free_area[order]; 658 659 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 660 "page type is %lu, passed migratetype is %d (nr=%d)\n", 661 get_pageblock_migratetype(page), migratetype, 1 << order); 662 663 if (tail) 664 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 665 else 666 list_add(&page->buddy_list, &area->free_list[migratetype]); 667 area->nr_free++; 668 } 669 670 /* 671 * Used for pages which are on another list. Move the pages to the tail 672 * of the list - so the moved pages won't immediately be considered for 673 * allocation again (e.g., optimization for memory onlining). 
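 *
 * __move_freepages_block() below walks an entire pageblock and calls this
 * for every free buddy it finds, so a whole block can change migratetype
 * in one pass while the moved pages stay at the back of their new list.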
674 */ 675 static inline void move_to_free_list(struct page *page, struct zone *zone, 676 unsigned int order, int old_mt, int new_mt) 677 { 678 struct free_area *area = &zone->free_area[order]; 679 680 /* Free page moving can fail, so it happens before the type update */ 681 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, 682 "page type is %lu, passed migratetype is %d (nr=%d)\n", 683 get_pageblock_migratetype(page), old_mt, 1 << order); 684 685 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); 686 687 account_freepages(zone, -(1 << order), old_mt); 688 account_freepages(zone, 1 << order, new_mt); 689 } 690 691 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, 692 unsigned int order, int migratetype) 693 { 694 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 695 "page type is %lu, passed migratetype is %d (nr=%d)\n", 696 get_pageblock_migratetype(page), migratetype, 1 << order); 697 698 /* clear reported state and update reported page count */ 699 if (page_reported(page)) 700 __ClearPageReported(page); 701 702 list_del(&page->buddy_list); 703 __ClearPageBuddy(page); 704 set_page_private(page, 0); 705 zone->free_area[order].nr_free--; 706 } 707 708 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 709 unsigned int order, int migratetype) 710 { 711 __del_page_from_free_list(page, zone, order, migratetype); 712 account_freepages(zone, -(1 << order), migratetype); 713 } 714 715 static inline struct page *get_page_from_free_area(struct free_area *area, 716 int migratetype) 717 { 718 return list_first_entry_or_null(&area->free_list[migratetype], 719 struct page, buddy_list); 720 } 721 722 /* 723 * If this is less than the 2nd largest possible page, check if the buddy 724 * of the next-higher order is free. If it is, it's possible 725 * that pages are being freed that will coalesce soon. In case, 726 * that is happening, add the free page to the tail of the list 727 * so it's less likely to be used soon and more likely to be merged 728 * as a 2-level higher order page 729 */ 730 static inline bool 731 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 732 struct page *page, unsigned int order) 733 { 734 unsigned long higher_page_pfn; 735 struct page *higher_page; 736 737 if (order >= MAX_PAGE_ORDER - 1) 738 return false; 739 740 higher_page_pfn = buddy_pfn & pfn; 741 higher_page = page + (higher_page_pfn - pfn); 742 743 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 744 NULL) != NULL; 745 } 746 747 /* 748 * Freeing function for a buddy system allocator. 749 * 750 * The concept of a buddy system is to maintain direct-mapped table 751 * (containing bit values) for memory blocks of various "orders". 752 * The bottom level table contains the map for the smallest allocatable 753 * units of memory (here, pages), and each level above it describes 754 * pairs of units from the levels below, hence, "buddies". 755 * At a high level, all that happens here is marking the table entry 756 * at the bottom level available, and propagating the changes upward 757 * as necessary, plus some accounting needed to play nicely with other 758 * parts of the VM system. 759 * At each level, we keep a list of pages, which are heads of continuous 760 * free pages of length of (1 << order) and marked with PageBuddy. 761 * Page's order is recorded in page_private(page) field. 762 * So when we are allocating or freeing one, we can derive the state of the 763 * other. 
That is, if we allocate a small block, and both were 764 * free, the remainder of the region must be split into blocks. 765 * If a block is freed, and its buddy is also free, then this 766 * triggers coalescing into a block of larger size. 767 * 768 * -- nyc 769 */ 770 771 static inline void __free_one_page(struct page *page, 772 unsigned long pfn, 773 struct zone *zone, unsigned int order, 774 int migratetype, fpi_t fpi_flags) 775 { 776 struct capture_control *capc = task_capc(zone); 777 unsigned long buddy_pfn = 0; 778 unsigned long combined_pfn; 779 struct page *buddy; 780 bool to_tail; 781 782 VM_BUG_ON(!zone_is_initialized(zone)); 783 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 784 785 VM_BUG_ON(migratetype == -1); 786 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 787 VM_BUG_ON_PAGE(bad_range(zone, page), page); 788 789 account_freepages(zone, 1 << order, migratetype); 790 791 while (order < MAX_PAGE_ORDER) { 792 int buddy_mt = migratetype; 793 794 if (compaction_capture(capc, page, order, migratetype)) { 795 account_freepages(zone, -(1 << order), migratetype); 796 return; 797 } 798 799 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 800 if (!buddy) 801 goto done_merging; 802 803 if (unlikely(order >= pageblock_order)) { 804 /* 805 * We want to prevent merge between freepages on pageblock 806 * without fallbacks and normal pageblock. Without this, 807 * pageblock isolation could cause incorrect freepage or CMA 808 * accounting or HIGHATOMIC accounting. 809 */ 810 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 811 812 if (migratetype != buddy_mt && 813 (!migratetype_is_mergeable(migratetype) || 814 !migratetype_is_mergeable(buddy_mt))) 815 goto done_merging; 816 } 817 818 /* 819 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 820 * merge with it and move up one order. 821 */ 822 if (page_is_guard(buddy)) 823 clear_page_guard(zone, buddy, order); 824 else 825 __del_page_from_free_list(buddy, zone, order, buddy_mt); 826 827 if (unlikely(buddy_mt != migratetype)) { 828 /* 829 * Match buddy type. This ensures that an 830 * expand() down the line puts the sub-blocks 831 * on the right freelists. 832 */ 833 set_pageblock_migratetype(buddy, migratetype); 834 } 835 836 combined_pfn = buddy_pfn & pfn; 837 page = page + (combined_pfn - pfn); 838 pfn = combined_pfn; 839 order++; 840 } 841 842 done_merging: 843 set_buddy_order(page, order); 844 845 if (fpi_flags & FPI_TO_TAIL) 846 to_tail = true; 847 else if (is_shuffle_order(order)) 848 to_tail = shuffle_pick_tail(); 849 else 850 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 851 852 __add_to_free_list(page, zone, order, migratetype, to_tail); 853 854 /* Notify page reporting subsystem of freed page */ 855 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 856 page_reporting_notify_free(order); 857 } 858 859 /* 860 * A bad page could be due to a number of fields. Instead of multiple branches, 861 * try and check multiple fields with one check. The caller must do a detailed 862 * check if necessary. 
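 *
 * The idea: mapping, _refcount, memcg_data and the offending flag bits are
 * all expected to be zero on a page being freed, so OR-ing them together
 * and testing the result needs only one branch. _mapcount is tested
 * separately because its "clean" value is -1, not 0, and therefore cannot
 * be folded into the same OR.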
863 */ 864 static inline bool page_expected_state(struct page *page, 865 unsigned long check_flags) 866 { 867 if (unlikely(atomic_read(&page->_mapcount) != -1)) 868 return false; 869 870 if (unlikely((unsigned long)page->mapping | 871 page_ref_count(page) | 872 #ifdef CONFIG_MEMCG 873 page->memcg_data | 874 #endif 875 #ifdef CONFIG_PAGE_POOL 876 ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) | 877 #endif 878 (page->flags & check_flags))) 879 return false; 880 881 return true; 882 } 883 884 static const char *page_bad_reason(struct page *page, unsigned long flags) 885 { 886 const char *bad_reason = NULL; 887 888 if (unlikely(atomic_read(&page->_mapcount) != -1)) 889 bad_reason = "nonzero mapcount"; 890 if (unlikely(page->mapping != NULL)) 891 bad_reason = "non-NULL mapping"; 892 if (unlikely(page_ref_count(page) != 0)) 893 bad_reason = "nonzero _refcount"; 894 if (unlikely(page->flags & flags)) { 895 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 896 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 897 else 898 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 899 } 900 #ifdef CONFIG_MEMCG 901 if (unlikely(page->memcg_data)) 902 bad_reason = "page still charged to cgroup"; 903 #endif 904 #ifdef CONFIG_PAGE_POOL 905 if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE)) 906 bad_reason = "page_pool leak"; 907 #endif 908 return bad_reason; 909 } 910 911 static void free_page_is_bad_report(struct page *page) 912 { 913 bad_page(page, 914 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 915 } 916 917 static inline bool free_page_is_bad(struct page *page) 918 { 919 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 920 return false; 921 922 /* Something has gone sideways, find it */ 923 free_page_is_bad_report(page); 924 return true; 925 } 926 927 static inline bool is_check_pages_enabled(void) 928 { 929 return static_branch_unlikely(&check_pages_enabled); 930 } 931 932 static int free_tail_page_prepare(struct page *head_page, struct page *page) 933 { 934 struct folio *folio = (struct folio *)head_page; 935 int ret = 1; 936 937 /* 938 * We rely page->lru.next never has bit 0 set, unless the page 939 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
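	 *
	 * (compound_head() encodes "this is a tail page" in bit 0 of
	 * page->compound_head, which overlays lru.next; LIST_POISON1 is
	 * 0x100 + POISON_POINTER_DELTA, so bit 0 stays clear and a poisoned
	 * list entry can never be mistaken for a tail-page pointer.)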
940 */ 941 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 942 943 if (!is_check_pages_enabled()) { 944 ret = 0; 945 goto out; 946 } 947 switch (page - head_page) { 948 case 1: 949 /* the first tail page: these may be in place of ->mapping */ 950 if (unlikely(folio_entire_mapcount(folio))) { 951 bad_page(page, "nonzero entire_mapcount"); 952 goto out; 953 } 954 if (unlikely(folio_large_mapcount(folio))) { 955 bad_page(page, "nonzero large_mapcount"); 956 goto out; 957 } 958 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { 959 bad_page(page, "nonzero nr_pages_mapped"); 960 goto out; 961 } 962 if (unlikely(atomic_read(&folio->_pincount))) { 963 bad_page(page, "nonzero pincount"); 964 goto out; 965 } 966 break; 967 case 2: 968 /* the second tail page: deferred_list overlaps ->mapping */ 969 if (unlikely(!list_empty(&folio->_deferred_list))) { 970 bad_page(page, "on deferred list"); 971 goto out; 972 } 973 break; 974 default: 975 if (page->mapping != TAIL_MAPPING) { 976 bad_page(page, "corrupted mapping in tail page"); 977 goto out; 978 } 979 break; 980 } 981 if (unlikely(!PageTail(page))) { 982 bad_page(page, "PageTail not set"); 983 goto out; 984 } 985 if (unlikely(compound_head(page) != head_page)) { 986 bad_page(page, "compound_head not consistent"); 987 goto out; 988 } 989 ret = 0; 990 out: 991 page->mapping = NULL; 992 clear_compound_head(page); 993 return ret; 994 } 995 996 /* 997 * Skip KASAN memory poisoning when either: 998 * 999 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1000 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1001 * using page tags instead (see below). 1002 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1003 * that error detection is disabled for accesses via the page address. 1004 * 1005 * Pages will have match-all tags in the following circumstances: 1006 * 1007 * 1. Pages are being initialized for the first time, including during deferred 1008 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1009 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1010 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1011 * 3. The allocation was excluded from being checked due to sampling, 1012 * see the call to kasan_unpoison_pages. 1013 * 1014 * Poisoning pages during deferred memory init will greatly lengthen the 1015 * process and cause problem in large memory systems as the deferred pages 1016 * initialization is done with interrupt disabled. 1017 * 1018 * Assuming that there will be no reference to those newly initialized 1019 * pages before they are ever allocated, this should have no effect on 1020 * KASAN memory tracking as the poison will be properly inserted at page 1021 * allocation time. The only corner case is when pages are allocated by 1022 * on-demand allocation and then freed again before the deferred pages 1023 * initialization is done, but this is not likely to happen. 1024 */ 1025 static inline bool should_skip_kasan_poison(struct page *page) 1026 { 1027 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1028 return deferred_pages_enabled(); 1029 1030 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1031 } 1032 1033 static void kernel_init_pages(struct page *page, int numpages) 1034 { 1035 int i; 1036 1037 /* s390's use of memset() could override KASAN redzones. 
*/ 1038 kasan_disable_current(); 1039 for (i = 0; i < numpages; i++) 1040 clear_highpage_kasan_tagged(page + i); 1041 kasan_enable_current(); 1042 } 1043 1044 #ifdef CONFIG_MEM_ALLOC_PROFILING 1045 1046 /* Should be called only if mem_alloc_profiling_enabled() */ 1047 void __clear_page_tag_ref(struct page *page) 1048 { 1049 union pgtag_ref_handle handle; 1050 union codetag_ref ref; 1051 1052 if (get_page_tag_ref(page, &ref, &handle)) { 1053 set_codetag_empty(&ref); 1054 update_page_tag_ref(handle, &ref); 1055 put_page_tag_ref(handle); 1056 } 1057 } 1058 1059 /* Should be called only if mem_alloc_profiling_enabled() */ 1060 static noinline 1061 void __pgalloc_tag_add(struct page *page, struct task_struct *task, 1062 unsigned int nr) 1063 { 1064 union pgtag_ref_handle handle; 1065 union codetag_ref ref; 1066 1067 if (get_page_tag_ref(page, &ref, &handle)) { 1068 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); 1069 update_page_tag_ref(handle, &ref); 1070 put_page_tag_ref(handle); 1071 } 1072 } 1073 1074 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1075 unsigned int nr) 1076 { 1077 if (mem_alloc_profiling_enabled()) 1078 __pgalloc_tag_add(page, task, nr); 1079 } 1080 1081 /* Should be called only if mem_alloc_profiling_enabled() */ 1082 static noinline 1083 void __pgalloc_tag_sub(struct page *page, unsigned int nr) 1084 { 1085 union pgtag_ref_handle handle; 1086 union codetag_ref ref; 1087 1088 if (get_page_tag_ref(page, &ref, &handle)) { 1089 alloc_tag_sub(&ref, PAGE_SIZE * nr); 1090 update_page_tag_ref(handle, &ref); 1091 put_page_tag_ref(handle); 1092 } 1093 } 1094 1095 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) 1096 { 1097 if (mem_alloc_profiling_enabled()) 1098 __pgalloc_tag_sub(page, nr); 1099 } 1100 1101 static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) 1102 { 1103 struct alloc_tag *tag; 1104 1105 if (!mem_alloc_profiling_enabled()) 1106 return; 1107 1108 tag = __pgalloc_tag_get(page); 1109 if (tag) 1110 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1111 } 1112 1113 #else /* CONFIG_MEM_ALLOC_PROFILING */ 1114 1115 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1116 unsigned int nr) {} 1117 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1118 static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} 1119 1120 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1121 1122 __always_inline bool free_pages_prepare(struct page *page, 1123 unsigned int order) 1124 { 1125 int bad = 0; 1126 bool skip_kasan_poison = should_skip_kasan_poison(page); 1127 bool init = want_init_on_free(); 1128 bool compound = PageCompound(page); 1129 struct folio *folio = page_folio(page); 1130 1131 VM_BUG_ON_PAGE(PageTail(page), page); 1132 1133 trace_mm_page_free(page, order); 1134 kmsan_free_page(page, order); 1135 1136 if (memcg_kmem_online() && PageMemcgKmem(page)) 1137 __memcg_kmem_uncharge_page(page, order); 1138 1139 /* 1140 * In rare cases, when truncation or holepunching raced with 1141 * munlock after VM_LOCKED was cleared, Mlocked may still be 1142 * found set here. This does not indicate a problem, unless 1143 * "unevictable_pgs_cleared" appears worryingly large. 
1144 */ 1145 if (unlikely(folio_test_mlocked(folio))) { 1146 long nr_pages = folio_nr_pages(folio); 1147 1148 __folio_clear_mlocked(folio); 1149 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1150 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1151 } 1152 1153 if (unlikely(PageHWPoison(page)) && !order) { 1154 /* Do not let hwpoison pages hit pcplists/buddy */ 1155 reset_page_owner(page, order); 1156 page_table_check_free(page, order); 1157 pgalloc_tag_sub(page, 1 << order); 1158 1159 /* 1160 * The page is isolated and accounted for. 1161 * Mark the codetag as empty to avoid accounting error 1162 * when the page is freed by unpoison_memory(). 1163 */ 1164 clear_page_tag_ref(page); 1165 return false; 1166 } 1167 1168 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1169 1170 /* 1171 * Check tail pages before head page information is cleared to 1172 * avoid checking PageCompound for order-0 pages. 1173 */ 1174 if (unlikely(order)) { 1175 int i; 1176 1177 if (compound) 1178 page[1].flags &= ~PAGE_FLAGS_SECOND; 1179 for (i = 1; i < (1 << order); i++) { 1180 if (compound) 1181 bad += free_tail_page_prepare(page, page + i); 1182 if (is_check_pages_enabled()) { 1183 if (free_page_is_bad(page + i)) { 1184 bad++; 1185 continue; 1186 } 1187 } 1188 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1189 } 1190 } 1191 if (PageMappingFlags(page)) { 1192 if (PageAnon(page)) 1193 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1194 page->mapping = NULL; 1195 } 1196 if (is_check_pages_enabled()) { 1197 if (free_page_is_bad(page)) 1198 bad++; 1199 if (bad) 1200 return false; 1201 } 1202 1203 page_cpupid_reset_last(page); 1204 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1205 reset_page_owner(page, order); 1206 page_table_check_free(page, order); 1207 pgalloc_tag_sub(page, 1 << order); 1208 1209 if (!PageHighMem(page)) { 1210 debug_check_no_locks_freed(page_address(page), 1211 PAGE_SIZE << order); 1212 debug_check_no_obj_freed(page_address(page), 1213 PAGE_SIZE << order); 1214 } 1215 1216 kernel_poison_pages(page, 1 << order); 1217 1218 /* 1219 * As memory initialization might be integrated into KASAN, 1220 * KASAN poisoning and memory initialization code must be 1221 * kept together to avoid discrepancies in behavior. 1222 * 1223 * With hardware tag-based KASAN, memory tags must be set before the 1224 * page becomes unavailable via debug_pagealloc or arch_free_page. 1225 */ 1226 if (!skip_kasan_poison) { 1227 kasan_poison_pages(page, order, init); 1228 1229 /* Memory is already initialized if KASAN did it internally. */ 1230 if (kasan_has_integrated_init()) 1231 init = false; 1232 } 1233 if (init) 1234 kernel_init_pages(page, 1 << order); 1235 1236 /* 1237 * arch_free_page() can make the page's contents inaccessible. s390 1238 * does this. So nothing which can access the page's contents should 1239 * happen after this. 1240 */ 1241 arch_free_page(page, order); 1242 1243 debug_pagealloc_unmap_pages(page, 1 << order); 1244 1245 return true; 1246 } 1247 1248 /* 1249 * Frees a number of pages from the PCP lists 1250 * Assumes all pages on list are in same zone. 1251 * count is the number of pages to free. 1252 */ 1253 static void free_pcppages_bulk(struct zone *zone, int count, 1254 struct per_cpu_pages *pcp, 1255 int pindex) 1256 { 1257 unsigned long flags; 1258 unsigned int order; 1259 struct page *page; 1260 1261 /* 1262 * Ensure proper count is passed which otherwise would stuck in the 1263 * below while (list_empty(list)) loop. 
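	 *
	 * E.g. if the caller asks for 32 pages but pcp->count is only 5,
	 * count is clamped to 5 here; without the clamp, once every list had
	 * been emptied the round-robin lookup below would never find a
	 * non-empty list and would spin forever.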
1264 */ 1265 count = min(pcp->count, count); 1266 1267 /* Ensure requested pindex is drained first. */ 1268 pindex = pindex - 1; 1269 1270 spin_lock_irqsave(&zone->lock, flags); 1271 1272 while (count > 0) { 1273 struct list_head *list; 1274 int nr_pages; 1275 1276 /* Remove pages from lists in a round-robin fashion. */ 1277 do { 1278 if (++pindex > NR_PCP_LISTS - 1) 1279 pindex = 0; 1280 list = &pcp->lists[pindex]; 1281 } while (list_empty(list)); 1282 1283 order = pindex_to_order(pindex); 1284 nr_pages = 1 << order; 1285 do { 1286 unsigned long pfn; 1287 int mt; 1288 1289 page = list_last_entry(list, struct page, pcp_list); 1290 pfn = page_to_pfn(page); 1291 mt = get_pfnblock_migratetype(page, pfn); 1292 1293 /* must delete to avoid corrupting pcp list */ 1294 list_del(&page->pcp_list); 1295 count -= nr_pages; 1296 pcp->count -= nr_pages; 1297 1298 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); 1299 trace_mm_page_pcpu_drain(page, order, mt); 1300 } while (count > 0 && !list_empty(list)); 1301 } 1302 1303 spin_unlock_irqrestore(&zone->lock, flags); 1304 } 1305 1306 /* Split a multi-block free page into its individual pageblocks. */ 1307 static void split_large_buddy(struct zone *zone, struct page *page, 1308 unsigned long pfn, int order, fpi_t fpi) 1309 { 1310 unsigned long end = pfn + (1 << order); 1311 1312 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); 1313 /* Caller removed page from freelist, buddy info cleared! */ 1314 VM_WARN_ON_ONCE(PageBuddy(page)); 1315 1316 if (order > pageblock_order) 1317 order = pageblock_order; 1318 1319 do { 1320 int mt = get_pfnblock_migratetype(page, pfn); 1321 1322 __free_one_page(page, pfn, zone, order, mt, fpi); 1323 pfn += 1 << order; 1324 if (pfn == end) 1325 break; 1326 page = pfn_to_page(pfn); 1327 } while (1); 1328 } 1329 1330 static void free_one_page(struct zone *zone, struct page *page, 1331 unsigned long pfn, unsigned int order, 1332 fpi_t fpi_flags) 1333 { 1334 unsigned long flags; 1335 1336 spin_lock_irqsave(&zone->lock, flags); 1337 split_large_buddy(zone, page, pfn, order, fpi_flags); 1338 spin_unlock_irqrestore(&zone->lock, flags); 1339 1340 __count_vm_events(PGFREE, 1 << order); 1341 } 1342 1343 static void __free_pages_ok(struct page *page, unsigned int order, 1344 fpi_t fpi_flags) 1345 { 1346 unsigned long pfn = page_to_pfn(page); 1347 struct zone *zone = page_zone(page); 1348 1349 if (free_pages_prepare(page, order)) 1350 free_one_page(zone, page, pfn, order, fpi_flags); 1351 } 1352 1353 void __meminit __free_pages_core(struct page *page, unsigned int order, 1354 enum meminit_context context) 1355 { 1356 unsigned int nr_pages = 1 << order; 1357 struct page *p = page; 1358 unsigned int loop; 1359 1360 /* 1361 * When initializing the memmap, __init_single_page() sets the refcount 1362 * of all pages to 1 ("allocated"/"not free"). We have to set the 1363 * refcount of all involved pages to 0. 1364 * 1365 * Note that hotplugged memory pages are initialized to PageOffline(). 1366 * Pages freed from memblock might be marked as reserved. 1367 */ 1368 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1369 unlikely(context == MEMINIT_HOTPLUG)) { 1370 for (loop = 0; loop < nr_pages; loop++, p++) { 1371 VM_WARN_ON_ONCE(PageReserved(p)); 1372 __ClearPageOffline(p); 1373 set_page_count(p, 0); 1374 } 1375 1376 adjust_managed_page_count(page, nr_pages); 1377 } else { 1378 for (loop = 0; loop < nr_pages; loop++, p++) { 1379 __ClearPageReserved(p); 1380 set_page_count(p, 0); 1381 } 1382 1383 /* memblock adjusts totalram_pages() manually. 
*/ 1384 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1385 } 1386 1387 if (page_contains_unaccepted(page, order)) { 1388 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1389 return; 1390 1391 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1392 } 1393 1394 /* 1395 * Bypass PCP and place fresh pages right to the tail, primarily 1396 * relevant for memory onlining. 1397 */ 1398 __free_pages_ok(page, order, FPI_TO_TAIL); 1399 } 1400 1401 /* 1402 * Check that the whole (or subset of) a pageblock given by the interval of 1403 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1404 * with the migration of free compaction scanner. 1405 * 1406 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1407 * 1408 * It's possible on some configurations to have a setup like node0 node1 node0 1409 * i.e. it's possible that all pages within a zones range of pages do not 1410 * belong to a single zone. We assume that a border between node0 and node1 1411 * can occur within a single pageblock, but not a node0 node1 node0 1412 * interleaving within a single pageblock. It is therefore sufficient to check 1413 * the first and last page of a pageblock and avoid checking each individual 1414 * page in a pageblock. 1415 * 1416 * Note: the function may return non-NULL struct page even for a page block 1417 * which contains a memory hole (i.e. there is no physical memory for a subset 1418 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which 1419 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1420 * even though the start pfn is online and valid. This should be safe most of 1421 * the time because struct pages are still initialized via init_unavailable_range() 1422 * and pfn walkers shouldn't touch any physical memory range for which they do 1423 * not recognize any specific metadata in struct pages. 1424 */ 1425 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1426 unsigned long end_pfn, struct zone *zone) 1427 { 1428 struct page *start_page; 1429 struct page *end_page; 1430 1431 /* end_pfn is one past the range we are checking */ 1432 end_pfn--; 1433 1434 if (!pfn_valid(end_pfn)) 1435 return NULL; 1436 1437 start_page = pfn_to_online_page(start_pfn); 1438 if (!start_page) 1439 return NULL; 1440 1441 if (page_zone(start_page) != zone) 1442 return NULL; 1443 1444 end_page = pfn_to_page(end_pfn); 1445 1446 /* This gives a shorter code than deriving page_zone(end_page) */ 1447 if (page_zone_id(start_page) != page_zone_id(end_page)) 1448 return NULL; 1449 1450 return start_page; 1451 } 1452 1453 /* 1454 * The order of subdivision here is critical for the IO subsystem. 1455 * Please do not alter this order without good reasons and regression 1456 * testing. Specifically, as large blocks of memory are subdivided, 1457 * the order in which smaller blocks are delivered depends on the order 1458 * they're subdivided in this function. This is the primary factor 1459 * influencing the order in which pages are delivered to the IO 1460 * subsystem according to empirical testing, and this is also justified 1461 * by considering the behavior of a buddy system containing a single 1462 * large block of memory acted on by a series of small allocations. 1463 * This behavior is a critical factor in sglist merging's success. 
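 *
 * A worked example of the split performed by expand() below (hypothetical
 * request): an order-1 page is needed but the smallest free block found is
 * order-4. The order-4 block (16 pages) is removed from its free list and
 * expand(zone, page, 1, 4, mt) hands the upper halves back as buddies of
 * order 3, 2 and 1 (8 + 4 + 2 = 14 pages re-added), leaving the first two
 * pages for the caller.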
1464 * 1465 * -- nyc 1466 */ 1467 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1468 int high, int migratetype) 1469 { 1470 unsigned int size = 1 << high; 1471 unsigned int nr_added = 0; 1472 1473 while (high > low) { 1474 high--; 1475 size >>= 1; 1476 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1477 1478 /* 1479 * Mark as guard pages (or page), that will allow to 1480 * merge back to allocator when buddy will be freed. 1481 * Corresponding page table entries will not be touched, 1482 * pages will stay not present in virtual address space 1483 */ 1484 if (set_page_guard(zone, &page[size], high)) 1485 continue; 1486 1487 __add_to_free_list(&page[size], zone, high, migratetype, false); 1488 set_buddy_order(&page[size], high); 1489 nr_added += size; 1490 } 1491 1492 return nr_added; 1493 } 1494 1495 static __always_inline void page_del_and_expand(struct zone *zone, 1496 struct page *page, int low, 1497 int high, int migratetype) 1498 { 1499 int nr_pages = 1 << high; 1500 1501 __del_page_from_free_list(page, zone, high, migratetype); 1502 nr_pages -= expand(zone, page, low, high, migratetype); 1503 account_freepages(zone, -nr_pages, migratetype); 1504 } 1505 1506 static void check_new_page_bad(struct page *page) 1507 { 1508 if (unlikely(page->flags & __PG_HWPOISON)) { 1509 /* Don't complain about hwpoisoned pages */ 1510 if (PageBuddy(page)) 1511 __ClearPageBuddy(page); 1512 return; 1513 } 1514 1515 bad_page(page, 1516 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1517 } 1518 1519 /* 1520 * This page is about to be returned from the page allocator 1521 */ 1522 static bool check_new_page(struct page *page) 1523 { 1524 if (likely(page_expected_state(page, 1525 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1526 return false; 1527 1528 check_new_page_bad(page); 1529 return true; 1530 } 1531 1532 static inline bool check_new_pages(struct page *page, unsigned int order) 1533 { 1534 if (is_check_pages_enabled()) { 1535 for (int i = 0; i < (1 << order); i++) { 1536 struct page *p = page + i; 1537 1538 if (check_new_page(p)) 1539 return true; 1540 } 1541 } 1542 1543 return false; 1544 } 1545 1546 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1547 { 1548 /* Don't skip if a software KASAN mode is enabled. */ 1549 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1550 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1551 return false; 1552 1553 /* Skip, if hardware tag-based KASAN is not enabled. */ 1554 if (!kasan_hw_tags_enabled()) 1555 return true; 1556 1557 /* 1558 * With hardware tag-based KASAN enabled, skip if this has been 1559 * requested via __GFP_SKIP_KASAN. 1560 */ 1561 return flags & __GFP_SKIP_KASAN; 1562 } 1563 1564 static inline bool should_skip_init(gfp_t flags) 1565 { 1566 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1567 if (!kasan_hw_tags_enabled()) 1568 return false; 1569 1570 /* For hardware tag-based KASAN, skip if requested. */ 1571 return (flags & __GFP_SKIP_ZERO); 1572 } 1573 1574 inline void post_alloc_hook(struct page *page, unsigned int order, 1575 gfp_t gfp_flags) 1576 { 1577 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1578 !should_skip_init(gfp_flags); 1579 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1580 int i; 1581 1582 set_page_private(page, 0); 1583 1584 arch_alloc_page(page, order); 1585 debug_pagealloc_map_pages(page, 1 << order); 1586 1587 /* 1588 * Page unpoisoning must happen before memory initialization. 
1589 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1590 * allocations and the page unpoisoning code will complain. 1591 */ 1592 kernel_unpoison_pages(page, 1 << order); 1593 1594 /* 1595 * As memory initialization might be integrated into KASAN, 1596 * KASAN unpoisoning and memory initializion code must be 1597 * kept together to avoid discrepancies in behavior. 1598 */ 1599 1600 /* 1601 * If memory tags should be zeroed 1602 * (which happens only when memory should be initialized as well). 1603 */ 1604 if (zero_tags) { 1605 /* Initialize both memory and memory tags. */ 1606 for (i = 0; i != 1 << order; ++i) 1607 tag_clear_highpage(page + i); 1608 1609 /* Take note that memory was initialized by the loop above. */ 1610 init = false; 1611 } 1612 if (!should_skip_kasan_unpoison(gfp_flags) && 1613 kasan_unpoison_pages(page, order, init)) { 1614 /* Take note that memory was initialized by KASAN. */ 1615 if (kasan_has_integrated_init()) 1616 init = false; 1617 } else { 1618 /* 1619 * If memory tags have not been set by KASAN, reset the page 1620 * tags to ensure page_address() dereferencing does not fault. 1621 */ 1622 for (i = 0; i != 1 << order; ++i) 1623 page_kasan_tag_reset(page + i); 1624 } 1625 /* If memory is still not initialized, initialize it now. */ 1626 if (init) 1627 kernel_init_pages(page, 1 << order); 1628 1629 set_page_owner(page, order, gfp_flags); 1630 page_table_check_alloc(page, order); 1631 pgalloc_tag_add(page, current, 1 << order); 1632 } 1633 1634 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1635 unsigned int alloc_flags) 1636 { 1637 post_alloc_hook(page, order, gfp_flags); 1638 1639 if (order && (gfp_flags & __GFP_COMP)) 1640 prep_compound_page(page, order); 1641 1642 /* 1643 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1644 * allocate the page. The expectation is that the caller is taking 1645 * steps that will free more memory. The caller should avoid the page 1646 * being used for !PFMEMALLOC purposes. 1647 */ 1648 if (alloc_flags & ALLOC_NO_WATERMARKS) 1649 set_page_pfmemalloc(page); 1650 else 1651 clear_page_pfmemalloc(page); 1652 } 1653 1654 /* 1655 * Go through the free lists for the given migratetype and remove 1656 * the smallest available page from the freelists 1657 */ 1658 static __always_inline 1659 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1660 int migratetype) 1661 { 1662 unsigned int current_order; 1663 struct free_area *area; 1664 struct page *page; 1665 1666 /* Find a page of the appropriate size in the preferred list */ 1667 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1668 area = &(zone->free_area[current_order]); 1669 page = get_page_from_free_area(area, migratetype); 1670 if (!page) 1671 continue; 1672 1673 page_del_and_expand(zone, page, order, current_order, 1674 migratetype); 1675 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1676 pcp_allowed_order(order) && 1677 migratetype < MIGRATE_PCPTYPES); 1678 return page; 1679 } 1680 1681 return NULL; 1682 } 1683 1684 1685 /* 1686 * This array describes the order lists are fallen back to when 1687 * the free lists for the desirable migrate type are depleted 1688 * 1689 * The other migratetypes do not have fallbacks. 
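 *
 * E.g. when the unmovable free lists are exhausted, a MIGRATE_UNMOVABLE
 * request falls back to MIGRATE_RECLAIMABLE first and MIGRATE_MOVABLE
 * second, per fallbacks[MIGRATE_UNMOVABLE] below.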
1690 */ 1691 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1692 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1693 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1694 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1695 }; 1696 1697 #ifdef CONFIG_CMA 1698 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1699 unsigned int order) 1700 { 1701 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1702 } 1703 #else 1704 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1705 unsigned int order) { return NULL; } 1706 #endif 1707 1708 /* 1709 * Change the type of a block and move all its free pages to that 1710 * type's freelist. 1711 */ 1712 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1713 int old_mt, int new_mt) 1714 { 1715 struct page *page; 1716 unsigned long pfn, end_pfn; 1717 unsigned int order; 1718 int pages_moved = 0; 1719 1720 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1721 end_pfn = pageblock_end_pfn(start_pfn); 1722 1723 for (pfn = start_pfn; pfn < end_pfn;) { 1724 page = pfn_to_page(pfn); 1725 if (!PageBuddy(page)) { 1726 pfn++; 1727 continue; 1728 } 1729 1730 /* Make sure we are not inadvertently changing nodes */ 1731 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1732 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1733 1734 order = buddy_order(page); 1735 1736 move_to_free_list(page, zone, order, old_mt, new_mt); 1737 1738 pfn += 1 << order; 1739 pages_moved += 1 << order; 1740 } 1741 1742 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 1743 1744 return pages_moved; 1745 } 1746 1747 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 1748 unsigned long *start_pfn, 1749 int *num_free, int *num_movable) 1750 { 1751 unsigned long pfn, start, end; 1752 1753 pfn = page_to_pfn(page); 1754 start = pageblock_start_pfn(pfn); 1755 end = pageblock_end_pfn(pfn); 1756 1757 /* 1758 * The caller only has the lock for @zone, don't touch ranges 1759 * that straddle into other zones. While we could move part of 1760 * the range that's inside the zone, this call is usually 1761 * accompanied by other operations such as migratetype updates 1762 * which also should be locked. 1763 */ 1764 if (!zone_spans_pfn(zone, start)) 1765 return false; 1766 if (!zone_spans_pfn(zone, end - 1)) 1767 return false; 1768 1769 *start_pfn = start; 1770 1771 if (num_free) { 1772 *num_free = 0; 1773 *num_movable = 0; 1774 for (pfn = start; pfn < end;) { 1775 page = pfn_to_page(pfn); 1776 if (PageBuddy(page)) { 1777 int nr = 1 << buddy_order(page); 1778 1779 *num_free += nr; 1780 pfn += nr; 1781 continue; 1782 } 1783 /* 1784 * We assume that pages that could be isolated for 1785 * migration are movable. But we don't actually try 1786 * isolating, as that would be expensive. 
1787 */ 1788 if (PageLRU(page) || __PageMovable(page)) 1789 (*num_movable)++; 1790 pfn++; 1791 } 1792 } 1793 1794 return true; 1795 } 1796 1797 static int move_freepages_block(struct zone *zone, struct page *page, 1798 int old_mt, int new_mt) 1799 { 1800 unsigned long start_pfn; 1801 1802 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1803 return -1; 1804 1805 return __move_freepages_block(zone, start_pfn, old_mt, new_mt); 1806 } 1807 1808 #ifdef CONFIG_MEMORY_ISOLATION 1809 /* Look for a buddy that straddles start_pfn */ 1810 static unsigned long find_large_buddy(unsigned long start_pfn) 1811 { 1812 int order = 0; 1813 struct page *page; 1814 unsigned long pfn = start_pfn; 1815 1816 while (!PageBuddy(page = pfn_to_page(pfn))) { 1817 /* Nothing found */ 1818 if (++order > MAX_PAGE_ORDER) 1819 return start_pfn; 1820 pfn &= ~0UL << order; 1821 } 1822 1823 /* 1824 * Found a preceding buddy, but does it straddle? 1825 */ 1826 if (pfn + (1 << buddy_order(page)) > start_pfn) 1827 return pfn; 1828 1829 /* Nothing found */ 1830 return start_pfn; 1831 } 1832 1833 /** 1834 * move_freepages_block_isolate - move free pages in block for page isolation 1835 * @zone: the zone 1836 * @page: the pageblock page 1837 * @migratetype: migratetype to set on the pageblock 1838 * 1839 * This is similar to move_freepages_block(), but handles the special 1840 * case encountered in page isolation, where the block of interest 1841 * might be part of a larger buddy spanning multiple pageblocks. 1842 * 1843 * Unlike the regular page allocator path, which moves pages while 1844 * stealing buddies off the freelist, page isolation is interested in 1845 * arbitrary pfn ranges that may have overlapping buddies on both ends. 1846 * 1847 * This function handles that. Straddling buddies are split into 1848 * individual pageblocks. Only the block of interest is moved. 1849 * 1850 * Returns %true if pages could be moved, %false otherwise. 
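 *
 * For example, with pageblock_order == 9 and MAX_PAGE_ORDER == 10:
 * isolating the second pageblock of an order-10 buddy deletes that buddy
 * from its free list, retags the block of interest, and re-frees the range
 * via split_large_buddy() as two order-9 blocks, each going to the free
 * list matching its (possibly updated) pageblock type.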
1851 */ 1852 bool move_freepages_block_isolate(struct zone *zone, struct page *page, 1853 int migratetype) 1854 { 1855 unsigned long start_pfn, pfn; 1856 1857 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1858 return false; 1859 1860 /* No splits needed if buddies can't span multiple blocks */ 1861 if (pageblock_order == MAX_PAGE_ORDER) 1862 goto move; 1863 1864 /* We're a tail block in a larger buddy */ 1865 pfn = find_large_buddy(start_pfn); 1866 if (pfn != start_pfn) { 1867 struct page *buddy = pfn_to_page(pfn); 1868 int order = buddy_order(buddy); 1869 1870 del_page_from_free_list(buddy, zone, order, 1871 get_pfnblock_migratetype(buddy, pfn)); 1872 set_pageblock_migratetype(page, migratetype); 1873 split_large_buddy(zone, buddy, pfn, order, FPI_NONE); 1874 return true; 1875 } 1876 1877 /* We're the starting block of a larger buddy */ 1878 if (PageBuddy(page) && buddy_order(page) > pageblock_order) { 1879 int order = buddy_order(page); 1880 1881 del_page_from_free_list(page, zone, order, 1882 get_pfnblock_migratetype(page, pfn)); 1883 set_pageblock_migratetype(page, migratetype); 1884 split_large_buddy(zone, page, pfn, order, FPI_NONE); 1885 return true; 1886 } 1887 move: 1888 __move_freepages_block(zone, start_pfn, 1889 get_pfnblock_migratetype(page, start_pfn), 1890 migratetype); 1891 return true; 1892 } 1893 #endif /* CONFIG_MEMORY_ISOLATION */ 1894 1895 static void change_pageblock_range(struct page *pageblock_page, 1896 int start_order, int migratetype) 1897 { 1898 int nr_pageblocks = 1 << (start_order - pageblock_order); 1899 1900 while (nr_pageblocks--) { 1901 set_pageblock_migratetype(pageblock_page, migratetype); 1902 pageblock_page += pageblock_nr_pages; 1903 } 1904 } 1905 1906 /* 1907 * When we are falling back to another migratetype during allocation, try to 1908 * steal extra free pages from the same pageblocks to satisfy further 1909 * allocations, instead of polluting multiple pageblocks. 1910 * 1911 * If we are stealing a relatively large buddy page, it is likely there will 1912 * be more free pages in the pageblock, so try to steal them all. For 1913 * reclaimable and unmovable allocations, we steal regardless of page size, 1914 * as fragmentation caused by those allocations polluting movable pageblocks 1915 * is worse than movable allocations stealing from unmovable and reclaimable 1916 * pageblocks. 1917 */ 1918 static bool can_steal_fallback(unsigned int order, int start_mt) 1919 { 1920 /* 1921 * Leaving this order check is intended, although there is 1922 * relaxed order check in next check. The reason is that 1923 * we can actually steal whole pageblock if this condition met, 1924 * but, below check doesn't guarantee it and that is just heuristic 1925 * so could be changed anytime. 1926 */ 1927 if (order >= pageblock_order) 1928 return true; 1929 1930 /* 1931 * Movable pages won't cause permanent fragmentation, so when you alloc 1932 * small pages, you just need to temporarily steal unmovable or 1933 * reclaimable pages that are closest to the request size. After a 1934 * while, memory compaction may occur to form large contiguous pages, 1935 * and the next movable allocation may not need to steal. Unmovable and 1936 * reclaimable allocations need to actually steal pages. 
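	 *
	 * As a concrete illustration (assuming, for example, 4KiB base pages
	 * and pageblock_order == 9, i.e. 2MiB pageblocks): unmovable and
	 * reclaimable requests may steal at any order, while a movable
	 * request only steals once order >= pageblock_order / 2, i.e.
	 * order 4 (64KiB) in that configuration, or when page grouping by
	 * mobility is disabled.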
1937 */ 1938 if (order >= pageblock_order / 2 || 1939 start_mt == MIGRATE_RECLAIMABLE || 1940 start_mt == MIGRATE_UNMOVABLE || 1941 page_group_by_mobility_disabled) 1942 return true; 1943 1944 return false; 1945 } 1946 1947 static inline bool boost_watermark(struct zone *zone) 1948 { 1949 unsigned long max_boost; 1950 1951 if (!watermark_boost_factor) 1952 return false; 1953 /* 1954 * Don't bother in zones that are unlikely to produce results. 1955 * On small machines, including kdump capture kernels running 1956 * in a small area, boosting the watermark can cause an out of 1957 * memory situation immediately. 1958 */ 1959 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1960 return false; 1961 1962 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 1963 watermark_boost_factor, 10000); 1964 1965 /* 1966 * high watermark may be uninitialised if fragmentation occurs 1967 * very early in boot so do not boost. We do not fall 1968 * through and boost by pageblock_nr_pages as failing 1969 * allocations that early means that reclaim is not going 1970 * to help and it may even be impossible to reclaim the 1971 * boosted watermark resulting in a hang. 1972 */ 1973 if (!max_boost) 1974 return false; 1975 1976 max_boost = max(pageblock_nr_pages, max_boost); 1977 1978 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 1979 max_boost); 1980 1981 return true; 1982 } 1983 1984 /* 1985 * This function implements actual steal behaviour. If order is large enough, we 1986 * can claim the whole pageblock for the requested migratetype. If not, we check 1987 * the pageblock for constituent pages; if at least half of the pages are free 1988 * or compatible, we can still claim the whole block, so pages freed in the 1989 * future will be put on the correct free list. Otherwise, we isolate exactly 1990 * the order we need from the fallback block and leave its migratetype alone. 1991 */ 1992 static struct page * 1993 steal_suitable_fallback(struct zone *zone, struct page *page, 1994 int current_order, int order, int start_type, 1995 unsigned int alloc_flags, bool whole_block) 1996 { 1997 int free_pages, movable_pages, alike_pages; 1998 unsigned long start_pfn; 1999 int block_type; 2000 2001 block_type = get_pageblock_migratetype(page); 2002 2003 /* 2004 * This can happen due to races and we want to prevent broken 2005 * highatomic accounting. 2006 */ 2007 if (is_migrate_highatomic(block_type)) 2008 goto single_page; 2009 2010 /* Take ownership for orders >= pageblock_order */ 2011 if (current_order >= pageblock_order) { 2012 unsigned int nr_added; 2013 2014 del_page_from_free_list(page, zone, current_order, block_type); 2015 change_pageblock_range(page, current_order, start_type); 2016 nr_added = expand(zone, page, order, current_order, start_type); 2017 account_freepages(zone, nr_added, start_type); 2018 return page; 2019 } 2020 2021 /* 2022 * Boost watermarks to increase reclaim pressure to reduce the 2023 * likelihood of future fallbacks. Wake kswapd now as the node 2024 * may be balanced overall and kswapd will not wake naturally. 
2025 */ 2026 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2027 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2028 2029 /* We are not allowed to try stealing from the whole block */ 2030 if (!whole_block) 2031 goto single_page; 2032 2033 /* moving whole block can fail due to zone boundary conditions */ 2034 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 2035 &movable_pages)) 2036 goto single_page; 2037 2038 /* 2039 * Determine how many pages are compatible with our allocation. 2040 * For movable allocation, it's the number of movable pages which 2041 * we just obtained. For other types it's a bit more tricky. 2042 */ 2043 if (start_type == MIGRATE_MOVABLE) { 2044 alike_pages = movable_pages; 2045 } else { 2046 /* 2047 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2048 * to MOVABLE pageblock, consider all non-movable pages as 2049 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2050 * vice versa, be conservative since we can't distinguish the 2051 * exact migratetype of non-movable pages. 2052 */ 2053 if (block_type == MIGRATE_MOVABLE) 2054 alike_pages = pageblock_nr_pages 2055 - (free_pages + movable_pages); 2056 else 2057 alike_pages = 0; 2058 } 2059 /* 2060 * If a sufficient number of pages in the block are either free or of 2061 * compatible migratability as our allocation, claim the whole block. 2062 */ 2063 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2064 page_group_by_mobility_disabled) { 2065 __move_freepages_block(zone, start_pfn, block_type, start_type); 2066 return __rmqueue_smallest(zone, order, start_type); 2067 } 2068 2069 single_page: 2070 page_del_and_expand(zone, page, order, current_order, block_type); 2071 return page; 2072 } 2073 2074 /* 2075 * Check whether there is a suitable fallback freepage with requested order. 2076 * If only_stealable is true, this function returns fallback_mt only if 2077 * we can steal other freepages all together. This would help to reduce 2078 * fragmentation due to mixed migratetype pages in one pageblock. 2079 */ 2080 int find_suitable_fallback(struct free_area *area, unsigned int order, 2081 int migratetype, bool only_stealable, bool *can_steal) 2082 { 2083 int i; 2084 int fallback_mt; 2085 2086 if (area->nr_free == 0) 2087 return -1; 2088 2089 *can_steal = false; 2090 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 2091 fallback_mt = fallbacks[migratetype][i]; 2092 if (free_area_empty(area, fallback_mt)) 2093 continue; 2094 2095 if (can_steal_fallback(order, migratetype)) 2096 *can_steal = true; 2097 2098 if (!only_stealable) 2099 return fallback_mt; 2100 2101 if (*can_steal) 2102 return fallback_mt; 2103 } 2104 2105 return -1; 2106 } 2107 2108 /* 2109 * Reserve the pageblock(s) surrounding an allocation request for 2110 * exclusive use of high-order atomic allocations if there are no 2111 * empty page blocks that contain a page with a suitable order 2112 */ 2113 static void reserve_highatomic_pageblock(struct page *page, int order, 2114 struct zone *zone) 2115 { 2116 int mt; 2117 unsigned long max_managed, flags; 2118 2119 /* 2120 * The number reserved as: minimum is 1 pageblock, maximum is 2121 * roughly 1% of a zone. But if 1% of a zone falls below a 2122 * pageblock size, then don't reserve any pageblocks. 2123 * Check is race-prone but harmless. 
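	 *
	 * For example, assuming 4KiB pages and 2MiB pageblocks
	 * (pageblock_nr_pages == 512): a zone managing 4GiB (~1048576 pages)
	 * allows max_managed = ALIGN(10485, 512) = 10752 pages, i.e. 21
	 * pageblocks, while a zone below roughly 200MiB reserves nothing.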
 */
	if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
		return;
	max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
	if (zone->nr_reserved_highatomic >= max_managed)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	/* Recheck the nr_reserved_highatomic limit under the lock */
	if (zone->nr_reserved_highatomic >= max_managed)
		goto out_unlock;

	/* Yoink! */
	mt = get_pageblock_migratetype(page);
	/* Only reserve normal pageblocks (i.e., they can merge with others) */
	if (!migratetype_is_mergeable(mt))
		goto out_unlock;

	if (order < pageblock_order) {
		if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
			goto out_unlock;
		zone->nr_reserved_highatomic += pageblock_nr_pages;
	} else {
		change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
		zone->nr_reserved_highatomic += 1 << order;
	}

out_unlock:
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 *
 * If @force is true, try to unreserve pageblocks even when the highatomic
 * reserve is down to its last pageblock.
 */
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
						bool force)
{
	struct zonelist *zonelist = ac->zonelist;
	unsigned long flags;
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	int order;
	int ret;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
								ac->nodemask) {
		/*
		 * Preserve at least one pageblock unless memory pressure
		 * is really high.
		 */
		if (!force && zone->nr_reserved_highatomic <=
					pageblock_nr_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			struct free_area *area = &(zone->free_area[order]);
			int mt;

			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
			if (!page)
				continue;

			mt = get_pageblock_migratetype(page);
			/*
			 * In the page freeing path, the migratetype change is
			 * racy so we can encounter several free pages in a
			 * pageblock in this loop although we changed the
			 * pageblock type from highatomic to ac->migratetype.
			 * So we should adjust the count once.
			 */
			if (is_migrate_highatomic(mt)) {
				unsigned long size;
				/*
				 * It should never happen but changes to
				 * locking could inadvertently allow a per-cpu
				 * drain to add pages to MIGRATE_HIGHATOMIC
				 * while unreserving so be safe and watch for
				 * underflows.
				 */
				size = max(pageblock_nr_pages, 1UL << order);
				size = min(size, zone->nr_reserved_highatomic);
				zone->nr_reserved_highatomic -= size;
			}

			/*
			 * Convert to ac->migratetype and avoid the normal
			 * pageblock stealing heuristics. Minimally, the caller
			 * is doing the work and needs the pages. More
			 * importantly, if the block was always converted to
			 * MIGRATE_UNMOVABLE or another type then the number
			 * of pageblocks that cannot be completely freed
			 * may increase.
			 */
			if (order < pageblock_order)
				ret = move_freepages_block(zone, page, mt,
							   ac->migratetype);
			else {
				move_to_free_list(page, zone, order, mt,
						  ac->migratetype);
				change_pageblock_range(page, order,
						       ac->migratetype);
				ret = 1;
			}
			/*
			 * Reserving the block(s) already succeeded,
			 * so this should not fail on zone boundaries.
			 */
			WARN_ON_ONCE(ret == -1);
			if (ret > 0) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return ret;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return false;
}

/*
 * Try finding a free buddy page on the fallback list and put it on the free
 * list of requested migratetype, possibly along with other pages from the same
 * block, depending on fragmentation avoidance heuristics. Returns the page
 * taken from the fallback freelist, or NULL if no suitable fallback could be
 * found.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 */
static __always_inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
						unsigned int alloc_flags)
{
	struct free_area *area;
	int current_order;
	int min_order = order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

	/*
	 * Do not steal pages from freelists belonging to other pageblocks
	 * i.e. orders < pageblock_order. If there are no local zones free,
	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
	 */
	if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
		min_order = pageblock_order;

	/*
	 * Find the largest available free page in the other list. This roughly
	 * approximates finding the pageblock with the most free pages, which
	 * would be too costly to do exactly.
	 */
	for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
				--current_order) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt == -1)
			continue;

		/*
		 * If we cannot steal all free pages from the pageblock and the
		 * requested migratetype is movable, it's better to steal and
		 * split the smallest available page instead of the largest
		 * available page, because even if the next movable allocation
		 * falls back into a different pageblock than this one, it
		 * won't cause permanent fragmentation.
		 */
		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
					&& current_order > order)
			goto find_smallest;

		goto do_steal;
	}

	return NULL;

find_smallest:
	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt != -1)
			break;
	}

	/*
	 * This should not happen - we already found a suitable fallback
	 * when looking for the largest page.
	 */
	VM_BUG_ON(current_order > MAX_PAGE_ORDER);

do_steal:
	page = get_page_from_free_area(area, fallback_mt);

	/* take off list, maybe claim block, expand remainder */
	page = steal_suitable_fallback(zone, page, current_order, order,
				       start_migratetype, alloc_flags, can_steal);

	trace_mm_page_alloc_extfrag(page, order, current_order,
		start_migratetype, fallback_mt);

	return page;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
						unsigned int alloc_flags)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_CMA)) {
		/*
		 * Balance movable allocations between regular and CMA areas by
		 * allocating from CMA when over half of the zone's free memory
		 * is in the CMA area.
		 */
		if (alloc_flags & ALLOC_CMA &&
		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
			page = __rmqueue_cma_fallback(zone, order);
			if (page)
				return page;
		}
	}

	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (alloc_flags & ALLOC_CMA)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page)
			page = __rmqueue_fallback(zone, order, migratetype,
						  alloc_flags);
	}
	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, unsigned int alloc_flags)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&zone->lock, flags);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype,
								alloc_flags);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here in
		 * physical page order. The page is added to the tail of the
		 * caller's list. From the caller's perspective the linked list
		 * is therefore ordered by page number under some conditions,
		 * which helps IO devices that process requests from the head
		 * of the list and can merge IO requests for physically
		 * contiguous pages.
		 */
		list_add_tail(&page->pcp_list, list);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return i;
}

/*
 * Called from the vmstat counter updater to decay the PCP high.
 * Return whether there is additional work to do.
 */
int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
{
	int high_min, to_drain, batch;
	int todo = 0;

	high_min = READ_ONCE(pcp->high_min);
	batch = READ_ONCE(pcp->batch);
	/*
	 * Decrease pcp->high periodically to try to free possible
	 * idle PCP pages. Also avoid freeing too many pages at once,
	 * to control latency; this caps the pcp->high decrement as well.
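	 *
	 * Concretely, each invocation lowers pcp->high by about 1/8th
	 * (pcp->high >> 3), but never below high_min and never so far that
	 * more than batch << CONFIG_PCP_BATCH_SCALE_MAX pages would have to
	 * be freed in one go.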
2427 */ 2428 if (pcp->high > high_min) { 2429 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2430 pcp->high - (pcp->high >> 3), high_min); 2431 if (pcp->high > high_min) 2432 todo++; 2433 } 2434 2435 to_drain = pcp->count - pcp->high; 2436 if (to_drain > 0) { 2437 spin_lock(&pcp->lock); 2438 free_pcppages_bulk(zone, to_drain, pcp, 0); 2439 spin_unlock(&pcp->lock); 2440 todo++; 2441 } 2442 2443 return todo; 2444 } 2445 2446 #ifdef CONFIG_NUMA 2447 /* 2448 * Called from the vmstat counter updater to drain pagesets of this 2449 * currently executing processor on remote nodes after they have 2450 * expired. 2451 */ 2452 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2453 { 2454 int to_drain, batch; 2455 2456 batch = READ_ONCE(pcp->batch); 2457 to_drain = min(pcp->count, batch); 2458 if (to_drain > 0) { 2459 spin_lock(&pcp->lock); 2460 free_pcppages_bulk(zone, to_drain, pcp, 0); 2461 spin_unlock(&pcp->lock); 2462 } 2463 } 2464 #endif 2465 2466 /* 2467 * Drain pcplists of the indicated processor and zone. 2468 */ 2469 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2470 { 2471 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2472 int count; 2473 2474 do { 2475 spin_lock(&pcp->lock); 2476 count = pcp->count; 2477 if (count) { 2478 int to_drain = min(count, 2479 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2480 2481 free_pcppages_bulk(zone, to_drain, pcp, 0); 2482 count -= to_drain; 2483 } 2484 spin_unlock(&pcp->lock); 2485 } while (count); 2486 } 2487 2488 /* 2489 * Drain pcplists of all zones on the indicated processor. 2490 */ 2491 static void drain_pages(unsigned int cpu) 2492 { 2493 struct zone *zone; 2494 2495 for_each_populated_zone(zone) { 2496 drain_pages_zone(cpu, zone); 2497 } 2498 } 2499 2500 /* 2501 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2502 */ 2503 void drain_local_pages(struct zone *zone) 2504 { 2505 int cpu = smp_processor_id(); 2506 2507 if (zone) 2508 drain_pages_zone(cpu, zone); 2509 else 2510 drain_pages(cpu); 2511 } 2512 2513 /* 2514 * The implementation of drain_all_pages(), exposing an extra parameter to 2515 * drain on all cpus. 2516 * 2517 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2518 * not empty. The check for non-emptiness can however race with a free to 2519 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2520 * that need the guarantee that every CPU has drained can disable the 2521 * optimizing racy check. 2522 */ 2523 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2524 { 2525 int cpu; 2526 2527 /* 2528 * Allocate in the BSS so we won't require allocation in 2529 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2530 */ 2531 static cpumask_t cpus_with_pcps; 2532 2533 /* 2534 * Do not drain if one is already in progress unless it's specific to 2535 * a zone. Such callers are primarily CMA and memory hotplug and need 2536 * the drain to be complete when the call returns. 
2537 */ 2538 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2539 if (!zone) 2540 return; 2541 mutex_lock(&pcpu_drain_mutex); 2542 } 2543 2544 /* 2545 * We don't care about racing with CPU hotplug event 2546 * as offline notification will cause the notified 2547 * cpu to drain that CPU pcps and on_each_cpu_mask 2548 * disables preemption as part of its processing 2549 */ 2550 for_each_online_cpu(cpu) { 2551 struct per_cpu_pages *pcp; 2552 struct zone *z; 2553 bool has_pcps = false; 2554 2555 if (force_all_cpus) { 2556 /* 2557 * The pcp.count check is racy, some callers need a 2558 * guarantee that no cpu is missed. 2559 */ 2560 has_pcps = true; 2561 } else if (zone) { 2562 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2563 if (pcp->count) 2564 has_pcps = true; 2565 } else { 2566 for_each_populated_zone(z) { 2567 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2568 if (pcp->count) { 2569 has_pcps = true; 2570 break; 2571 } 2572 } 2573 } 2574 2575 if (has_pcps) 2576 cpumask_set_cpu(cpu, &cpus_with_pcps); 2577 else 2578 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2579 } 2580 2581 for_each_cpu(cpu, &cpus_with_pcps) { 2582 if (zone) 2583 drain_pages_zone(cpu, zone); 2584 else 2585 drain_pages(cpu); 2586 } 2587 2588 mutex_unlock(&pcpu_drain_mutex); 2589 } 2590 2591 /* 2592 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2593 * 2594 * When zone parameter is non-NULL, spill just the single zone's pages. 2595 */ 2596 void drain_all_pages(struct zone *zone) 2597 { 2598 __drain_all_pages(zone, false); 2599 } 2600 2601 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2602 { 2603 int min_nr_free, max_nr_free; 2604 2605 /* Free as much as possible if batch freeing high-order pages. */ 2606 if (unlikely(free_high)) 2607 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2608 2609 /* Check for PCP disabled or boot pageset */ 2610 if (unlikely(high < batch)) 2611 return 1; 2612 2613 /* Leave at least pcp->batch pages on the list */ 2614 min_nr_free = batch; 2615 max_nr_free = high - batch; 2616 2617 /* 2618 * Increase the batch number to the number of the consecutive 2619 * freed pages to reduce zone lock contention. 
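	 *
	 * pcp->free_count grows while frees dominate (see
	 * free_frozen_page_commit()) and is halved again on the allocation
	 * path (see rmqueue_pcplist()), so sustained freeing drains the list
	 * in progressively larger chunks, clamped to [batch, high - batch].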
2620 */ 2621 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2622 2623 return batch; 2624 } 2625 2626 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2627 int batch, bool free_high) 2628 { 2629 int high, high_min, high_max; 2630 2631 high_min = READ_ONCE(pcp->high_min); 2632 high_max = READ_ONCE(pcp->high_max); 2633 high = pcp->high = clamp(pcp->high, high_min, high_max); 2634 2635 if (unlikely(!high)) 2636 return 0; 2637 2638 if (unlikely(free_high)) { 2639 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2640 high_min); 2641 return 0; 2642 } 2643 2644 /* 2645 * If reclaim is active, limit the number of pages that can be 2646 * stored on pcp lists 2647 */ 2648 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2649 int free_count = max_t(int, pcp->free_count, batch); 2650 2651 pcp->high = max(high - free_count, high_min); 2652 return min(batch << 2, pcp->high); 2653 } 2654 2655 if (high_min == high_max) 2656 return high; 2657 2658 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2659 int free_count = max_t(int, pcp->free_count, batch); 2660 2661 pcp->high = max(high - free_count, high_min); 2662 high = max(pcp->count, high_min); 2663 } else if (pcp->count >= high) { 2664 int need_high = pcp->free_count + batch; 2665 2666 /* pcp->high should be large enough to hold batch freed pages */ 2667 if (pcp->high < need_high) 2668 pcp->high = clamp(need_high, high_min, high_max); 2669 } 2670 2671 return high; 2672 } 2673 2674 static void free_frozen_page_commit(struct zone *zone, 2675 struct per_cpu_pages *pcp, struct page *page, int migratetype, 2676 unsigned int order) 2677 { 2678 int high, batch; 2679 int pindex; 2680 bool free_high = false; 2681 2682 /* 2683 * On freeing, reduce the number of pages that are batch allocated. 2684 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2685 * allocations. 2686 */ 2687 pcp->alloc_factor >>= 1; 2688 __count_vm_events(PGFREE, 1 << order); 2689 pindex = order_to_pindex(migratetype, order); 2690 list_add(&page->pcp_list, &pcp->lists[pindex]); 2691 pcp->count += 1 << order; 2692 2693 batch = READ_ONCE(pcp->batch); 2694 /* 2695 * As high-order pages other than THP's stored on PCP can contribute 2696 * to fragmentation, limit the number stored when PCP is heavily 2697 * freeing without allocation. The remainder after bulk freeing 2698 * stops will be drained from vmstat refresh context. 
2699 */ 2700 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2701 free_high = (pcp->free_count >= batch && 2702 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2703 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2704 pcp->count >= READ_ONCE(batch))); 2705 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2706 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2707 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2708 } 2709 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2710 pcp->free_count += (1 << order); 2711 high = nr_pcp_high(pcp, zone, batch, free_high); 2712 if (pcp->count >= high) { 2713 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), 2714 pcp, pindex); 2715 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2716 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2717 ZONE_MOVABLE, 0)) 2718 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2719 } 2720 } 2721 2722 /* 2723 * Free a pcp page 2724 */ 2725 void free_frozen_pages(struct page *page, unsigned int order) 2726 { 2727 unsigned long __maybe_unused UP_flags; 2728 struct per_cpu_pages *pcp; 2729 struct zone *zone; 2730 unsigned long pfn = page_to_pfn(page); 2731 int migratetype; 2732 2733 if (!pcp_allowed_order(order)) { 2734 __free_pages_ok(page, order, FPI_NONE); 2735 return; 2736 } 2737 2738 if (!free_pages_prepare(page, order)) 2739 return; 2740 2741 /* 2742 * We only track unmovable, reclaimable and movable on pcp lists. 2743 * Place ISOLATE pages on the isolated list because they are being 2744 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2745 * get those areas back if necessary. Otherwise, we may have to free 2746 * excessively into the page allocator 2747 */ 2748 zone = page_zone(page); 2749 migratetype = get_pfnblock_migratetype(page, pfn); 2750 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2751 if (unlikely(is_migrate_isolate(migratetype))) { 2752 free_one_page(zone, page, pfn, order, FPI_NONE); 2753 return; 2754 } 2755 migratetype = MIGRATE_MOVABLE; 2756 } 2757 2758 pcp_trylock_prepare(UP_flags); 2759 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2760 if (pcp) { 2761 free_frozen_page_commit(zone, pcp, page, migratetype, order); 2762 pcp_spin_unlock(pcp); 2763 } else { 2764 free_one_page(zone, page, pfn, order, FPI_NONE); 2765 } 2766 pcp_trylock_finish(UP_flags); 2767 } 2768 2769 /* 2770 * Free a batch of folios 2771 */ 2772 void free_unref_folios(struct folio_batch *folios) 2773 { 2774 unsigned long __maybe_unused UP_flags; 2775 struct per_cpu_pages *pcp = NULL; 2776 struct zone *locked_zone = NULL; 2777 int i, j; 2778 2779 /* Prepare folios for freeing */ 2780 for (i = 0, j = 0; i < folios->nr; i++) { 2781 struct folio *folio = folios->folios[i]; 2782 unsigned long pfn = folio_pfn(folio); 2783 unsigned int order = folio_order(folio); 2784 2785 if (!free_pages_prepare(&folio->page, order)) 2786 continue; 2787 /* 2788 * Free orders not handled on the PCP directly to the 2789 * allocator. 
2790 */ 2791 if (!pcp_allowed_order(order)) { 2792 free_one_page(folio_zone(folio), &folio->page, 2793 pfn, order, FPI_NONE); 2794 continue; 2795 } 2796 folio->private = (void *)(unsigned long)order; 2797 if (j != i) 2798 folios->folios[j] = folio; 2799 j++; 2800 } 2801 folios->nr = j; 2802 2803 for (i = 0; i < folios->nr; i++) { 2804 struct folio *folio = folios->folios[i]; 2805 struct zone *zone = folio_zone(folio); 2806 unsigned long pfn = folio_pfn(folio); 2807 unsigned int order = (unsigned long)folio->private; 2808 int migratetype; 2809 2810 folio->private = NULL; 2811 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 2812 2813 /* Different zone requires a different pcp lock */ 2814 if (zone != locked_zone || 2815 is_migrate_isolate(migratetype)) { 2816 if (pcp) { 2817 pcp_spin_unlock(pcp); 2818 pcp_trylock_finish(UP_flags); 2819 locked_zone = NULL; 2820 pcp = NULL; 2821 } 2822 2823 /* 2824 * Free isolated pages directly to the 2825 * allocator, see comment in free_frozen_pages. 2826 */ 2827 if (is_migrate_isolate(migratetype)) { 2828 free_one_page(zone, &folio->page, pfn, 2829 order, FPI_NONE); 2830 continue; 2831 } 2832 2833 /* 2834 * trylock is necessary as folios may be getting freed 2835 * from IRQ or SoftIRQ context after an IO completion. 2836 */ 2837 pcp_trylock_prepare(UP_flags); 2838 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2839 if (unlikely(!pcp)) { 2840 pcp_trylock_finish(UP_flags); 2841 free_one_page(zone, &folio->page, pfn, 2842 order, FPI_NONE); 2843 continue; 2844 } 2845 locked_zone = zone; 2846 } 2847 2848 /* 2849 * Non-isolated types over MIGRATE_PCPTYPES get added 2850 * to the MIGRATE_MOVABLE pcp list. 2851 */ 2852 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2853 migratetype = MIGRATE_MOVABLE; 2854 2855 trace_mm_page_free_batched(&folio->page); 2856 free_frozen_page_commit(zone, pcp, &folio->page, migratetype, 2857 order); 2858 } 2859 2860 if (pcp) { 2861 pcp_spin_unlock(pcp); 2862 pcp_trylock_finish(UP_flags); 2863 } 2864 folio_batch_reinit(folios); 2865 } 2866 2867 /* 2868 * split_page takes a non-compound higher-order page, and splits it into 2869 * n (1<<order) sub-pages: page[0..n] 2870 * Each sub-page must be freed individually. 2871 * 2872 * Note: this is probably too low level an operation for use in drivers. 2873 * Please consult with lkml before using this in your driver. 2874 */ 2875 void split_page(struct page *page, unsigned int order) 2876 { 2877 int i; 2878 2879 VM_BUG_ON_PAGE(PageCompound(page), page); 2880 VM_BUG_ON_PAGE(!page_count(page), page); 2881 2882 for (i = 1; i < (1 << order); i++) 2883 set_page_refcounted(page + i); 2884 split_page_owner(page, order, 0); 2885 pgalloc_tag_split(page_folio(page), order, 0); 2886 split_page_memcg(page, order, 0); 2887 } 2888 EXPORT_SYMBOL_GPL(split_page); 2889 2890 int __isolate_free_page(struct page *page, unsigned int order) 2891 { 2892 struct zone *zone = page_zone(page); 2893 int mt = get_pageblock_migratetype(page); 2894 2895 if (!is_migrate_isolate(mt)) { 2896 unsigned long watermark; 2897 /* 2898 * Obey watermarks as if the page was being allocated. We can 2899 * emulate a high-order watermark check with a raised order-0 2900 * watermark, because we already know our high-order page 2901 * exists. 
2902 */ 2903 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2904 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2905 return 0; 2906 } 2907 2908 del_page_from_free_list(page, zone, order, mt); 2909 2910 /* 2911 * Set the pageblock if the isolated page is at least half of a 2912 * pageblock 2913 */ 2914 if (order >= pageblock_order - 1) { 2915 struct page *endpage = page + (1 << order) - 1; 2916 for (; page < endpage; page += pageblock_nr_pages) { 2917 int mt = get_pageblock_migratetype(page); 2918 /* 2919 * Only change normal pageblocks (i.e., they can merge 2920 * with others) 2921 */ 2922 if (migratetype_is_mergeable(mt)) 2923 move_freepages_block(zone, page, mt, 2924 MIGRATE_MOVABLE); 2925 } 2926 } 2927 2928 return 1UL << order; 2929 } 2930 2931 /** 2932 * __putback_isolated_page - Return a now-isolated page back where we got it 2933 * @page: Page that was isolated 2934 * @order: Order of the isolated page 2935 * @mt: The page's pageblock's migratetype 2936 * 2937 * This function is meant to return a page pulled from the free lists via 2938 * __isolate_free_page back to the free lists they were pulled from. 2939 */ 2940 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2941 { 2942 struct zone *zone = page_zone(page); 2943 2944 /* zone lock should be held when this function is called */ 2945 lockdep_assert_held(&zone->lock); 2946 2947 /* Return isolated page to tail of freelist. */ 2948 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2949 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2950 } 2951 2952 /* 2953 * Update NUMA hit/miss statistics 2954 */ 2955 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2956 long nr_account) 2957 { 2958 #ifdef CONFIG_NUMA 2959 enum numa_stat_item local_stat = NUMA_LOCAL; 2960 2961 /* skip numa counters update if numa stats is disabled */ 2962 if (!static_branch_likely(&vm_numa_stat_key)) 2963 return; 2964 2965 if (zone_to_nid(z) != numa_node_id()) 2966 local_stat = NUMA_OTHER; 2967 2968 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2969 __count_numa_events(z, NUMA_HIT, nr_account); 2970 else { 2971 __count_numa_events(z, NUMA_MISS, nr_account); 2972 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2973 } 2974 __count_numa_events(z, local_stat, nr_account); 2975 #endif 2976 } 2977 2978 static __always_inline 2979 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2980 unsigned int order, unsigned int alloc_flags, 2981 int migratetype) 2982 { 2983 struct page *page; 2984 unsigned long flags; 2985 2986 do { 2987 page = NULL; 2988 spin_lock_irqsave(&zone->lock, flags); 2989 if (alloc_flags & ALLOC_HIGHATOMIC) 2990 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2991 if (!page) { 2992 page = __rmqueue(zone, order, migratetype, alloc_flags); 2993 2994 /* 2995 * If the allocation fails, allow OOM handling and 2996 * order-0 (atomic) allocs access to HIGHATOMIC 2997 * reserves as failing now is worse than failing a 2998 * high-order atomic allocation in the future. 
2999 */ 3000 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 3001 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3002 3003 if (!page) { 3004 spin_unlock_irqrestore(&zone->lock, flags); 3005 return NULL; 3006 } 3007 } 3008 spin_unlock_irqrestore(&zone->lock, flags); 3009 } while (check_new_pages(page, order)); 3010 3011 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3012 zone_statistics(preferred_zone, zone, 1); 3013 3014 return page; 3015 } 3016 3017 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 3018 { 3019 int high, base_batch, batch, max_nr_alloc; 3020 int high_max, high_min; 3021 3022 base_batch = READ_ONCE(pcp->batch); 3023 high_min = READ_ONCE(pcp->high_min); 3024 high_max = READ_ONCE(pcp->high_max); 3025 high = pcp->high = clamp(pcp->high, high_min, high_max); 3026 3027 /* Check for PCP disabled or boot pageset */ 3028 if (unlikely(high < base_batch)) 3029 return 1; 3030 3031 if (order) 3032 batch = base_batch; 3033 else 3034 batch = (base_batch << pcp->alloc_factor); 3035 3036 /* 3037 * If we had larger pcp->high, we could avoid to allocate from 3038 * zone. 3039 */ 3040 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3041 high = pcp->high = min(high + batch, high_max); 3042 3043 if (!order) { 3044 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 3045 /* 3046 * Double the number of pages allocated each time there is 3047 * subsequent allocation of order-0 pages without any freeing. 3048 */ 3049 if (batch <= max_nr_alloc && 3050 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 3051 pcp->alloc_factor++; 3052 batch = min(batch, max_nr_alloc); 3053 } 3054 3055 /* 3056 * Scale batch relative to order if batch implies free pages 3057 * can be stored on the PCP. Batch can be 1 for small zones or 3058 * for boot pagesets which should never store free pages as 3059 * the pages may belong to arbitrary zones. 3060 */ 3061 if (batch > 1) 3062 batch = max(batch >> order, 2); 3063 3064 return batch; 3065 } 3066 3067 /* Remove page from the per-cpu list, caller must protect the list */ 3068 static inline 3069 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3070 int migratetype, 3071 unsigned int alloc_flags, 3072 struct per_cpu_pages *pcp, 3073 struct list_head *list) 3074 { 3075 struct page *page; 3076 3077 do { 3078 if (list_empty(list)) { 3079 int batch = nr_pcp_alloc(pcp, zone, order); 3080 int alloced; 3081 3082 alloced = rmqueue_bulk(zone, order, 3083 batch, list, 3084 migratetype, alloc_flags); 3085 3086 pcp->count += alloced << order; 3087 if (unlikely(list_empty(list))) 3088 return NULL; 3089 } 3090 3091 page = list_first_entry(list, struct page, pcp_list); 3092 list_del(&page->pcp_list); 3093 pcp->count -= 1 << order; 3094 } while (check_new_pages(page, order)); 3095 3096 return page; 3097 } 3098 3099 /* Lock and remove page from the per-cpu list */ 3100 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3101 struct zone *zone, unsigned int order, 3102 int migratetype, unsigned int alloc_flags) 3103 { 3104 struct per_cpu_pages *pcp; 3105 struct list_head *list; 3106 struct page *page; 3107 unsigned long __maybe_unused UP_flags; 3108 3109 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
 */
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
	if (!pcp) {
		pcp_trylock_finish(UP_flags);
		return NULL;
	}

	/*
	 * On allocation, reduce the number of pages that are batch freed.
	 * See nr_pcp_free() where pcp->free_count is used to scale the batch
	 * size of subsequent frees.
	 */
	pcp->free_count >>= 1;
	list = &pcp->lists[order_to_pindex(migratetype, order)];
	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
	pcp_spin_unlock(pcp);
	pcp_trylock_finish(UP_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
		zone_statistics(preferred_zone, zone, 1);
	}
	return page;
}

/*
 * Allocate a page from the given zone.
 * Use pcplists for THP or "cheap" high-order allocations.
 */

/*
 * Do not instrument rmqueue() with KMSAN. This function may call
 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
 * may call rmqueue() again, which will result in a deadlock.
 */
__no_sanitize_memory
static inline
struct page *rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, unsigned int alloc_flags,
			int migratetype)
{
	struct page *page;

	if (likely(pcp_allowed_order(order))) {
		page = rmqueue_pcplist(preferred_zone, zone, order,
				       migratetype, alloc_flags);
		if (likely(page))
			goto out;
	}

	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
							migratetype);

out:
	/* Separate test+clear to avoid unnecessary atomics */
	if ((alloc_flags & ALLOC_KSWAPD) &&
	    unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
	}

	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
	return page;
}

static inline long __zone_watermark_unusable_free(struct zone *z,
				unsigned int order, unsigned int alloc_flags)
{
	long unusable_free = (1 << order) - 1;

	/*
	 * If the caller does not have rights to reserves below the min
	 * watermark then subtract the free pages reserved for highatomic.
	 */
	if (likely(!(alloc_flags & ALLOC_RESERVES)))
		unusable_free += READ_ONCE(z->nr_free_highatomic);

#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	return unusable_free;
}

/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
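 *
 * The reserve handling below means, for example, that an ALLOC_MIN_RESERVE
 * (__GFP_HIGH) request is checked against roughly 50% of the min watermark,
 * a non-blocking __GFP_HIGH request (e.g. GFP_ATOMIC) against roughly 37.5%,
 * and ALLOC_OOM victims get a further halving on top of whatever applies.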
3202 */ 3203 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3204 int highest_zoneidx, unsigned int alloc_flags, 3205 long free_pages) 3206 { 3207 long min = mark; 3208 int o; 3209 3210 /* free_pages may go negative - that's OK */ 3211 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3212 3213 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3214 /* 3215 * __GFP_HIGH allows access to 50% of the min reserve as well 3216 * as OOM. 3217 */ 3218 if (alloc_flags & ALLOC_MIN_RESERVE) { 3219 min -= min / 2; 3220 3221 /* 3222 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3223 * access more reserves than just __GFP_HIGH. Other 3224 * non-blocking allocations requests such as GFP_NOWAIT 3225 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3226 * access to the min reserve. 3227 */ 3228 if (alloc_flags & ALLOC_NON_BLOCK) 3229 min -= min / 4; 3230 } 3231 3232 /* 3233 * OOM victims can try even harder than the normal reserve 3234 * users on the grounds that it's definitely going to be in 3235 * the exit path shortly and free memory. Any allocation it 3236 * makes during the free path will be small and short-lived. 3237 */ 3238 if (alloc_flags & ALLOC_OOM) 3239 min -= min / 2; 3240 } 3241 3242 /* 3243 * Check watermarks for an order-0 allocation request. If these 3244 * are not met, then a high-order request also cannot go ahead 3245 * even if a suitable page happened to be free. 3246 */ 3247 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3248 return false; 3249 3250 /* If this is an order-0 request then the watermark is fine */ 3251 if (!order) 3252 return true; 3253 3254 /* For a high-order request, check at least one suitable page is free */ 3255 for (o = order; o < NR_PAGE_ORDERS; o++) { 3256 struct free_area *area = &z->free_area[o]; 3257 int mt; 3258 3259 if (!area->nr_free) 3260 continue; 3261 3262 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3263 if (!free_area_empty(area, mt)) 3264 return true; 3265 } 3266 3267 #ifdef CONFIG_CMA 3268 if ((alloc_flags & ALLOC_CMA) && 3269 !free_area_empty(area, MIGRATE_CMA)) { 3270 return true; 3271 } 3272 #endif 3273 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3274 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3275 return true; 3276 } 3277 } 3278 return false; 3279 } 3280 3281 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3282 int highest_zoneidx, unsigned int alloc_flags) 3283 { 3284 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3285 zone_page_state(z, NR_FREE_PAGES)); 3286 } 3287 3288 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3289 unsigned long mark, int highest_zoneidx, 3290 unsigned int alloc_flags, gfp_t gfp_mask) 3291 { 3292 long free_pages; 3293 3294 free_pages = zone_page_state(z, NR_FREE_PAGES); 3295 3296 /* 3297 * Fast check for order-0 only. If this fails then the reserves 3298 * need to be calculated. 3299 */ 3300 if (!order) { 3301 long usable_free; 3302 long reserved; 3303 3304 usable_free = free_pages; 3305 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3306 3307 /* reserved may over estimate high-atomic reserves. 
*/ 3308 usable_free -= min(usable_free, reserved); 3309 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3310 return true; 3311 } 3312 3313 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3314 free_pages)) 3315 return true; 3316 3317 /* 3318 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3319 * when checking the min watermark. The min watermark is the 3320 * point where boosting is ignored so that kswapd is woken up 3321 * when below the low watermark. 3322 */ 3323 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3324 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3325 mark = z->_watermark[WMARK_MIN]; 3326 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3327 alloc_flags, free_pages); 3328 } 3329 3330 return false; 3331 } 3332 3333 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3334 unsigned long mark, int highest_zoneidx) 3335 { 3336 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3337 3338 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3339 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3340 3341 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3342 free_pages); 3343 } 3344 3345 #ifdef CONFIG_NUMA 3346 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3347 3348 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3349 { 3350 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3351 node_reclaim_distance; 3352 } 3353 #else /* CONFIG_NUMA */ 3354 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3355 { 3356 return true; 3357 } 3358 #endif /* CONFIG_NUMA */ 3359 3360 /* 3361 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3362 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3363 * premature use of a lower zone may cause lowmem pressure problems that 3364 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3365 * probably too small. It only makes sense to spread allocations to avoid 3366 * fragmentation between the Normal and DMA32 zones. 3367 */ 3368 static inline unsigned int 3369 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3370 { 3371 unsigned int alloc_flags; 3372 3373 /* 3374 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3375 * to save a branch. 3376 */ 3377 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3378 3379 #ifdef CONFIG_ZONE_DMA32 3380 if (!zone) 3381 return alloc_flags; 3382 3383 if (zone_idx(zone) != ZONE_NORMAL) 3384 return alloc_flags; 3385 3386 /* 3387 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3388 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3389 * on UMA that if Normal is populated then so is DMA32. 
3390 */ 3391 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3392 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3393 return alloc_flags; 3394 3395 alloc_flags |= ALLOC_NOFRAGMENT; 3396 #endif /* CONFIG_ZONE_DMA32 */ 3397 return alloc_flags; 3398 } 3399 3400 /* Must be called after current_gfp_context() which can change gfp_mask */ 3401 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3402 unsigned int alloc_flags) 3403 { 3404 #ifdef CONFIG_CMA 3405 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3406 alloc_flags |= ALLOC_CMA; 3407 #endif 3408 return alloc_flags; 3409 } 3410 3411 /* 3412 * get_page_from_freelist goes through the zonelist trying to allocate 3413 * a page. 3414 */ 3415 static struct page * 3416 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3417 const struct alloc_context *ac) 3418 { 3419 struct zoneref *z; 3420 struct zone *zone; 3421 struct pglist_data *last_pgdat = NULL; 3422 bool last_pgdat_dirty_ok = false; 3423 bool no_fallback; 3424 3425 retry: 3426 /* 3427 * Scan zonelist, looking for a zone with enough free. 3428 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 3429 */ 3430 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3431 z = ac->preferred_zoneref; 3432 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3433 ac->nodemask) { 3434 struct page *page; 3435 unsigned long mark; 3436 3437 if (cpusets_enabled() && 3438 (alloc_flags & ALLOC_CPUSET) && 3439 !__cpuset_zone_allowed(zone, gfp_mask)) 3440 continue; 3441 /* 3442 * When allocating a page cache page for writing, we 3443 * want to get it from a node that is within its dirty 3444 * limit, such that no single node holds more than its 3445 * proportional share of globally allowed dirty pages. 3446 * The dirty limits take into account the node's 3447 * lowmem reserves and high watermark so that kswapd 3448 * should be able to balance it without having to 3449 * write pages from its LRU list. 3450 * 3451 * XXX: For now, allow allocations to potentially 3452 * exceed the per-node dirty limit in the slowpath 3453 * (spread_dirty_pages unset) before going into reclaim, 3454 * which is important when on a NUMA setup the allowed 3455 * nodes are together not big enough to reach the 3456 * global limit. The proper fix for these situations 3457 * will require awareness of nodes in the 3458 * dirty-throttling and the flusher threads. 3459 */ 3460 if (ac->spread_dirty_pages) { 3461 if (last_pgdat != zone->zone_pgdat) { 3462 last_pgdat = zone->zone_pgdat; 3463 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3464 } 3465 3466 if (!last_pgdat_dirty_ok) 3467 continue; 3468 } 3469 3470 if (no_fallback && nr_online_nodes > 1 && 3471 zone != zonelist_zone(ac->preferred_zoneref)) { 3472 int local_nid; 3473 3474 /* 3475 * If moving to a remote node, retry but allow 3476 * fragmenting fallbacks. Locality is more important 3477 * than fragmentation avoidance. 3478 */ 3479 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3480 if (zone_to_nid(zone) != local_nid) { 3481 alloc_flags &= ~ALLOC_NOFRAGMENT; 3482 goto retry; 3483 } 3484 } 3485 3486 cond_accept_memory(zone, order); 3487 3488 /* 3489 * Detect whether the number of free pages is below high 3490 * watermark. If so, we will decrease pcp->high and free 3491 * PCP pages in free path to reduce the possibility of 3492 * premature page reclaiming. Detection is done here to 3493 * avoid to do that in hotter free path. 
3494 */ 3495 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3496 goto check_alloc_wmark; 3497 3498 mark = high_wmark_pages(zone); 3499 if (zone_watermark_fast(zone, order, mark, 3500 ac->highest_zoneidx, alloc_flags, 3501 gfp_mask)) 3502 goto try_this_zone; 3503 else 3504 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3505 3506 check_alloc_wmark: 3507 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3508 if (!zone_watermark_fast(zone, order, mark, 3509 ac->highest_zoneidx, alloc_flags, 3510 gfp_mask)) { 3511 int ret; 3512 3513 if (cond_accept_memory(zone, order)) 3514 goto try_this_zone; 3515 3516 /* 3517 * Watermark failed for this zone, but see if we can 3518 * grow this zone if it contains deferred pages. 3519 */ 3520 if (deferred_pages_enabled()) { 3521 if (_deferred_grow_zone(zone, order)) 3522 goto try_this_zone; 3523 } 3524 /* Checked here to keep the fast path fast */ 3525 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3526 if (alloc_flags & ALLOC_NO_WATERMARKS) 3527 goto try_this_zone; 3528 3529 if (!node_reclaim_enabled() || 3530 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3531 continue; 3532 3533 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3534 switch (ret) { 3535 case NODE_RECLAIM_NOSCAN: 3536 /* did not scan */ 3537 continue; 3538 case NODE_RECLAIM_FULL: 3539 /* scanned but unreclaimable */ 3540 continue; 3541 default: 3542 /* did we reclaim enough */ 3543 if (zone_watermark_ok(zone, order, mark, 3544 ac->highest_zoneidx, alloc_flags)) 3545 goto try_this_zone; 3546 3547 continue; 3548 } 3549 } 3550 3551 try_this_zone: 3552 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3553 gfp_mask, alloc_flags, ac->migratetype); 3554 if (page) { 3555 prep_new_page(page, order, gfp_mask, alloc_flags); 3556 3557 /* 3558 * If this is a high-order atomic allocation then check 3559 * if the pageblock should be reserved for the future 3560 */ 3561 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3562 reserve_highatomic_pageblock(page, order, zone); 3563 3564 return page; 3565 } else { 3566 if (cond_accept_memory(zone, order)) 3567 goto try_this_zone; 3568 3569 /* Try again if zone has deferred pages */ 3570 if (deferred_pages_enabled()) { 3571 if (_deferred_grow_zone(zone, order)) 3572 goto try_this_zone; 3573 } 3574 } 3575 } 3576 3577 /* 3578 * It's possible on a UMA machine to get through all zones that are 3579 * fragmented. If avoiding fragmentation, reset and try again. 3580 */ 3581 if (no_fallback) { 3582 alloc_flags &= ~ALLOC_NOFRAGMENT; 3583 goto retry; 3584 } 3585 3586 return NULL; 3587 } 3588 3589 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3590 { 3591 unsigned int filter = SHOW_MEM_FILTER_NODES; 3592 3593 /* 3594 * This documents exceptions given to allocations in certain 3595 * contexts that are allowed to allocate outside current's set 3596 * of allowed nodes. 3597 */ 3598 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3599 if (tsk_is_oom_victim(current) || 3600 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3601 filter &= ~SHOW_MEM_FILTER_NODES; 3602 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3603 filter &= ~SHOW_MEM_FILTER_NODES; 3604 3605 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3606 } 3607 3608 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3609 { 3610 struct va_format vaf; 3611 va_list args; 3612 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3613 3614 if ((gfp_mask & __GFP_NOWARN) || 3615 !__ratelimit(&nopage_rs) || 3616 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3617 return; 3618 3619 va_start(args, fmt); 3620 vaf.fmt = fmt; 3621 vaf.va = &args; 3622 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3623 current->comm, &vaf, gfp_mask, &gfp_mask, 3624 nodemask_pr_args(nodemask)); 3625 va_end(args); 3626 3627 cpuset_print_current_mems_allowed(); 3628 pr_cont("\n"); 3629 dump_stack(); 3630 warn_alloc_show_mem(gfp_mask, nodemask); 3631 } 3632 3633 static inline struct page * 3634 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3635 unsigned int alloc_flags, 3636 const struct alloc_context *ac) 3637 { 3638 struct page *page; 3639 3640 page = get_page_from_freelist(gfp_mask, order, 3641 alloc_flags|ALLOC_CPUSET, ac); 3642 /* 3643 * fallback to ignore cpuset restriction if our nodes 3644 * are depleted 3645 */ 3646 if (!page) 3647 page = get_page_from_freelist(gfp_mask, order, 3648 alloc_flags, ac); 3649 return page; 3650 } 3651 3652 static inline struct page * 3653 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3654 const struct alloc_context *ac, unsigned long *did_some_progress) 3655 { 3656 struct oom_control oc = { 3657 .zonelist = ac->zonelist, 3658 .nodemask = ac->nodemask, 3659 .memcg = NULL, 3660 .gfp_mask = gfp_mask, 3661 .order = order, 3662 }; 3663 struct page *page; 3664 3665 *did_some_progress = 0; 3666 3667 /* 3668 * Acquire the oom lock. If that fails, somebody else is 3669 * making progress for us. 3670 */ 3671 if (!mutex_trylock(&oom_lock)) { 3672 *did_some_progress = 1; 3673 schedule_timeout_uninterruptible(1); 3674 return NULL; 3675 } 3676 3677 /* 3678 * Go through the zonelist yet one more time, keep very high watermark 3679 * here, this is only to catch a parallel oom killing, we must fail if 3680 * we're still under heavy pressure. But make sure that this reclaim 3681 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3682 * allocation which will never fail due to oom_lock already held. 3683 */ 3684 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3685 ~__GFP_DIRECT_RECLAIM, order, 3686 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3687 if (page) 3688 goto out; 3689 3690 /* Coredumps can quickly deplete all memory reserves */ 3691 if (current->flags & PF_DUMPCORE) 3692 goto out; 3693 /* The OOM killer will not help higher order allocs */ 3694 if (order > PAGE_ALLOC_COSTLY_ORDER) 3695 goto out; 3696 /* 3697 * We have already exhausted all our reclaim opportunities without any 3698 * success so it is time to admit defeat. We will skip the OOM killer 3699 * because it is very likely that the caller has a more reasonable 3700 * fallback than shooting a random task. 3701 * 3702 * The OOM killer may not free memory on a specific node. 3703 */ 3704 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3705 goto out; 3706 /* The OOM killer does not needlessly kill tasks for lowmem */ 3707 if (ac->highest_zoneidx < ZONE_NORMAL) 3708 goto out; 3709 if (pm_suspended_storage()) 3710 goto out; 3711 /* 3712 * XXX: GFP_NOFS allocations should rather fail than rely on 3713 * other request to make a forward progress. 3714 * We are in an unfortunate situation where out_of_memory cannot 3715 * do much for this context but let's try it to at least get 3716 * access to memory reserved if the current task is killed (see 3717 * out_of_memory). 
Once filesystems are ready to handle allocation
3718 * failures more gracefully we should just bail out here.
3719 */
3720
3721 /* Exhausted what can be done so it's blame time */
3722 if (out_of_memory(&oc) ||
3723 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3724 *did_some_progress = 1;
3725
3726 /*
3727 * Help non-failing allocations by giving them access to memory
3728 * reserves
3729 */
3730 if (gfp_mask & __GFP_NOFAIL)
3731 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3732 ALLOC_NO_WATERMARKS, ac);
3733 }
3734 out:
3735 mutex_unlock(&oom_lock);
3736 return page;
3737 }
3738
3739 /*
3740 * Maximum number of compaction retries with progress before the OOM
3741 * killer is considered the only way to move forward.
3742 */
3743 #define MAX_COMPACT_RETRIES 16
3744
3745 #ifdef CONFIG_COMPACTION
3746 /* Try memory compaction for high-order allocations before reclaim */
3747 static struct page *
3748 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3749 unsigned int alloc_flags, const struct alloc_context *ac,
3750 enum compact_priority prio, enum compact_result *compact_result)
3751 {
3752 struct page *page = NULL;
3753 unsigned long pflags;
3754 unsigned int noreclaim_flag;
3755
3756 if (!order)
3757 return NULL;
3758
3759 psi_memstall_enter(&pflags);
3760 delayacct_compact_start();
3761 noreclaim_flag = memalloc_noreclaim_save();
3762
3763 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3764 prio, &page);
3765
3766 memalloc_noreclaim_restore(noreclaim_flag);
3767 psi_memstall_leave(&pflags);
3768 delayacct_compact_end();
3769
3770 if (*compact_result == COMPACT_SKIPPED)
3771 return NULL;
3772 /*
3773 * In at least one zone compaction wasn't deferred or skipped, so let's
3774 * count a compaction stall
3775 */
3776 count_vm_event(COMPACTSTALL);
3777
3778 /* Prep a captured page if available */
3779 if (page)
3780 prep_new_page(page, order, gfp_mask, alloc_flags);
3781
3782 /* Try to get a page from the freelist if available */
3783 if (!page)
3784 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3785
3786 if (page) {
3787 struct zone *zone = page_zone(page);
3788
3789 zone->compact_blockskip_flush = false;
3790 compaction_defer_reset(zone, order, true);
3791 count_vm_event(COMPACTSUCCESS);
3792 return page;
3793 }
3794
3795 /*
3796 * It's bad if a compaction run occurs and fails. The most likely reason
3797 * is that pages exist, but not enough to satisfy watermarks.
3798 */
3799 count_vm_event(COMPACTFAIL);
3800
3801 cond_resched();
3802
3803 return NULL;
3804 }
3805
3806 static inline bool
3807 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3808 enum compact_result compact_result,
3809 enum compact_priority *compact_priority,
3810 int *compaction_retries)
3811 {
3812 int max_retries = MAX_COMPACT_RETRIES;
3813 int min_priority;
3814 bool ret = false;
3815 int retries = *compaction_retries;
3816 enum compact_priority priority = *compact_priority;
3817
3818 if (!order)
3819 return false;
3820
3821 if (fatal_signal_pending(current))
3822 return false;
3823
3824 /*
3825 * Compaction was skipped due to a lack of free order-0
3826 * migration targets. Continue if reclaim can help.
3827 */
3828 if (compact_result == COMPACT_SKIPPED) {
3829 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3830 goto out;
3831 }
3832
3833 /*
3834 * Compaction managed to coalesce some page blocks, but the
3835 * allocation failed presumably due to a race. Retry a few times.
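 *
 * As a worked example of the retry budget below: with
 * MAX_COMPACT_RETRIES == 16, a costly request (order >
 * PAGE_ALLOC_COSTLY_ORDER) gets at most 16 / 4 == 4 retries here,
 * while a !costly request may retry the full 16 times.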
3836 */
3837 if (compact_result == COMPACT_SUCCESS) {
3838 /*
3839 * !costly requests are much more important than
3840 * __GFP_RETRY_MAYFAIL costly ones: they are de facto
3841 * nofail and invoke the OOM killer to move on, while
3842 * costly requests can fail and their users are ready
3843 * to cope with that. The 1/4 retry budget is rather
3844 * arbitrary, but we would need much more detailed
3845 * feedback from compaction to make a better decision.
3846 */
3847 if (order > PAGE_ALLOC_COSTLY_ORDER)
3848 max_retries /= 4;
3849
3850 if (++(*compaction_retries) <= max_retries) {
3851 ret = true;
3852 goto out;
3853 }
3854 }
3855
3856 /*
3857 * Compaction failed. Retry with increasing priority.
3858 */
3859 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3860 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3861
3862 if (*compact_priority > min_priority) {
3863 (*compact_priority)--;
3864 *compaction_retries = 0;
3865 ret = true;
3866 }
3867 out:
3868 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3869 return ret;
3870 }
3871 #else
3872 static inline struct page *
3873 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3874 unsigned int alloc_flags, const struct alloc_context *ac,
3875 enum compact_priority prio, enum compact_result *compact_result)
3876 {
3877 *compact_result = COMPACT_SKIPPED;
3878 return NULL;
3879 }
3880
3881 static inline bool
3882 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3883 enum compact_result compact_result,
3884 enum compact_priority *compact_priority,
3885 int *compaction_retries)
3886 {
3887 struct zone *zone;
3888 struct zoneref *z;
3889
3890 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3891 return false;
3892
3893 /*
3894 * There are setups with compaction disabled which would prefer to loop
3895 * inside the allocator rather than hit the oom killer prematurely.
3896 * Give them a chance and keep retrying while the order-0
3897 * watermarks are OK.
3898 */
3899 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3900 ac->highest_zoneidx, ac->nodemask) {
3901 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3902 ac->highest_zoneidx, alloc_flags))
3903 return true;
3904 }
3905 return false;
3906 }
3907 #endif /* CONFIG_COMPACTION */
3908
3909 #ifdef CONFIG_LOCKDEP
3910 static struct lockdep_map __fs_reclaim_map =
3911 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3912
3913 static bool __need_reclaim(gfp_t gfp_mask)
3914 {
3915 /* no reclaim without waiting on it */
3916 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3917 return false;
3918
3919 /* this task won't enter reclaim */
3920 if (current->flags & PF_MEMALLOC)
3921 return false;
3922
3923 if (gfp_mask & __GFP_NOLOCKDEP)
3924 return false;
3925
3926 return true;
3927 }
3928
3929 void __fs_reclaim_acquire(unsigned long ip)
3930 {
3931 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
3932 }
3933
3934 void __fs_reclaim_release(unsigned long ip)
3935 {
3936 lock_release(&__fs_reclaim_map, ip);
3937 }
3938
3939 void fs_reclaim_acquire(gfp_t gfp_mask)
3940 {
3941 gfp_mask = current_gfp_context(gfp_mask);
3942
3943 if (__need_reclaim(gfp_mask)) {
3944 if (gfp_mask & __GFP_FS)
3945 __fs_reclaim_acquire(_RET_IP_);
3946
3947 #ifdef CONFIG_MMU_NOTIFIER
3948 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
3949 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
3950 #endif
3951
3952 }
3953 }
3954 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3955
3956 void fs_reclaim_release(gfp_t gfp_mask)
3957 {
3958 gfp_mask = current_gfp_context(gfp_mask);
3959
3960 if (__need_reclaim(gfp_mask)) {
3961 if (gfp_mask & __GFP_FS)
3962 __fs_reclaim_release(_RET_IP_);
3963 }
3964 }
3965 EXPORT_SYMBOL_GPL(fs_reclaim_release);
3966 #endif
3967
3968 /*
3969 * Zonelists may change due to hotplug during allocation. Detect when zonelists
3970 * have been rebuilt so the allocation can be retried. The reader side does not
3971 * lock and simply retries the allocation if the zonelist changes. The writer
3972 * side is protected by the embedded spin_lock.
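 *
 * A minimal sketch of the reader-side pattern, as used by
 * __alloc_pages_slowpath() below:
 *
 *	cookie = zonelist_iter_begin();
 *	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 *	if (!page && check_retry_zonelist(cookie))
 *		goto restart;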
3973 */ 3974 static DEFINE_SEQLOCK(zonelist_update_seq); 3975 3976 static unsigned int zonelist_iter_begin(void) 3977 { 3978 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3979 return read_seqbegin(&zonelist_update_seq); 3980 3981 return 0; 3982 } 3983 3984 static unsigned int check_retry_zonelist(unsigned int seq) 3985 { 3986 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3987 return read_seqretry(&zonelist_update_seq, seq); 3988 3989 return seq; 3990 } 3991 3992 /* Perform direct synchronous page reclaim */ 3993 static unsigned long 3994 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3995 const struct alloc_context *ac) 3996 { 3997 unsigned int noreclaim_flag; 3998 unsigned long progress; 3999 4000 cond_resched(); 4001 4002 /* We now go into synchronous reclaim */ 4003 cpuset_memory_pressure_bump(); 4004 fs_reclaim_acquire(gfp_mask); 4005 noreclaim_flag = memalloc_noreclaim_save(); 4006 4007 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4008 ac->nodemask); 4009 4010 memalloc_noreclaim_restore(noreclaim_flag); 4011 fs_reclaim_release(gfp_mask); 4012 4013 cond_resched(); 4014 4015 return progress; 4016 } 4017 4018 /* The really slow allocator path where we enter direct reclaim */ 4019 static inline struct page * 4020 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4021 unsigned int alloc_flags, const struct alloc_context *ac, 4022 unsigned long *did_some_progress) 4023 { 4024 struct page *page = NULL; 4025 unsigned long pflags; 4026 bool drained = false; 4027 4028 psi_memstall_enter(&pflags); 4029 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4030 if (unlikely(!(*did_some_progress))) 4031 goto out; 4032 4033 retry: 4034 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4035 4036 /* 4037 * If an allocation failed after direct reclaim, it could be because 4038 * pages are pinned on the per-cpu lists or in high alloc reserves. 4039 * Shrink them and try again 4040 */ 4041 if (!page && !drained) { 4042 unreserve_highatomic_pageblock(ac, false); 4043 drain_all_pages(NULL); 4044 drained = true; 4045 goto retry; 4046 } 4047 out: 4048 psi_memstall_leave(&pflags); 4049 4050 return page; 4051 } 4052 4053 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4054 const struct alloc_context *ac) 4055 { 4056 struct zoneref *z; 4057 struct zone *zone; 4058 pg_data_t *last_pgdat = NULL; 4059 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4060 4061 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4062 ac->nodemask) { 4063 if (!managed_zone(zone)) 4064 continue; 4065 if (last_pgdat != zone->zone_pgdat) { 4066 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4067 last_pgdat = zone->zone_pgdat; 4068 } 4069 } 4070 } 4071 4072 static inline unsigned int 4073 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4074 { 4075 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4076 4077 /* 4078 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4079 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4080 * to save two branches. 4081 */ 4082 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4083 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4084 4085 /* 4086 * The caller may dip into page reserves a bit more if the caller 4087 * cannot run direct reclaim, or if the caller has realtime scheduling 4088 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4089 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
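 *
 * As a worked example (a sketch, not an exhaustive list): GFP_ATOMIC is
 * __GFP_HIGH | __GFP_KSWAPD_RECLAIM, so the code below maps it to
 * ALLOC_MIN_RESERVE | ALLOC_KSWAPD, then adds ALLOC_NON_BLOCK (plus
 * ALLOC_HIGHATOMIC for order > 0), and, because ALLOC_MIN_RESERVE is set,
 * clears ALLOC_CPUSET rather than failing.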
4090 */
4091 alloc_flags |= (__force int)
4092 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4093
4094 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
4095 /*
4096 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4097 * if it can't schedule.
4098 */
4099 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
4100 alloc_flags |= ALLOC_NON_BLOCK;
4101
4102 if (order > 0)
4103 alloc_flags |= ALLOC_HIGHATOMIC;
4104 }
4105
4106 /*
4107 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4108 * GFP_ATOMIC) rather than fail, see the comment for
4109 * cpuset_node_allowed().
4110 */
4111 if (alloc_flags & ALLOC_MIN_RESERVE)
4112 alloc_flags &= ~ALLOC_CPUSET;
4113 } else if (unlikely(rt_or_dl_task(current)) && in_task())
4114 alloc_flags |= ALLOC_MIN_RESERVE;
4115
4116 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4117
4118 return alloc_flags;
4119 }
4120
4121 static bool oom_reserves_allowed(struct task_struct *tsk)
4122 {
4123 if (!tsk_is_oom_victim(tsk))
4124 return false;
4125
4126 /*
4127 * !MMU has no OOM reaper, so give access to memory reserves
4128 * only to the thread with TIF_MEMDIE set
4129 */
4130 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4131 return false;
4132
4133 return true;
4134 }
4135
4136 /*
4137 * Distinguish requests which really need access to full memory
4138 * reserves from oom victims which can live with a portion of it
4139 */
4140 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4141 {
4142 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4143 return 0;
4144 if (gfp_mask & __GFP_MEMALLOC)
4145 return ALLOC_NO_WATERMARKS;
4146 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4147 return ALLOC_NO_WATERMARKS;
4148 if (!in_interrupt()) {
4149 if (current->flags & PF_MEMALLOC)
4150 return ALLOC_NO_WATERMARKS;
4151 else if (oom_reserves_allowed(current))
4152 return ALLOC_OOM;
4153 }
4154
4155 return 0;
4156 }
4157
4158 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4159 {
4160 return !!__gfp_pfmemalloc_flags(gfp_mask);
4161 }
4162
4163 /*
4164 * Checks whether it makes sense to retry the reclaim to make forward progress
4165 * for the given allocation request.
4166 *
4167 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4168 * without success, or when we couldn't even meet the watermark if we
4169 * reclaimed all remaining pages on the LRU lists.
4170 *
4171 * Returns true if a retry is viable or false to enter the oom path.
4172 */
4173 static inline bool
4174 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4175 struct alloc_context *ac, int alloc_flags,
4176 bool did_some_progress, int *no_progress_loops)
4177 {
4178 struct zone *zone;
4179 struct zoneref *z;
4180 bool ret = false;
4181
4182 /*
4183 * Costly allocations might have made progress, but this doesn't mean
4184 * their order will become available due to high fragmentation, so
4185 * always increment the no-progress counter for them
4186 */
4187 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4188 *no_progress_loops = 0;
4189 else
4190 (*no_progress_loops)++;
4191
4192 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4193 goto out;
4194
4195
4196 /*
4197 * Keep reclaiming pages while there is a chance this will lead
4198 * somewhere. If none of the target zones can satisfy our allocation
4199 * request even if all reclaimable pages are considered then we are
4200 * screwed and have to go OOM.
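 *
 * Concretely, the per-zone check below computes an optimistic estimate
 *
 *	available = zone_reclaimable_pages(zone) +
 *		    zone_page_state_snapshot(zone, NR_FREE_PAGES)
 *
 * and keeps retrying as long as __zone_watermark_ok() would pass for at
 * least one eligible zone with that estimate.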
4201 */ 4202 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4203 ac->highest_zoneidx, ac->nodemask) { 4204 unsigned long available; 4205 unsigned long reclaimable; 4206 unsigned long min_wmark = min_wmark_pages(zone); 4207 bool wmark; 4208 4209 if (cpusets_enabled() && 4210 (alloc_flags & ALLOC_CPUSET) && 4211 !__cpuset_zone_allowed(zone, gfp_mask)) 4212 continue; 4213 4214 available = reclaimable = zone_reclaimable_pages(zone); 4215 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4216 4217 /* 4218 * Would the allocation succeed if we reclaimed all 4219 * reclaimable pages? 4220 */ 4221 wmark = __zone_watermark_ok(zone, order, min_wmark, 4222 ac->highest_zoneidx, alloc_flags, available); 4223 trace_reclaim_retry_zone(z, order, reclaimable, 4224 available, min_wmark, *no_progress_loops, wmark); 4225 if (wmark) { 4226 ret = true; 4227 break; 4228 } 4229 } 4230 4231 /* 4232 * Memory allocation/reclaim might be called from a WQ context and the 4233 * current implementation of the WQ concurrency control doesn't 4234 * recognize that a particular WQ is congested if the worker thread is 4235 * looping without ever sleeping. Therefore we have to do a short sleep 4236 * here rather than calling cond_resched(). 4237 */ 4238 if (current->flags & PF_WQ_WORKER) 4239 schedule_timeout_uninterruptible(1); 4240 else 4241 cond_resched(); 4242 out: 4243 /* Before OOM, exhaust highatomic_reserve */ 4244 if (!ret) 4245 return unreserve_highatomic_pageblock(ac, true); 4246 4247 return ret; 4248 } 4249 4250 static inline bool 4251 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4252 { 4253 /* 4254 * It's possible that cpuset's mems_allowed and the nodemask from 4255 * mempolicy don't intersect. This should be normally dealt with by 4256 * policy_nodemask(), but it's possible to race with cpuset update in 4257 * such a way the check therein was true, and then it became false 4258 * before we got our cpuset_mems_cookie here. 4259 * This assumes that for all allocations, ac->nodemask can come only 4260 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4261 * when it does not intersect with the cpuset restrictions) or the 4262 * caller can deal with a violated nodemask. 4263 */ 4264 if (cpusets_enabled() && ac->nodemask && 4265 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4266 ac->nodemask = NULL; 4267 return true; 4268 } 4269 4270 /* 4271 * When updating a task's mems_allowed or mempolicy nodemask, it is 4272 * possible to race with parallel threads in such a way that our 4273 * allocation can fail while the mask is being updated. If we are about 4274 * to fail, check if the cpuset changed during allocation and if so, 4275 * retry. 
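 *
 * The caller-side pattern (a sketch, as used in __alloc_pages_slowpath())
 * is roughly:
 *
 *	cookie = read_mems_allowed_begin();
 *	...allocation attempts...
 *	if (!page && check_retry_cpuset(cookie, ac))
 *		goto restart;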
4276 */
4277 if (read_mems_allowed_retry(cpuset_mems_cookie))
4278 return true;
4279
4280 return false;
4281 }
4282
4283 static inline struct page *
4284 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4285 struct alloc_context *ac)
4286 {
4287 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4288 bool can_compact = gfp_compaction_allowed(gfp_mask);
4289 bool nofail = gfp_mask & __GFP_NOFAIL;
4290 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4291 struct page *page = NULL;
4292 unsigned int alloc_flags;
4293 unsigned long did_some_progress;
4294 enum compact_priority compact_priority;
4295 enum compact_result compact_result;
4296 int compaction_retries;
4297 int no_progress_loops;
4298 unsigned int cpuset_mems_cookie;
4299 unsigned int zonelist_iter_cookie;
4300 int reserve_flags;
4301
4302 if (unlikely(nofail)) {
4303 /*
4304 * We most definitely don't want callers attempting to
4305 * allocate greater than order-1 page units with __GFP_NOFAIL.
4306 */
4307 WARN_ON_ONCE(order > 1);
4308 /*
4309 * We also don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
4310 * otherwise we may end up in a lockup.
4311 */
4312 WARN_ON_ONCE(!can_direct_reclaim);
4313 /*
4314 * A PF_MEMALLOC request from this context is rather bizarre
4315 * because we cannot reclaim anything and can only loop waiting
4316 * for somebody to do the work for us.
4317 */
4318 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4319 }
4320
4321 restart:
4322 compaction_retries = 0;
4323 no_progress_loops = 0;
4324 compact_result = COMPACT_SKIPPED;
4325 compact_priority = DEF_COMPACT_PRIORITY;
4326 cpuset_mems_cookie = read_mems_allowed_begin();
4327 zonelist_iter_cookie = zonelist_iter_begin();
4328
4329 /*
4330 * The fast path uses conservative alloc_flags to succeed only until
4331 * kswapd needs to be woken up, and to avoid the cost of setting up
4332 * alloc_flags precisely. So we do that now.
4333 */
4334 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4335
4336 /*
4337 * We need to recalculate the starting point for the zonelist iterator
4338 * because we might have used a different nodemask in the fast path, or
4339 * there was a cpuset modification and we are retrying - otherwise we
4340 * could end up iterating over non-eligible zones endlessly.
4341 */
4342 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4343 ac->highest_zoneidx, ac->nodemask);
4344 if (!zonelist_zone(ac->preferred_zoneref))
4345 goto nopage;
4346
4347 /*
4348 * Check for insane configurations where the cpuset doesn't contain
4349 * any suitable zone to satisfy the request - e.g. non-movable
4350 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4351 */
4352 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4353 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4354 ac->highest_zoneidx,
4355 &cpuset_current_mems_allowed);
4356 if (!zonelist_zone(z))
4357 goto nopage;
4358 }
4359
4360 if (alloc_flags & ALLOC_KSWAPD)
4361 wake_all_kswapds(order, gfp_mask, ac);
4362
4363 /*
4364 * The adjusted alloc_flags might result in immediate success, so try
4365 * that first
4366 */
4367 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4368 if (page)
4369 goto got_pg;
4370
4371 /*
4372 * For costly allocations, try direct compaction first, as it's likely
4373 * that we have enough base pages and don't need to reclaim.
For non-
4374 * movable high-order allocations, do that as well, as compaction will
4375 * try to prevent permanent fragmentation by migrating from blocks of the
4376 * same migratetype.
4377 * Don't try this for allocations that are allowed to ignore
4378 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4379 */
4380 if (can_direct_reclaim && can_compact &&
4381 (costly_order ||
4382 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4383 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4384 page = __alloc_pages_direct_compact(gfp_mask, order,
4385 alloc_flags, ac,
4386 INIT_COMPACT_PRIORITY,
4387 &compact_result);
4388 if (page)
4389 goto got_pg;
4390
4391 /*
4392 * Checks for costly allocations with __GFP_NORETRY, which
4393 * includes some THP page fault allocations
4394 */
4395 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4396 /*
4397 * If allocating entire pageblock(s) and compaction
4398 * failed because all zones are below low watermarks
4399 * or it is prohibited because it recently failed at this
4400 * order, fail immediately unless the allocator has
4401 * requested compaction and reclaim retry.
4402 *
4403 * Reclaim is
4404 * - potentially very expensive because zones are far
4405 * below their low watermarks or this is part of very
4406 * bursty high order allocations,
4407 * - not guaranteed to help because isolate_freepages()
4408 * may not iterate over freed pages as part of its
4409 * linear scan, and
4410 * - unlikely to make entire pageblocks free on its
4411 * own.
4412 */
4413 if (compact_result == COMPACT_SKIPPED ||
4414 compact_result == COMPACT_DEFERRED)
4415 goto nopage;
4416
4417 /*
4418 * Looks like reclaim/compaction is worth trying, but
4419 * sync compaction could be very expensive, so keep
4420 * using async compaction.
4421 */
4422 compact_priority = INIT_COMPACT_PRIORITY;
4423 }
4424 }
4425
4426 retry:
4427 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4428 if (alloc_flags & ALLOC_KSWAPD)
4429 wake_all_kswapds(order, gfp_mask, ac);
4430
4431 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4432 if (reserve_flags)
4433 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4434 (alloc_flags & ALLOC_KSWAPD);
4435
4436 /*
4437 * Reset the nodemask and zonelist iterators if memory policies can be
4438 * ignored. These allocations are high priority and system- rather than
4439 * user-oriented.
4440 */
4441 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4442 ac->nodemask = NULL;
4443 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4444 ac->highest_zoneidx, ac->nodemask);
4445 }
4446
4447 /* Attempt with potentially adjusted zonelist and alloc_flags */
4448 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4449 if (page)
4450 goto got_pg;
4451
4452 /* Caller is not willing to reclaim, we can't balance anything */
4453 if (!can_direct_reclaim)
4454 goto nopage;
4455
4456 /* Avoid recursion of direct reclaim */
4457 if (current->flags & PF_MEMALLOC)
4458 goto nopage;
4459
4460 /* Try direct reclaim and then allocating */
4461 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4462 &did_some_progress);
4463 if (page)
4464 goto got_pg;
4465
4466 /* Try direct compaction and then allocating */
4467 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4468 compact_priority, &compact_result);
4469 if (page)
4470 goto got_pg;
4471
4472 /* Do not loop if specifically requested */
4473 if (gfp_mask & __GFP_NORETRY)
4474 goto nopage;
4475
4476 /*
4477 * Do not retry costly high order allocations unless they are
4478 * __GFP_RETRY_MAYFAIL and we can compact
4479 */
4480 if (costly_order && (!can_compact ||
4481 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4482 goto nopage;
4483
4484 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4485 did_some_progress > 0, &no_progress_loops))
4486 goto retry;
4487
4488 /*
4489 * It doesn't make any sense to retry compaction if order-0
4490 * reclaim is not able to make any progress, because the current
4491 * implementation of compaction depends on a sufficient amount
4492 * of free memory (see __compaction_suitable)
4493 */
4494 if (did_some_progress > 0 && can_compact &&
4495 should_compact_retry(ac, order, alloc_flags,
4496 compact_result, &compact_priority,
4497 &compaction_retries))
4498 goto retry;
4499
4500
4501 /*
4502 * Deal with possible cpuset update races or zonelist updates to avoid
4503 * an unnecessary OOM kill.
4504 */
4505 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4506 check_retry_zonelist(zonelist_iter_cookie))
4507 goto restart;
4508
4509 /* Reclaim has failed us, start killing things */
4510 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4511 if (page)
4512 goto got_pg;
4513
4514 /* Avoid allocations with no watermarks from looping endlessly */
4515 if (tsk_is_oom_victim(current) &&
4516 (alloc_flags & ALLOC_OOM ||
4517 (gfp_mask & __GFP_NOMEMALLOC)))
4518 goto nopage;
4519
4520 /* Retry as long as the OOM killer is making progress */
4521 if (did_some_progress) {
4522 no_progress_loops = 0;
4523 goto retry;
4524 }
4525
4526 nopage:
4527 /*
4528 * Deal with possible cpuset update races or zonelist updates to avoid
4529 * an unnecessary OOM kill.
4530 */
4531 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4532 check_retry_zonelist(zonelist_iter_cookie))
4533 goto restart;
4534
4535 /*
4536 * Make sure that a __GFP_NOFAIL request doesn't leak out, and make sure
4537 * we always retry
4538 */
4539 if (unlikely(nofail)) {
4540 /*
4541 * Without direct reclaim we can't do anything to free memory, so
4542 * we disregard these unreasonable nofail requests and still
4543 * return NULL
4544 */
4545 if (!can_direct_reclaim)
4546 goto fail;
4547
4548 /*
4549 * Help non-failing allocations by giving some access to memory
4550 * reserves normally used for high priority non-blocking
4551 * allocations but do not use ALLOC_NO_WATERMARKS because this
4552 * could deplete whole memory reserves which would just make
4553 * the situation worse.
4554 */
4555 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4556 if (page)
4557 goto got_pg;
4558
4559 cond_resched();
4560 goto retry;
4561 }
4562 fail:
4563 warn_alloc(gfp_mask, ac->nodemask,
4564 "page allocation failure: order:%u", order);
4565 got_pg:
4566 return page;
4567 }
4568
4569 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4570 int preferred_nid, nodemask_t *nodemask,
4571 struct alloc_context *ac, gfp_t *alloc_gfp,
4572 unsigned int *alloc_flags)
4573 {
4574 ac->highest_zoneidx = gfp_zone(gfp_mask);
4575 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4576 ac->nodemask = nodemask;
4577 ac->migratetype = gfp_migratetype(gfp_mask);
4578
4579 if (cpusets_enabled()) {
4580 *alloc_gfp |= __GFP_HARDWALL;
4581 /*
4582 * In interrupt context, the current task's context is
4583 * irrelevant, meaning that any node is OK.
4584 */
4585 if (in_task() && !ac->nodemask)
4586 ac->nodemask = &cpuset_current_mems_allowed;
4587 else
4588 *alloc_flags |= ALLOC_CPUSET;
4589 }
4590
4591 might_alloc(gfp_mask);
4592
4593 if (should_fail_alloc_page(gfp_mask, order))
4594 return false;
4595
4596 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4597
4598 /* Dirty zone balancing only done in the fast path */
4599 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4600
4601 /*
4602 * The preferred zone is used for statistics but crucially it is
4603 * also used as the starting point for the zonelist iterator. It
4604 * may get reset for allocations that ignore memory policies.
4605 */
4606 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4607 ac->highest_zoneidx, ac->nodemask);
4608
4609 return true;
4610 }
4611
4612 /*
4613 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
4614 * @gfp: GFP flags for the allocation
4615 * @preferred_nid: The preferred NUMA node ID to allocate from
4616 * @nodemask: Set of nodes to allocate from, may be NULL
4617 * @nr_pages: The number of pages desired in the array
4618 * @page_array: Array to store the pages
4619 *
4620 * This is a batched version of the page allocator that attempts to
4621 * allocate nr_pages quickly. Pages are added to the page_array.
4622 *
4623 * Note that only NULL elements are populated with pages and nr_pages
4624 * is the maximum number of pages that will be stored in the array.
4625 *
4626 * Returns the number of pages in the array.
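 *
 * Illustrative sketch only (callers normally go through the
 * alloc_pages_bulk() family of wrappers, whose exact names vary between
 * kernel versions):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_noprof(GFP_KERNEL, numa_node_id(), NULL,
 *					 8, pages);
 *
 * filled may be anywhere from 0 to 8, and only previously NULL slots are
 * populated.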
4627 */
4628 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
4629 nodemask_t *nodemask, int nr_pages,
4630 struct page **page_array)
4631 {
4632 struct page *page;
4633 unsigned long __maybe_unused UP_flags;
4634 struct zone *zone;
4635 struct zoneref *z;
4636 struct per_cpu_pages *pcp;
4637 struct list_head *pcp_list;
4638 struct alloc_context ac;
4639 gfp_t alloc_gfp;
4640 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4641 int nr_populated = 0, nr_account = 0;
4642
4643 /*
4644 * Skip populated array elements to determine if any pages need
4645 * to be allocated before disabling IRQs.
4646 */
4647 while (nr_populated < nr_pages && page_array[nr_populated])
4648 nr_populated++;
4649
4650 /* No pages requested? */
4651 if (unlikely(nr_pages <= 0))
4652 goto out;
4653
4654 /* Already populated array? */
4655 if (unlikely(nr_pages - nr_populated == 0))
4656 goto out;
4657
4658 /* Bulk allocator does not support memcg accounting. */
4659 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4660 goto failed;
4661
4662 /* Use the single page allocator for one page. */
4663 if (nr_pages - nr_populated == 1)
4664 goto failed;
4665
4666 #ifdef CONFIG_PAGE_OWNER
4667 /*
4668 * PAGE_OWNER may recurse into the allocator to allocate space to
4669 * save the stack with pagesets.lock held. Releasing/reacquiring
4670 * removes much of the performance benefit of bulk allocation so
4671 * force the caller to allocate one page at a time, as that has
4672 * similar performance without adding complexity to the bulk allocator.
4673 */
4674 if (static_branch_unlikely(&page_owner_inited))
4675 goto failed;
4676 #endif
4677
4678 /* May set ALLOC_NOFRAGMENT; on fragmentation we fall back to a single page. */
4679 gfp &= gfp_allowed_mask;
4680 alloc_gfp = gfp;
4681 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4682 goto out;
4683 gfp = alloc_gfp;
4684
4685 /* Find an allowed local zone that meets the low watermark. */
4686 z = ac.preferred_zoneref;
4687 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
4688 unsigned long mark;
4689
4690 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4691 !__cpuset_zone_allowed(zone, gfp)) {
4692 continue;
4693 }
4694
4695 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
4696 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
4697 goto failed;
4698 }
4699
4700 cond_accept_memory(zone, 0);
4701 retry_this_zone:
4702 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4703 if (zone_watermark_fast(zone, 0, mark,
4704 zonelist_zone_idx(ac.preferred_zoneref),
4705 alloc_flags, gfp)) {
4706 break;
4707 }
4708
4709 if (cond_accept_memory(zone, 0))
4710 goto retry_this_zone;
4711
4712 /* Try again if zone has deferred pages */
4713 if (deferred_pages_enabled()) {
4714 if (_deferred_grow_zone(zone, 0))
4715 goto retry_this_zone;
4716 }
4717 }
4718
4719 /*
4720 * If there are no allowed local zones that meet the watermarks then
4721 * try to allocate a single page and reclaim if necessary.
4722 */
4723 if (unlikely(!zone))
4724 goto failed;
4725
4726 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy.
*/ 4727 pcp_trylock_prepare(UP_flags); 4728 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4729 if (!pcp) 4730 goto failed_irq; 4731 4732 /* Attempt the batch allocation */ 4733 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4734 while (nr_populated < nr_pages) { 4735 4736 /* Skip existing pages */ 4737 if (page_array[nr_populated]) { 4738 nr_populated++; 4739 continue; 4740 } 4741 4742 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4743 pcp, pcp_list); 4744 if (unlikely(!page)) { 4745 /* Try and allocate at least one page */ 4746 if (!nr_account) { 4747 pcp_spin_unlock(pcp); 4748 goto failed_irq; 4749 } 4750 break; 4751 } 4752 nr_account++; 4753 4754 prep_new_page(page, 0, gfp, 0); 4755 set_page_refcounted(page); 4756 page_array[nr_populated++] = page; 4757 } 4758 4759 pcp_spin_unlock(pcp); 4760 pcp_trylock_finish(UP_flags); 4761 4762 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4763 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 4764 4765 out: 4766 return nr_populated; 4767 4768 failed_irq: 4769 pcp_trylock_finish(UP_flags); 4770 4771 failed: 4772 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 4773 if (page) 4774 page_array[nr_populated++] = page; 4775 goto out; 4776 } 4777 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 4778 4779 /* 4780 * This is the 'heart' of the zoned buddy allocator. 4781 */ 4782 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, 4783 int preferred_nid, nodemask_t *nodemask) 4784 { 4785 struct page *page; 4786 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4787 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4788 struct alloc_context ac = { }; 4789 4790 /* 4791 * There are several places where we assume that the order value is sane 4792 * so bail out early if the request is out of bound. 4793 */ 4794 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 4795 return NULL; 4796 4797 gfp &= gfp_allowed_mask; 4798 /* 4799 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4800 * resp. GFP_NOIO which has to be inherited for all allocation requests 4801 * from a particular context which has been marked by 4802 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4803 * movable zones are not used during allocation. 4804 */ 4805 gfp = current_gfp_context(gfp); 4806 alloc_gfp = gfp; 4807 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4808 &alloc_gfp, &alloc_flags)) 4809 return NULL; 4810 4811 /* 4812 * Forbid the first pass from falling back to types that fragment 4813 * memory until all local zones are considered. 4814 */ 4815 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 4816 4817 /* First allocation attempt */ 4818 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4819 if (likely(page)) 4820 goto out; 4821 4822 alloc_gfp = gfp; 4823 ac.spread_dirty_pages = false; 4824 4825 /* 4826 * Restore the original nodemask if it was potentially replaced with 4827 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
4828 */ 4829 ac.nodemask = nodemask; 4830 4831 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4832 4833 out: 4834 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4835 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4836 free_frozen_pages(page, order); 4837 page = NULL; 4838 } 4839 4840 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4841 kmsan_alloc_page(page, order, alloc_gfp); 4842 4843 return page; 4844 } 4845 EXPORT_SYMBOL(__alloc_frozen_pages_noprof); 4846 4847 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 4848 int preferred_nid, nodemask_t *nodemask) 4849 { 4850 struct page *page; 4851 4852 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); 4853 if (page) 4854 set_page_refcounted(page); 4855 return page; 4856 } 4857 EXPORT_SYMBOL(__alloc_pages_noprof); 4858 4859 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 4860 nodemask_t *nodemask) 4861 { 4862 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 4863 preferred_nid, nodemask); 4864 return page_rmappable_folio(page); 4865 } 4866 EXPORT_SYMBOL(__folio_alloc_noprof); 4867 4868 /* 4869 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4870 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4871 * you need to access high mem. 4872 */ 4873 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 4874 { 4875 struct page *page; 4876 4877 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 4878 if (!page) 4879 return 0; 4880 return (unsigned long) page_address(page); 4881 } 4882 EXPORT_SYMBOL(get_free_pages_noprof); 4883 4884 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 4885 { 4886 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 4887 } 4888 EXPORT_SYMBOL(get_zeroed_page_noprof); 4889 4890 /** 4891 * __free_pages - Free pages allocated with alloc_pages(). 4892 * @page: The page pointer returned from alloc_pages(). 4893 * @order: The order of the allocation. 4894 * 4895 * This function can free multi-page allocations that are not compound 4896 * pages. It does not check that the @order passed in matches that of 4897 * the allocation, so it is easy to leak memory. Freeing more memory 4898 * than was allocated will probably emit a warning. 4899 * 4900 * If the last reference to this page is speculative, it will be released 4901 * by put_page() which only frees the first page of a non-compound 4902 * allocation. To prevent the remaining pages from being leaked, we free 4903 * the subsequent pages here. If you want to use the page's reference 4904 * count to decide when to free the allocation, you should allocate a 4905 * compound page, and use put_page() instead of __free_pages(). 4906 * 4907 * Context: May be called in interrupt context or while holding a normal 4908 * spinlock, but not in NMI context or while holding a raw spinlock. 
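 *
 * Example (a sketch): a non-compound, order-2 allocation is freed with
 * the same order it was allocated with:
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (page)
 *		__free_pages(page, 2);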
4909 */
4910 void __free_pages(struct page *page, unsigned int order)
4911 {
4912 /* get PageHead before we drop reference */
4913 int head = PageHead(page);
4914
4915 if (put_page_testzero(page))
4916 free_frozen_pages(page, order);
4917 else if (!head) {
4918 pgalloc_tag_sub_pages(page, (1 << order) - 1);
4919 while (order-- > 0)
4920 free_frozen_pages(page + (1 << order), order);
4921 }
4922 }
4923 EXPORT_SYMBOL(__free_pages);
4924
4925 void free_pages(unsigned long addr, unsigned int order)
4926 {
4927 if (addr != 0) {
4928 VM_BUG_ON(!virt_addr_valid((void *)addr));
4929 __free_pages(virt_to_page((void *)addr), order);
4930 }
4931 }
4932
4933 EXPORT_SYMBOL(free_pages);
4934
4935 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4936 size_t size)
4937 {
4938 if (addr) {
4939 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
4940 struct page *page = virt_to_page((void *)addr);
4941 struct page *last = page + nr;
4942
4943 split_page_owner(page, order, 0);
4944 pgalloc_tag_split(page_folio(page), order, 0);
4945 split_page_memcg(page, order, 0);
4946 while (page < --last)
4947 set_page_refcounted(last);
4948
4949 last = page + (1UL << order);
4950 for (page += nr; page < last; page++)
4951 __free_pages_ok(page, 0, FPI_TO_TAIL);
4952 }
4953 return (void *)addr;
4954 }
4955
4956 /**
4957 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4958 * @size: the number of bytes to allocate
4959 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4960 *
4961 * This function is similar to alloc_pages(), except that it allocates the
4962 * minimum number of pages to satisfy the request. alloc_pages() can only
4963 * allocate memory in power-of-two numbers of pages.
4964 *
4965 * This function is also limited by MAX_PAGE_ORDER.
4966 *
4967 * Memory allocated by this function must be released by free_pages_exact().
4968 *
4969 * Return: pointer to the allocated area or %NULL in case of error.
4970 */
4971 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
4972 {
4973 unsigned int order = get_order(size);
4974 unsigned long addr;
4975
4976 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4977 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4978
4979 addr = get_free_pages_noprof(gfp_mask, order);
4980 return make_alloc_exact(addr, order, size);
4981 }
4982 EXPORT_SYMBOL(alloc_pages_exact_noprof);
4983
4984 /**
4985 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4986 * pages on a node.
4987 * @nid: the preferred node ID where memory should be allocated
4988 * @size: the number of bytes to allocate
4989 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4990 *
4991 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4992 * back.
4993 *
4994 * Return: pointer to the allocated area or %NULL in case of error.
4995 */
4996 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
4997 {
4998 unsigned int order = get_order(size);
4999 struct page *p;
5000
5001 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5002 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5003
5004 p = alloc_pages_node_noprof(nid, gfp_mask, order);
5005 if (!p)
5006 return NULL;
5007 return make_alloc_exact((unsigned long)page_address(p), order, size);
5008 }
5009
5010 /**
5011 * free_pages_exact - release memory allocated via alloc_pages_exact()
5012 * @virt: the value returned by alloc_pages_exact.
5013 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5014 * 5015 * Release the memory allocated by a previous call to alloc_pages_exact. 5016 */ 5017 void free_pages_exact(void *virt, size_t size) 5018 { 5019 unsigned long addr = (unsigned long)virt; 5020 unsigned long end = addr + PAGE_ALIGN(size); 5021 5022 while (addr < end) { 5023 free_page(addr); 5024 addr += PAGE_SIZE; 5025 } 5026 } 5027 EXPORT_SYMBOL(free_pages_exact); 5028 5029 /** 5030 * nr_free_zone_pages - count number of pages beyond high watermark 5031 * @offset: The zone index of the highest zone 5032 * 5033 * nr_free_zone_pages() counts the number of pages which are beyond the 5034 * high watermark within all zones at or below a given zone index. For each 5035 * zone, the number of pages is calculated as: 5036 * 5037 * nr_free_zone_pages = managed_pages - high_pages 5038 * 5039 * Return: number of pages beyond high watermark. 5040 */ 5041 static unsigned long nr_free_zone_pages(int offset) 5042 { 5043 struct zoneref *z; 5044 struct zone *zone; 5045 5046 /* Just pick one node, since fallback list is circular */ 5047 unsigned long sum = 0; 5048 5049 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5050 5051 for_each_zone_zonelist(zone, z, zonelist, offset) { 5052 unsigned long size = zone_managed_pages(zone); 5053 unsigned long high = high_wmark_pages(zone); 5054 if (size > high) 5055 sum += size - high; 5056 } 5057 5058 return sum; 5059 } 5060 5061 /** 5062 * nr_free_buffer_pages - count number of pages beyond high watermark 5063 * 5064 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5065 * watermark within ZONE_DMA and ZONE_NORMAL. 5066 * 5067 * Return: number of pages beyond high watermark within ZONE_DMA and 5068 * ZONE_NORMAL. 5069 */ 5070 unsigned long nr_free_buffer_pages(void) 5071 { 5072 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5073 } 5074 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5075 5076 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5077 { 5078 zoneref->zone = zone; 5079 zoneref->zone_idx = zone_idx(zone); 5080 } 5081 5082 /* 5083 * Builds allocation fallback zone lists. 5084 * 5085 * Add all populated zones of a node to the zonelist. 5086 */ 5087 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5088 { 5089 struct zone *zone; 5090 enum zone_type zone_type = MAX_NR_ZONES; 5091 int nr_zones = 0; 5092 5093 do { 5094 zone_type--; 5095 zone = pgdat->node_zones + zone_type; 5096 if (populated_zone(zone)) { 5097 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5098 check_highest_zone(zone_type); 5099 } 5100 } while (zone_type); 5101 5102 return nr_zones; 5103 } 5104 5105 #ifdef CONFIG_NUMA 5106 5107 static int __parse_numa_zonelist_order(char *s) 5108 { 5109 /* 5110 * We used to support different zonelists modes but they turned 5111 * out to be just not useful. 
Let's keep the warning in place
5112 * if somebody still uses the command line parameter so that we do
5113 * not fail it silently
5114 */
5115 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5116 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
5117 return -EINVAL;
5118 }
5119 return 0;
5120 }
5121
5122 static char numa_zonelist_order[] = "Node";
5123 #define NUMA_ZONELIST_ORDER_LEN 16
5124 /*
5125 * sysctl handler for numa_zonelist_order
5126 */
5127 static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
5128 void *buffer, size_t *length, loff_t *ppos)
5129 {
5130 if (write)
5131 return __parse_numa_zonelist_order(buffer);
5132 return proc_dostring(table, write, buffer, length, ppos);
5133 }
5134
5135 static int node_load[MAX_NUMNODES];
5136
5137 /**
5138 * find_next_best_node - find the next node that should appear in a given node's fallback list
5139 * @node: node whose fallback list we're appending
5140 * @used_node_mask: nodemask_t of already used nodes
5141 *
5142 * We use a number of factors to determine which is the next node that should
5143 * appear on a given node's fallback list. The node should not have appeared
5144 * already in @node's fallback list, and it should be the next closest node
5145 * according to the distance array (which contains arbitrary distance values
5146 * from each node to each node in the system); we also prefer nodes
5147 * with no CPUs, since presumably they'll have very little allocation pressure
5148 * on them otherwise.
5149 *
5150 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5151 */
5152 int find_next_best_node(int node, nodemask_t *used_node_mask)
5153 {
5154 int n, val;
5155 int min_val = INT_MAX;
5156 int best_node = NUMA_NO_NODE;
5157
5158 /*
5159 * Use the local node if we haven't already, but skip a memoryless
5160 * local node and fall back to other nodes instead.
5161 */
5162 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
5163 node_set(node, *used_node_mask);
5164 return node;
5165 }
5166
5167 for_each_node_state(n, N_MEMORY) {
5168
5169 /* Don't want a node to appear more than once */
5170 if (node_isset(n, *used_node_mask))
5171 continue;
5172
5173 /* Use the distance array to find the distance */
5174 val = node_distance(node, n);
5175
5176 /* Penalize nodes under us ("prefer the next node") */
5177 val += (n < node);
5178
5179 /* Give preference to headless and unused nodes */
5180 if (!cpumask_empty(cpumask_of_node(n)))
5181 val += PENALTY_FOR_NODE_WITH_CPUS;
5182
5183 /* Slight preference for less loaded node */
5184 val *= MAX_NUMNODES;
5185 val += node_load[n];
5186
5187 if (val < min_val) {
5188 min_val = val;
5189 best_node = n;
5190 }
5191 }
5192
5193 if (best_node >= 0)
5194 node_set(best_node, *used_node_mask);
5195
5196 return best_node;
5197 }
5198
5199
5200 /*
5201 * Build zonelists ordered by node and zones within node.
5202 * This results in maximum locality: the normal zone overflows into the local
5203 * DMA zone, if any, but risks exhausting the DMA zone.
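 *
 * For example (illustrative): on a two-node machine, node 0's fallback
 * list would contain node 0's populated zones from highest to lowest,
 * followed by node 1's, so pressure spills into local lower zones before
 * going off-node.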
5204 */
5205 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5206 unsigned nr_nodes)
5207 {
5208 struct zoneref *zonerefs;
5209 int i;
5210
5211 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5212
5213 for (i = 0; i < nr_nodes; i++) {
5214 int nr_zones;
5215
5216 pg_data_t *node = NODE_DATA(node_order[i]);
5217
5218 nr_zones = build_zonerefs_node(node, zonerefs);
5219 zonerefs += nr_zones;
5220 }
5221 zonerefs->zone = NULL;
5222 zonerefs->zone_idx = 0;
5223 }
5224
5225 /*
5226 * Build __GFP_THISNODE zonelists
5227 */
5228 static void build_thisnode_zonelists(pg_data_t *pgdat)
5229 {
5230 struct zoneref *zonerefs;
5231 int nr_zones;
5232
5233 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5234 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5235 zonerefs += nr_zones;
5236 zonerefs->zone = NULL;
5237 zonerefs->zone_idx = 0;
5238 }
5239
5240 static void build_zonelists(pg_data_t *pgdat)
5241 {
5242 static int node_order[MAX_NUMNODES];
5243 int node, nr_nodes = 0;
5244 nodemask_t used_mask = NODE_MASK_NONE;
5245 int local_node, prev_node;
5246
5247 /* NUMA-aware ordering of nodes */
5248 local_node = pgdat->node_id;
5249 prev_node = local_node;
5250
5251 memset(node_order, 0, sizeof(node_order));
5252 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5253 /*
5254 * We don't want to pressure a particular node, so add a
5255 * penalty to the first node in the same distance group
5256 * to make the selection round-robin.
5257 */
5258 if (node_distance(local_node, node) !=
5259 node_distance(local_node, prev_node))
5260 node_load[node] += 1;
5261
5262 node_order[nr_nodes++] = node;
5263 prev_node = node;
5264 }
5265
5266 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5267 build_thisnode_zonelists(pgdat);
5268 pr_info("Fallback order for Node %d: ", local_node);
5269 for (node = 0; node < nr_nodes; node++)
5270 pr_cont("%d ", node_order[node]);
5271 pr_cont("\n");
5272 }
5273
5274 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5275 /*
5276 * Return the node id of the node used for "local" allocations, i.e. the
5277 * node id of the first zone in the argument node's generic zonelist.
5278 * Used for initializing percpu 'numa_mem', which is used primarily
5279 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5280 */
5281 int local_memory_node(int node)
5282 {
5283 struct zoneref *z;
5284
5285 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5286 gfp_zone(GFP_KERNEL),
5287 NULL);
5288 return zonelist_node_idx(z);
5289 }
5290 #endif
5291
5292 static void setup_min_unmapped_ratio(void);
5293 static void setup_min_slab_ratio(void);
5294 #else /* CONFIG_NUMA */
5295
5296 static void build_zonelists(pg_data_t *pgdat)
5297 {
5298 struct zoneref *zonerefs;
5299 int nr_zones;
5300
5301 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5302 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5303 zonerefs += nr_zones;
5304
5305 zonerefs->zone = NULL;
5306 zonerefs->zone_idx = 0;
5307 }
5308
5309 #endif /* CONFIG_NUMA */
5310
5311 /*
5312 * Boot pageset table. One per cpu which is going to be used for all
5313 * zones and all nodes. The parameters will be set in such a way
5314 * that an item put on a list will immediately be handed over to
5315 * the buddy list. This is safe since pageset manipulation is done
5316 * with interrupts disabled.
5317 *
5318 * The boot_pagesets must be kept even after bootup is complete for
5319 * unused processors and/or zones.
They do play a role for bootstrapping
5320 * hotplugged processors.
5321 *
5322 * zoneinfo_show() and maybe other functions do
5323 * not check if the processor is online before following the pageset pointer.
5324 * Other parts of the kernel may not check if the zone is available.
5325 */
5326 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5327 /* These effectively disable the pcplists in the boot pageset completely */
5328 #define BOOT_PAGESET_HIGH 0
5329 #define BOOT_PAGESET_BATCH 1
5330 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5331 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5332
5333 static void __build_all_zonelists(void *data)
5334 {
5335 int nid;
5336 int __maybe_unused cpu;
5337 pg_data_t *self = data;
5338 unsigned long flags;
5339
5340 /*
5341 * The zonelist_update_seq must be acquired with irqsave because the
5342 * reader can be invoked from IRQ with GFP_ATOMIC.
5343 */
5344 write_seqlock_irqsave(&zonelist_update_seq, flags);
5345 /*
5346 * Also disable synchronous printk() to prevent any printk() from
5347 * trying to hold port->lock, since
5348 * tty_insert_flip_string_and_push_buffer() on another CPU might be
5349 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5350 */
5351 printk_deferred_enter();
5352
5353 #ifdef CONFIG_NUMA
5354 memset(node_load, 0, sizeof(node_load));
5355 #endif
5356
5357 /*
5358 * This node is hotadded and no memory is yet present. So just
5359 * building zonelists is fine - no need to touch other nodes.
5360 */
5361 if (self && !node_online(self->node_id)) {
5362 build_zonelists(self);
5363 } else {
5364 /*
5365 * All possible nodes have pgdat preallocated
5366 * in free_area_init
5367 */
5368 for_each_node(nid) {
5369 pg_data_t *pgdat = NODE_DATA(nid);
5370
5371 build_zonelists(pgdat);
5372 }
5373
5374 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5375 /*
5376 * We now know the "local memory node" for each node--
5377 * i.e., the node of the first zone in the generic zonelist.
5378 * Set up numa_mem percpu variable for on-line cpus. During
5379 * boot, only the boot cpu should be on-line; we'll init the
5380 * secondary cpus' numa_mem as they come on-line. During
5381 * node/memory hotplug, we'll fixup all on-line cpus.
5382 */
5383 for_each_online_cpu(cpu)
5384 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5385 #endif
5386 }
5387
5388 printk_deferred_exit();
5389 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5390 }
5391
5392 static noinline void __init
5393 build_all_zonelists_init(void)
5394 {
5395 int cpu;
5396
5397 __build_all_zonelists(NULL);
5398
5399 /*
5400 * Initialize the boot_pagesets that are going to be used
5401 * for bootstrapping processors. The real pagesets for
5402 * each zone will be allocated later when the per cpu
5403 * allocator is available.
5404 *
5405 * The boot_pagesets are also used for bootstrapping offline
5406 * cpus if the system is already booted, because the pagesets
5407 * are needed to initialize allocators on a specific cpu too.
5408 * E.g. the percpu allocator needs the page allocator, which
5409 * needs the percpu allocator in order to allocate its pagesets
5410 * (a chicken-and-egg dilemma).
5411 */
5412 for_each_possible_cpu(cpu)
5413 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5414
5415 mminit_verify_zonelist();
5416 cpuset_init_current_mems_allowed();
5417 }
5418
5419 /*
5420 * Rebuild the zonelists. Writers are serialized by zonelist_update_seq
5420 * unless system_state == SYSTEM_BOOTING.
5421 * 5422 * __ref due to call of __init annotated helper build_all_zonelists_init 5423 * [protected by SYSTEM_BOOTING]. 5424 */ 5425 void __ref build_all_zonelists(pg_data_t *pgdat) 5426 { 5427 unsigned long vm_total_pages; 5428 5429 if (system_state == SYSTEM_BOOTING) { 5430 build_all_zonelists_init(); 5431 } else { 5432 __build_all_zonelists(pgdat); 5433 /* cpuset refresh routine should be here */ 5434 } 5435 /* Get the number of free pages beyond high watermark in all zones. */ 5436 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5437 /* 5438 * Disable grouping by mobility if the number of pages in the 5439 * system is too low to allow the mechanism to work. It would be 5440 * more accurate, but expensive to check per-zone. This check is 5441 * made on memory-hotadd so a system can start with mobility 5442 * disabled and enable it later 5443 */ 5444 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5445 page_group_by_mobility_disabled = 1; 5446 else 5447 page_group_by_mobility_disabled = 0; 5448 5449 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5450 nr_online_nodes, 5451 str_off_on(page_group_by_mobility_disabled), 5452 vm_total_pages); 5453 #ifdef CONFIG_NUMA 5454 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5455 #endif 5456 } 5457 5458 static int zone_batchsize(struct zone *zone) 5459 { 5460 #ifdef CONFIG_MMU 5461 int batch; 5462 5463 /* 5464 * The number of pages to batch allocate is either ~0.1% 5465 * of the zone or 1MB, whichever is smaller. The batch 5466 * size is striking a balance between allocation latency 5467 * and zone lock contention. 5468 */ 5469 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5470 batch /= 4; /* We effectively *= 4 below */ 5471 if (batch < 1) 5472 batch = 1; 5473 5474 /* 5475 * Clamp the batch to a 2^n - 1 value. Having a power 5476 * of 2 value was found to be more likely to have 5477 * suboptimal cache aliasing properties in some cases. 5478 * 5479 * For example if 2 tasks are alternately allocating 5480 * batches of pages, one task can end up with a lot 5481 * of pages of one half of the possible page colors 5482 * and the other with pages of the other colors. 5483 */ 5484 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5485 5486 return batch; 5487 5488 #else 5489 /* The deferral and batching of frees should be suppressed under NOMMU 5490 * conditions. 5491 * 5492 * The problem is that NOMMU needs to be able to allocate large chunks 5493 * of contiguous memory as there's no hardware page translation to 5494 * assemble apparent contiguous memory from discontiguous pages. 5495 * 5496 * Queueing large contiguous runs of pages for batching, however, 5497 * causes the pages to actually be freed in smaller chunks. As there 5498 * can be a significant delay between the individual batches being 5499 * recycled, this leads to the once large chunks of space being 5500 * fragmented and becoming unavailable for high-order allocations. 5501 */ 5502 return 0; 5503 #endif 5504 } 5505 5506 static int percpu_pagelist_high_fraction; 5507 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5508 int high_fraction) 5509 { 5510 #ifdef CONFIG_MMU 5511 int high; 5512 int nr_split_cpus; 5513 unsigned long total_pages; 5514 5515 if (!high_fraction) { 5516 /* 5517 * By default, the high value of the pcp is based on the zone 5518 * low watermark so that if they are full then background 5519 * reclaim will not be started prematurely. 
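 *
 * As an assumed worked example: a zone with a low watermark of 32768
 * pages (128 MiB with 4 KiB pages) and 16 local CPUs yields a pcp high
 * of roughly 32768 / 16 = 2048 pages per CPU, subject to the batch * 4
 * floor applied below.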
5520 */ 5521 total_pages = low_wmark_pages(zone); 5522 } else { 5523 /* 5524 * If percpu_pagelist_high_fraction is configured, the high 5525 * value is based on a fraction of the managed pages in the 5526 * zone. 5527 */ 5528 total_pages = zone_managed_pages(zone) / high_fraction; 5529 } 5530 5531 /* 5532 * Split the high value across all online CPUs local to the zone. Note 5533 * that early in boot that CPUs may not be online yet and that during 5534 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5535 * onlined. For memory nodes that have no CPUs, split the high value 5536 * across all online CPUs to mitigate the risk that reclaim is triggered 5537 * prematurely due to pages stored on pcp lists. 5538 */ 5539 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5540 if (!nr_split_cpus) 5541 nr_split_cpus = num_online_cpus(); 5542 high = total_pages / nr_split_cpus; 5543 5544 /* 5545 * Ensure high is at least batch*4. The multiple is based on the 5546 * historical relationship between high and batch. 5547 */ 5548 high = max(high, batch << 2); 5549 5550 return high; 5551 #else 5552 return 0; 5553 #endif 5554 } 5555 5556 /* 5557 * pcp->high and pcp->batch values are related and generally batch is lower 5558 * than high. They are also related to pcp->count such that count is lower 5559 * than high, and as soon as it reaches high, the pcplist is flushed. 5560 * 5561 * However, guaranteeing these relations at all times would require e.g. write 5562 * barriers here but also careful usage of read barriers at the read side, and 5563 * thus be prone to error and bad for performance. Thus the update only prevents 5564 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5565 * should ensure they can cope with those fields changing asynchronously, and 5566 * fully trust only the pcp->count field on the local CPU with interrupts 5567 * disabled. 5568 * 5569 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5570 * outside of boot time (or some other assurance that no concurrent updaters 5571 * exist). 5572 */ 5573 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5574 unsigned long high_max, unsigned long batch) 5575 { 5576 WRITE_ONCE(pcp->batch, batch); 5577 WRITE_ONCE(pcp->high_min, high_min); 5578 WRITE_ONCE(pcp->high_max, high_max); 5579 } 5580 5581 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5582 { 5583 int pindex; 5584 5585 memset(pcp, 0, sizeof(*pcp)); 5586 memset(pzstats, 0, sizeof(*pzstats)); 5587 5588 spin_lock_init(&pcp->lock); 5589 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5590 INIT_LIST_HEAD(&pcp->lists[pindex]); 5591 5592 /* 5593 * Set batch and high values safe for a boot pageset. A true percpu 5594 * pageset's initialization will update them subsequently. Here we don't 5595 * need to be as careful as pageset_update() as nobody can access the 5596 * pageset yet. 
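 *
 * With BOOT_PAGESET_HIGH == 0 and BOOT_PAGESET_BATCH == 1, every page
 * freed to a boot pageset is flushed straight back to the buddy
 * allocator, which is what effectively disables the pcplists until the
 * real per-zone pagesets are allocated.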
5597 */ 5598 pcp->high_min = BOOT_PAGESET_HIGH; 5599 pcp->high_max = BOOT_PAGESET_HIGH; 5600 pcp->batch = BOOT_PAGESET_BATCH; 5601 pcp->free_count = 0; 5602 } 5603 5604 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 5605 unsigned long high_max, unsigned long batch) 5606 { 5607 struct per_cpu_pages *pcp; 5608 int cpu; 5609 5610 for_each_possible_cpu(cpu) { 5611 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5612 pageset_update(pcp, high_min, high_max, batch); 5613 } 5614 } 5615 5616 /* 5617 * Calculate and set new high and batch values for all per-cpu pagesets of a 5618 * zone based on the zone's size. 5619 */ 5620 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5621 { 5622 int new_high_min, new_high_max, new_batch; 5623 5624 new_batch = max(1, zone_batchsize(zone)); 5625 if (percpu_pagelist_high_fraction) { 5626 new_high_min = zone_highsize(zone, new_batch, cpu_online, 5627 percpu_pagelist_high_fraction); 5628 /* 5629 * PCP high is tuned manually, disable auto-tuning via 5630 * setting high_min and high_max to the manual value. 5631 */ 5632 new_high_max = new_high_min; 5633 } else { 5634 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 5635 new_high_max = zone_highsize(zone, new_batch, cpu_online, 5636 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 5637 } 5638 5639 if (zone->pageset_high_min == new_high_min && 5640 zone->pageset_high_max == new_high_max && 5641 zone->pageset_batch == new_batch) 5642 return; 5643 5644 zone->pageset_high_min = new_high_min; 5645 zone->pageset_high_max = new_high_max; 5646 zone->pageset_batch = new_batch; 5647 5648 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 5649 new_batch); 5650 } 5651 5652 void __meminit setup_zone_pageset(struct zone *zone) 5653 { 5654 int cpu; 5655 5656 /* Size may be 0 on !SMP && !NUMA */ 5657 if (sizeof(struct per_cpu_zonestat) > 0) 5658 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5659 5660 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5661 for_each_possible_cpu(cpu) { 5662 struct per_cpu_pages *pcp; 5663 struct per_cpu_zonestat *pzstats; 5664 5665 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5666 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5667 per_cpu_pages_init(pcp, pzstats); 5668 } 5669 5670 zone_set_pageset_high_and_batch(zone, 0); 5671 } 5672 5673 /* 5674 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5675 * page high values need to be recalculated. 5676 */ 5677 static void zone_pcp_update(struct zone *zone, int cpu_online) 5678 { 5679 mutex_lock(&pcp_batch_high_lock); 5680 zone_set_pageset_high_and_batch(zone, cpu_online); 5681 mutex_unlock(&pcp_batch_high_lock); 5682 } 5683 5684 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 5685 { 5686 struct per_cpu_pages *pcp; 5687 struct cpu_cacheinfo *cci; 5688 5689 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5690 cci = get_cpu_cacheinfo(cpu); 5691 /* 5692 * If data cache slice of CPU is large enough, "pcp->batch" 5693 * pages can be preserved in PCP before draining PCP for 5694 * consecutive high-order pages freeing without allocation. 5695 * This can reduce zone lock contention without hurting 5696 * cache-hot pages sharing. 
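 *
 * Worked example (hypothetical hardware): with 4KB pages and an 8MB
 * per-CPU data cache slice, 8MB >> PAGE_SHIFT == 2048 pages, so
 * PCPF_FREE_HIGH_BATCH gets set whenever pcp->batch is below ~683
 * (2048 / 3) pages.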
5697 */ 5698 spin_lock(&pcp->lock); 5699 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 5700 pcp->flags |= PCPF_FREE_HIGH_BATCH; 5701 else 5702 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 5703 spin_unlock(&pcp->lock); 5704 } 5705 5706 void setup_pcp_cacheinfo(unsigned int cpu) 5707 { 5708 struct zone *zone; 5709 5710 for_each_populated_zone(zone) 5711 zone_pcp_update_cacheinfo(zone, cpu); 5712 } 5713 5714 /* 5715 * Allocate per cpu pagesets and initialize them. 5716 * Before this call only boot pagesets were available. 5717 */ 5718 void __init setup_per_cpu_pageset(void) 5719 { 5720 struct pglist_data *pgdat; 5721 struct zone *zone; 5722 int __maybe_unused cpu; 5723 5724 for_each_populated_zone(zone) 5725 setup_zone_pageset(zone); 5726 5727 #ifdef CONFIG_NUMA 5728 /* 5729 * Unpopulated zones continue using the boot pagesets. 5730 * The numa stats for these pagesets need to be reset. 5731 * Otherwise, they will end up skewing the stats of 5732 * the nodes these zones are associated with. 5733 */ 5734 for_each_possible_cpu(cpu) { 5735 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5736 memset(pzstats->vm_numa_event, 0, 5737 sizeof(pzstats->vm_numa_event)); 5738 } 5739 #endif 5740 5741 for_each_online_pgdat(pgdat) 5742 pgdat->per_cpu_nodestats = 5743 alloc_percpu(struct per_cpu_nodestat); 5744 } 5745 5746 __meminit void zone_pcp_init(struct zone *zone) 5747 { 5748 /* 5749 * per cpu subsystem is not up at this point. The following code 5750 * relies on the ability of the linker to provide the 5751 * offset of a (static) per cpu variable into the per cpu area. 5752 */ 5753 zone->per_cpu_pageset = &boot_pageset; 5754 zone->per_cpu_zonestats = &boot_zonestats; 5755 zone->pageset_high_min = BOOT_PAGESET_HIGH; 5756 zone->pageset_high_max = BOOT_PAGESET_HIGH; 5757 zone->pageset_batch = BOOT_PAGESET_BATCH; 5758 5759 if (populated_zone(zone)) 5760 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5761 zone->present_pages, zone_batchsize(zone)); 5762 } 5763 5764 static void setup_per_zone_lowmem_reserve(void); 5765 5766 void adjust_managed_page_count(struct page *page, long count) 5767 { 5768 atomic_long_add(count, &page_zone(page)->managed_pages); 5769 totalram_pages_add(count); 5770 setup_per_zone_lowmem_reserve(); 5771 } 5772 EXPORT_SYMBOL(adjust_managed_page_count); 5773 5774 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5775 { 5776 void *pos; 5777 unsigned long pages = 0; 5778 5779 start = (void *)PAGE_ALIGN((unsigned long)start); 5780 end = (void *)((unsigned long)end & PAGE_MASK); 5781 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5782 struct page *page = virt_to_page(pos); 5783 void *direct_map_addr; 5784 5785 /* 5786 * 'direct_map_addr' might be different from 'pos' 5787 * because some architectures' virt_to_page() 5788 * work with aliases. Getting the direct map 5789 * address ensures that we get a _writeable_ 5790 * alias for the memset(). 5791 */ 5792 direct_map_addr = page_address(page); 5793 /* 5794 * Perform a kasan-unchecked memset() since this memory 5795 * has not been initialized. 
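 *
 * Note that a poison value outside 0..0xFF (callers conventionally
 * pass -1) skips the poisoning memset() entirely.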
*/ 5797 direct_map_addr = kasan_reset_tag(direct_map_addr); 5798 if ((unsigned int)poison <= 0xFF) 5799 memset(direct_map_addr, poison, PAGE_SIZE); 5800 5801 free_reserved_page(page); 5802 } 5803 5804 if (pages && s) 5805 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 5806 5807 return pages; 5808 } 5809 5810 void free_reserved_page(struct page *page) 5811 { 5812 clear_page_tag_ref(page); 5813 ClearPageReserved(page); 5814 init_page_count(page); 5815 __free_page(page); 5816 adjust_managed_page_count(page, 1); 5817 } 5818 EXPORT_SYMBOL(free_reserved_page); 5819 5820 static int page_alloc_cpu_dead(unsigned int cpu) 5821 { 5822 struct zone *zone; 5823 5824 lru_add_drain_cpu(cpu); 5825 mlock_drain_remote(cpu); 5826 drain_pages(cpu); 5827 5828 /* 5829 * Spill the event counters of the dead processor 5830 * into the current processor's event counters. 5831 * This artificially elevates the count of the current 5832 * processor. 5833 */ 5834 vm_events_fold_cpu(cpu); 5835 5836 /* 5837 * Zero the differential counters of the dead processor 5838 * so that the vm statistics are consistent. 5839 * 5840 * This is only okay since the processor is dead and cannot 5841 * race with what we are doing. 5842 */ 5843 cpu_vm_stats_fold(cpu); 5844 5845 for_each_populated_zone(zone) 5846 zone_pcp_update(zone, 0); 5847 5848 return 0; 5849 } 5850 5851 static int page_alloc_cpu_online(unsigned int cpu) 5852 { 5853 struct zone *zone; 5854 5855 for_each_populated_zone(zone) 5856 zone_pcp_update(zone, 1); 5857 return 0; 5858 } 5859 5860 void __init page_alloc_init_cpuhp(void) 5861 { 5862 int ret; 5863 5864 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 5865 "mm/page_alloc:pcp", 5866 page_alloc_cpu_online, 5867 page_alloc_cpu_dead); 5868 WARN_ON(ret < 0); 5869 } 5870 5871 /* 5872 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5873 * or min_free_kbytes changes. 5874 */ 5875 static void calculate_totalreserve_pages(void) 5876 { 5877 struct pglist_data *pgdat; 5878 unsigned long reserve_pages = 0; 5879 enum zone_type i, j; 5880 5881 for_each_online_pgdat(pgdat) { 5882 5883 pgdat->totalreserve_pages = 0; 5884 5885 for (i = 0; i < MAX_NR_ZONES; i++) { 5886 struct zone *zone = pgdat->node_zones + i; 5887 long max = 0; 5888 unsigned long managed_pages = zone_managed_pages(zone); 5889 5890 /* Find valid and maximum lowmem_reserve in the zone */ 5891 for (j = i; j < MAX_NR_ZONES; j++) { 5892 if (zone->lowmem_reserve[j] > max) 5893 max = zone->lowmem_reserve[j]; 5894 } 5895 5896 /* we treat the high watermark as reserved pages. */ 5897 max += high_wmark_pages(zone); 5898 5899 if (max > managed_pages) 5900 max = managed_pages; 5901 5902 pgdat->totalreserve_pages += max; 5903 5904 reserve_pages += max; 5905 } 5906 } 5907 totalreserve_pages = reserve_pages; 5908 } 5909 5910 /* 5911 * setup_per_zone_lowmem_reserve - called whenever 5912 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 5913 * has a correct lowmem reserve value, so an adequate number of 5914 * pages are left in the zone after a successful __alloc_pages().
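 *
 * Worked example (hypothetical sizes): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] == 256 and 4GB (1048576
 * pages of 4KB) of ZONE_NORMAL above it, ZONE_DMA32 gets
 * lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096 pages that
 * ZONE_NORMAL allocations are not allowed to consume.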
*/ 5916 static void setup_per_zone_lowmem_reserve(void) 5917 { 5918 struct pglist_data *pgdat; 5919 enum zone_type i, j; 5920 5921 for_each_online_pgdat(pgdat) { 5922 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 5923 struct zone *zone = &pgdat->node_zones[i]; 5924 int ratio = sysctl_lowmem_reserve_ratio[i]; 5925 bool clear = !ratio || !zone_managed_pages(zone); 5926 unsigned long managed_pages = 0; 5927 5928 for (j = i + 1; j < MAX_NR_ZONES; j++) { 5929 struct zone *upper_zone = &pgdat->node_zones[j]; 5930 5931 managed_pages += zone_managed_pages(upper_zone); 5932 5933 if (clear) 5934 zone->lowmem_reserve[j] = 0; 5935 else 5936 zone->lowmem_reserve[j] = managed_pages / ratio; 5937 } 5938 } 5939 } 5940 5941 /* update totalreserve_pages */ 5942 calculate_totalreserve_pages(); 5943 } 5944 5945 static void __setup_per_zone_wmarks(void) 5946 { 5947 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5948 unsigned long lowmem_pages = 0; 5949 struct zone *zone; 5950 unsigned long flags; 5951 5952 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 5953 for_each_zone(zone) { 5954 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 5955 lowmem_pages += zone_managed_pages(zone); 5956 } 5957 5958 for_each_zone(zone) { 5959 u64 tmp; 5960 5961 spin_lock_irqsave(&zone->lock, flags); 5962 tmp = (u64)pages_min * zone_managed_pages(zone); 5963 tmp = div64_ul(tmp, lowmem_pages); 5964 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 5965 /* 5966 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 5967 * need highmem and movable zones pages, so cap pages_min 5968 * to a small value here. 5969 * 5970 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN) 5971 * deltas control async page reclaim, and so should 5972 * not be capped for highmem and movable zones. 5973 */ 5974 unsigned long min_pages; 5975 5976 min_pages = zone_managed_pages(zone) / 1024; 5977 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 5978 zone->_watermark[WMARK_MIN] = min_pages; 5979 } else { 5980 /* 5981 * If it's a lowmem zone, reserve a number of pages 5982 * proportionate to the zone's size. 5983 */ 5984 zone->_watermark[WMARK_MIN] = tmp; 5985 } 5986 5987 /* 5988 * Set the distance between the kswapd watermarks according to 5989 * the scale factor in proportion to available memory, but 5990 * ensure a minimum size on small systems. 5991 */ 5992 tmp = max_t(u64, tmp >> 2, 5993 mult_frac(zone_managed_pages(zone), 5994 watermark_scale_factor, 10000)); 5995 5996 zone->watermark_boost = 0; 5997 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 5998 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 5999 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 6000 6001 spin_unlock_irqrestore(&zone->lock, flags); 6002 } 6003 6004 /* update totalreserve_pages */ 6005 calculate_totalreserve_pages(); 6006 } 6007 6008 /** 6009 * setup_per_zone_wmarks - called when min_free_kbytes changes 6010 * or when memory is hot-{added|removed} 6011 * 6012 * Ensures that the watermark[min,low,high] values for each zone are set 6013 * correctly with respect to min_free_kbytes. 6014 */ 6015 void setup_per_zone_wmarks(void) 6016 { 6017 struct zone *zone; 6018 static DEFINE_SPINLOCK(lock); 6019 6020 spin_lock(&lock); 6021 __setup_per_zone_wmarks(); 6022 spin_unlock(&lock); 6023 6024 /* 6025 * The watermark sizes have changed, so update the pcpu batch 6026 * and high limits or the limits may be inappropriate.
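 *
 * As a rough illustration of the arithmetic in __setup_per_zone_wmarks()
 * (hypothetical zone): with 1,000,000 managed pages, a proportional min
 * share of 4096 pages and watermark_scale_factor == 10, the distance is
 * tmp = max(4096 >> 2, 1000000 * 10 / 10000) = 1024, giving
 * WMARK_LOW = 5120, WMARK_HIGH = 6144 and WMARK_PROMO = 7168.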
*/ 6028 for_each_zone(zone) 6029 zone_pcp_update(zone, 0); 6030 } 6031 6032 /* 6033 * Initialise min_free_kbytes. 6034 * 6035 * For small machines we want it small (128k min). For large machines 6036 * we want it large (256MB max). But it is not linear, because network 6037 * bandwidth does not increase linearly with machine size. We use 6038 * 6039 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6040 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6041 * 6042 * which yields 6043 * 6044 * 16MB: 512k 6045 * 32MB: 724k 6046 * 64MB: 1024k 6047 * 128MB: 1448k 6048 * 256MB: 2048k 6049 * 512MB: 2896k 6050 * 1024MB: 4096k 6051 * 2048MB: 5792k 6052 * 4096MB: 8192k 6053 * 8192MB: 11584k 6054 * 16384MB: 16384k 6055 */ 6056 void calculate_min_free_kbytes(void) 6057 { 6058 unsigned long lowmem_kbytes; 6059 int new_min_free_kbytes; 6060 6061 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6062 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6063 6064 if (new_min_free_kbytes > user_min_free_kbytes) 6065 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6066 else 6067 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6068 new_min_free_kbytes, user_min_free_kbytes); 6069 6070 } 6071 6072 int __meminit init_per_zone_wmark_min(void) 6073 { 6074 calculate_min_free_kbytes(); 6075 setup_per_zone_wmarks(); 6076 refresh_zone_stat_thresholds(); 6077 setup_per_zone_lowmem_reserve(); 6078 6079 #ifdef CONFIG_NUMA 6080 setup_min_unmapped_ratio(); 6081 setup_min_slab_ratio(); 6082 #endif 6083 6084 khugepaged_min_free_kbytes_update(); 6085 6086 return 0; 6087 } 6088 postcore_initcall(init_per_zone_wmark_min) 6089 6090 /* 6091 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax() so 6092 * that we can record user_min_free_kbytes and update the watermarks whenever min_free_kbytes 6093 * changes.
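 *
 * E.g. "echo 65536 > /proc/sys/vm/min_free_kbytes" lands here with
 * write == true, records 65536 as user_min_free_kbytes and re-runs
 * setup_per_zone_wmarks().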
*/ 6095 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6096 void *buffer, size_t *length, loff_t *ppos) 6097 { 6098 int rc; 6099 6100 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6101 if (rc) 6102 return rc; 6103 6104 if (write) { 6105 user_min_free_kbytes = min_free_kbytes; 6106 setup_per_zone_wmarks(); 6107 } 6108 return 0; 6109 } 6110 6111 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6112 void *buffer, size_t *length, loff_t *ppos) 6113 { 6114 int rc; 6115 6116 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6117 if (rc) 6118 return rc; 6119 6120 if (write) 6121 setup_per_zone_wmarks(); 6122 6123 return 0; 6124 } 6125 6126 #ifdef CONFIG_NUMA 6127 static void setup_min_unmapped_ratio(void) 6128 { 6129 pg_data_t *pgdat; 6130 struct zone *zone; 6131 6132 for_each_online_pgdat(pgdat) 6133 pgdat->min_unmapped_pages = 0; 6134 6135 for_each_zone(zone) 6136 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6137 sysctl_min_unmapped_ratio) / 100; 6138 } 6139 6140 6141 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6142 void *buffer, size_t *length, loff_t *ppos) 6143 { 6144 int rc; 6145 6146 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6147 if (rc) 6148 return rc; 6149 6150 setup_min_unmapped_ratio(); 6151 6152 return 0; 6153 } 6154 6155 static void setup_min_slab_ratio(void) 6156 { 6157 pg_data_t *pgdat; 6158 struct zone *zone; 6159 6160 for_each_online_pgdat(pgdat) 6161 pgdat->min_slab_pages = 0; 6162 6163 for_each_zone(zone) 6164 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6165 sysctl_min_slab_ratio) / 100; 6166 } 6167 6168 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6169 void *buffer, size_t *length, loff_t *ppos) 6170 { 6171 int rc; 6172 6173 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6174 if (rc) 6175 return rc; 6176 6177 setup_min_slab_ratio(); 6178 6179 return 0; 6180 } 6181 #endif 6182 6183 /* 6184 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6185 * proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve() 6186 * whenever sysctl_lowmem_reserve_ratio changes. 6187 * 6188 * The reserve ratio has no relation to the minimum watermarks. The 6189 * lowmem reserve ratio only makes sense as a function of the boot-time 6190 * zone sizes. 6191 */ 6192 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6193 int write, void *buffer, size_t *length, loff_t *ppos) 6194 { 6195 int i; 6196 6197 proc_dointvec_minmax(table, write, buffer, length, ppos); 6198 6199 for (i = 0; i < MAX_NR_ZONES; i++) { 6200 if (sysctl_lowmem_reserve_ratio[i] < 1) 6201 sysctl_lowmem_reserve_ratio[i] = 0; 6202 } 6203 6204 setup_per_zone_lowmem_reserve(); 6205 return 0; 6206 } 6207 6208 /* 6209 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6210 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6211 * pagelist can have before it gets flushed back to the buddy allocator.
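 *
 * Example (hypothetical zone): writing 8 (the minimum) for a zone with
 * 1048576 managed pages and 8 local CPUs yields
 * pcp->high = (1048576 / 8) / 8 = 16384 pages per CPU.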
6212 */ 6213 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6214 int write, void *buffer, size_t *length, loff_t *ppos) 6215 { 6216 struct zone *zone; 6217 int old_percpu_pagelist_high_fraction; 6218 int ret; 6219 6220 mutex_lock(&pcp_batch_high_lock); 6221 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6222 6223 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6224 if (!write || ret < 0) 6225 goto out; 6226 6227 /* Sanity checking to avoid pcp imbalance */ 6228 if (percpu_pagelist_high_fraction && 6229 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6230 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6231 ret = -EINVAL; 6232 goto out; 6233 } 6234 6235 /* No change? */ 6236 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6237 goto out; 6238 6239 for_each_populated_zone(zone) 6240 zone_set_pageset_high_and_batch(zone, 0); 6241 out: 6242 mutex_unlock(&pcp_batch_high_lock); 6243 return ret; 6244 } 6245 6246 static const struct ctl_table page_alloc_sysctl_table[] = { 6247 { 6248 .procname = "min_free_kbytes", 6249 .data = &min_free_kbytes, 6250 .maxlen = sizeof(min_free_kbytes), 6251 .mode = 0644, 6252 .proc_handler = min_free_kbytes_sysctl_handler, 6253 .extra1 = SYSCTL_ZERO, 6254 }, 6255 { 6256 .procname = "watermark_boost_factor", 6257 .data = &watermark_boost_factor, 6258 .maxlen = sizeof(watermark_boost_factor), 6259 .mode = 0644, 6260 .proc_handler = proc_dointvec_minmax, 6261 .extra1 = SYSCTL_ZERO, 6262 }, 6263 { 6264 .procname = "watermark_scale_factor", 6265 .data = &watermark_scale_factor, 6266 .maxlen = sizeof(watermark_scale_factor), 6267 .mode = 0644, 6268 .proc_handler = watermark_scale_factor_sysctl_handler, 6269 .extra1 = SYSCTL_ONE, 6270 .extra2 = SYSCTL_THREE_THOUSAND, 6271 }, 6272 { 6273 .procname = "percpu_pagelist_high_fraction", 6274 .data = &percpu_pagelist_high_fraction, 6275 .maxlen = sizeof(percpu_pagelist_high_fraction), 6276 .mode = 0644, 6277 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6278 .extra1 = SYSCTL_ZERO, 6279 }, 6280 { 6281 .procname = "lowmem_reserve_ratio", 6282 .data = &sysctl_lowmem_reserve_ratio, 6283 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6284 .mode = 0644, 6285 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6286 }, 6287 #ifdef CONFIG_NUMA 6288 { 6289 .procname = "numa_zonelist_order", 6290 .data = &numa_zonelist_order, 6291 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6292 .mode = 0644, 6293 .proc_handler = numa_zonelist_order_handler, 6294 }, 6295 { 6296 .procname = "min_unmapped_ratio", 6297 .data = &sysctl_min_unmapped_ratio, 6298 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6299 .mode = 0644, 6300 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6301 .extra1 = SYSCTL_ZERO, 6302 .extra2 = SYSCTL_ONE_HUNDRED, 6303 }, 6304 { 6305 .procname = "min_slab_ratio", 6306 .data = &sysctl_min_slab_ratio, 6307 .maxlen = sizeof(sysctl_min_slab_ratio), 6308 .mode = 0644, 6309 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6310 .extra1 = SYSCTL_ZERO, 6311 .extra2 = SYSCTL_ONE_HUNDRED, 6312 }, 6313 #endif 6314 }; 6315 6316 void __init page_alloc_sysctl_init(void) 6317 { 6318 register_sysctl_init("vm", page_alloc_sysctl_table); 6319 } 6320 6321 #ifdef CONFIG_CONTIG_ALLOC 6322 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6323 static void alloc_contig_dump_pages(struct list_head *page_list) 6324 { 6325 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6326 6327 if 
(DYNAMIC_DEBUG_BRANCH(descriptor)) { 6328 struct page *page; 6329 6330 dump_stack(); 6331 list_for_each_entry(page, page_list, lru) 6332 dump_page(page, "migration failure"); 6333 } 6334 } 6335 6336 /* 6337 * [start, end) must belong to a single zone. 6338 * @migratetype: using migratetype to filter the type of migration in 6339 * trace_mm_alloc_contig_migrate_range_info. 6340 */ 6341 static int __alloc_contig_migrate_range(struct compact_control *cc, 6342 unsigned long start, unsigned long end, int migratetype) 6343 { 6344 /* This function is based on compact_zone() from compaction.c. */ 6345 unsigned int nr_reclaimed; 6346 unsigned long pfn = start; 6347 unsigned int tries = 0; 6348 int ret = 0; 6349 struct migration_target_control mtc = { 6350 .nid = zone_to_nid(cc->zone), 6351 .gfp_mask = cc->gfp_mask, 6352 .reason = MR_CONTIG_RANGE, 6353 }; 6354 struct page *page; 6355 unsigned long total_mapped = 0; 6356 unsigned long total_migrated = 0; 6357 unsigned long total_reclaimed = 0; 6358 6359 lru_cache_disable(); 6360 6361 while (pfn < end || !list_empty(&cc->migratepages)) { 6362 if (fatal_signal_pending(current)) { 6363 ret = -EINTR; 6364 break; 6365 } 6366 6367 if (list_empty(&cc->migratepages)) { 6368 cc->nr_migratepages = 0; 6369 ret = isolate_migratepages_range(cc, pfn, end); 6370 if (ret && ret != -EAGAIN) 6371 break; 6372 pfn = cc->migrate_pfn; 6373 tries = 0; 6374 } else if (++tries == 5) { 6375 ret = -EBUSY; 6376 break; 6377 } 6378 6379 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6380 &cc->migratepages); 6381 cc->nr_migratepages -= nr_reclaimed; 6382 6383 if (trace_mm_alloc_contig_migrate_range_info_enabled()) { 6384 total_reclaimed += nr_reclaimed; 6385 list_for_each_entry(page, &cc->migratepages, lru) { 6386 struct folio *folio = page_folio(page); 6387 6388 total_mapped += folio_mapped(folio) * 6389 folio_nr_pages(folio); 6390 } 6391 } 6392 6393 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6394 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6395 6396 if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret) 6397 total_migrated += cc->nr_migratepages; 6398 6399 /* 6400 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6401 * to retry again over this error, so do the same here. 6402 */ 6403 if (ret == -ENOMEM) 6404 break; 6405 } 6406 6407 lru_cache_enable(); 6408 if (ret < 0) { 6409 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6410 alloc_contig_dump_pages(&cc->migratepages); 6411 putback_movable_pages(&cc->migratepages); 6412 } 6413 6414 trace_mm_alloc_contig_migrate_range_info(start, end, migratetype, 6415 total_migrated, 6416 total_reclaimed, 6417 total_mapped); 6418 return (ret < 0) ? ret : 0; 6419 } 6420 6421 static void split_free_pages(struct list_head *list, gfp_t gfp_mask) 6422 { 6423 int order; 6424 6425 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6426 struct page *page, *next; 6427 int nr_pages = 1 << order; 6428 6429 list_for_each_entry_safe(page, next, &list[order], lru) { 6430 int i; 6431 6432 post_alloc_hook(page, order, gfp_mask); 6433 set_page_refcounted(page); 6434 if (!order) 6435 continue; 6436 6437 split_page(page, order); 6438 6439 /* Add all subpages to the order-0 head, in sequence. 
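 * An order-2 page, for instance, contributes its four order-0
 * subpages to list[0].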
*/ 6440 list_del(&page->lru); 6441 for (i = 0; i < nr_pages; i++) 6442 list_add_tail(&page[i].lru, &list[0]); 6443 } 6444 } 6445 } 6446 6447 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) 6448 { 6449 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 6450 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | 6451 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO; 6452 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 6453 6454 /* 6455 * We are given the range to allocate; node, mobility and placement 6456 * hints are irrelevant at this point. We'll simply ignore them. 6457 */ 6458 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | 6459 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE); 6460 6461 /* 6462 * We only support most reclaim flags (but not NOFAIL/NORETRY), and 6463 * selected action flags. 6464 */ 6465 if (gfp_mask & ~(reclaim_mask | action_mask)) 6466 return -EINVAL; 6467 6468 /* 6469 * Flags to control page compaction/migration/reclaim, to free up our 6470 * page range. Migratable pages are movable, __GFP_MOVABLE is implied 6471 * for them. 6472 * 6473 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that 6474 * to not degrade callers. 6475 */ 6476 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | 6477 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; 6478 return 0; 6479 } 6480 6481 /** 6482 * alloc_contig_range() -- tries to allocate given range of pages 6483 * @start: start PFN to allocate 6484 * @end: one-past-the-last PFN to allocate 6485 * @migratetype: migratetype of the underlying pageblocks (either 6486 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6487 * in range must have the same migratetype and it must 6488 * be either of the two. 6489 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some 6490 * action and reclaim modifiers are supported. Reclaim modifiers 6491 * control allocation behavior during compaction/migration/reclaim. 6492 * 6493 * The PFN range does not have to be pageblock aligned. The PFN range must 6494 * belong to a single zone. 6495 * 6496 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6497 * pageblocks in the range. Once isolated, the pageblocks should not 6498 * be modified by others. 6499 * 6500 * Return: zero on success or negative error code. On success all 6501 * pages whose PFN is in [start, end) are allocated for the caller and 6502 * need to be freed with free_contig_range(). 6503 */ 6504 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 6505 unsigned migratetype, gfp_t gfp_mask) 6506 { 6507 unsigned long outer_start, outer_end; 6508 int ret = 0; 6509 6510 struct compact_control cc = { 6511 .nr_migratepages = 0, 6512 .order = -1, 6513 .zone = page_zone(pfn_to_page(start)), 6514 .mode = MIGRATE_SYNC, 6515 .ignore_skip_hint = true, 6516 .no_set_skip_hint = true, 6517 .alloc_contig = true, 6518 }; 6519 INIT_LIST_HEAD(&cc.migratepages); 6520 6521 gfp_mask = current_gfp_context(gfp_mask); 6522 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) 6523 return -EINVAL; 6524 6525 /* 6526 * What we do here is we mark all pageblocks in range as 6527 * MIGRATE_ISOLATE. Because pageblock and max order pages may 6528 * have different sizes, and due to the way the page allocator 6529 * works, start_isolate_page_range() has special handling for this. 6530 * 6531 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6532 * migrate the pages from an unaligned range (i.e.
pages that 6533 * we are interested in). This will put all the pages in 6534 * range back to the page allocator as MIGRATE_ISOLATE. 6535 * 6536 * When this is done, we take the pages in range from the page 6537 * allocator, removing them from the buddy system. This way the 6538 * page allocator will never consider using them. 6539 * 6540 * This lets us mark the pageblocks back as 6541 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6542 * aligned range but not in the unaligned, original range are 6543 * put back to the page allocator so that the buddy allocator can use them. 6544 */ 6545 6546 ret = start_isolate_page_range(start, end, migratetype, 0); 6547 if (ret) 6548 goto done; 6549 6550 drain_all_pages(cc.zone); 6551 6552 /* 6553 * In case of -EBUSY, we'd like to know which page causes the problem. 6554 * So, just fall through. test_pages_isolated() has a tracepoint 6555 * which will report the busy page. 6556 * 6557 * It is possible that busy pages could become available before 6558 * the call to test_pages_isolated, and the range will actually be 6559 * allocated. So, if we fall through, be sure to clear ret so that 6560 * -EBUSY is not accidentally used or returned to the caller. 6561 */ 6562 ret = __alloc_contig_migrate_range(&cc, start, end, migratetype); 6563 if (ret && ret != -EBUSY) 6564 goto done; 6565 6566 /* 6567 * When in-use hugetlb pages are migrated, they may simply be released 6568 * back into the free hugepage pool instead of being returned to the 6569 * buddy system. After the migration of in-use huge pages is completed, 6570 * we will invoke replace_free_hugepage_folios() to ensure that these 6571 * hugepages are properly released to the buddy system. 6572 */ 6573 ret = replace_free_hugepage_folios(start, end); 6574 if (ret) 6575 goto done; 6576 6577 /* 6578 * Pages from [start, end) are within pageblock_nr_pages 6579 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6580 * more, all pages in [start, end) are free in the page allocator. 6581 * What we are going to do is to allocate all pages from 6582 * [start, end) (that is, remove them from the page allocator). 6583 * 6584 * The only problem is that pages at the beginning and at the 6585 * end of the interesting range may not be aligned with pages that 6586 * the page allocator holds, i.e. they can be part of higher-order 6587 * pages. Because of this, we reserve the bigger range and 6588 * once this is done free the pages we are not interested in. 6589 * 6590 * We don't have to hold zone->lock here because the pages are 6591 * isolated and thus won't get removed from the buddy system. 6592 */ 6593 outer_start = find_large_buddy(start); 6594 6595 /* Make sure the range is really isolated. */ 6596 if (test_pages_isolated(outer_start, end, 0)) { 6597 ret = -EBUSY; 6598 goto done; 6599 } 6600 6601 /* Grab isolated pages from freelists.
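 * isolate_freepages_range() is expected to return 0 on failure and,
 * on success, a PFN at or beyond the requested end.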
*/ 6602 outer_end = isolate_freepages_range(&cc, outer_start, end); 6603 if (!outer_end) { 6604 ret = -EBUSY; 6605 goto done; 6606 } 6607 6608 if (!(gfp_mask & __GFP_COMP)) { 6609 split_free_pages(cc.freepages, gfp_mask); 6610 6611 /* Free head and tail (if any) */ 6612 if (start != outer_start) 6613 free_contig_range(outer_start, start - outer_start); 6614 if (end != outer_end) 6615 free_contig_range(end, outer_end - end); 6616 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 6617 struct page *head = pfn_to_page(start); 6618 int order = ilog2(end - start); 6619 6620 check_new_pages(head, order); 6621 prep_new_page(head, order, gfp_mask, 0); 6622 set_page_refcounted(head); 6623 } else { 6624 ret = -EINVAL; 6625 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 6626 start, end, outer_start, outer_end); 6627 } 6628 done: 6629 undo_isolate_page_range(start, end, migratetype); 6630 return ret; 6631 } 6632 EXPORT_SYMBOL(alloc_contig_range_noprof); 6633 6634 static int __alloc_contig_pages(unsigned long start_pfn, 6635 unsigned long nr_pages, gfp_t gfp_mask) 6636 { 6637 unsigned long end_pfn = start_pfn + nr_pages; 6638 6639 return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE, 6640 gfp_mask); 6641 } 6642 6643 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6644 unsigned long nr_pages) 6645 { 6646 unsigned long i, end_pfn = start_pfn + nr_pages; 6647 struct page *page; 6648 6649 for (i = start_pfn; i < end_pfn; i++) { 6650 page = pfn_to_online_page(i); 6651 if (!page) 6652 return false; 6653 6654 if (page_zone(page) != z) 6655 return false; 6656 6657 if (PageReserved(page)) 6658 return false; 6659 6660 if (PageHuge(page)) 6661 return false; 6662 } 6663 return true; 6664 } 6665 6666 static bool zone_spans_last_pfn(const struct zone *zone, 6667 unsigned long start_pfn, unsigned long nr_pages) 6668 { 6669 unsigned long last_pfn = start_pfn + nr_pages - 1; 6670 6671 return zone_spans_pfn(zone, last_pfn); 6672 } 6673 6674 /** 6675 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6676 * @nr_pages: Number of contiguous pages to allocate 6677 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some 6678 * action and reclaim modifiers are supported. Reclaim modifiers 6679 * control allocation behavior during compaction/migration/reclaim. 6680 * @nid: Target node 6681 * @nodemask: Mask for other possible nodes 6682 * 6683 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6684 * on an applicable zonelist to find a contiguous pfn range which can then be 6685 * tried for allocation with alloc_contig_range(). This routine is intended 6686 * for allocation requests which can not be fulfilled with the buddy allocator. 6687 * 6688 * The allocated memory is always aligned to a page boundary. If nr_pages is a 6689 * power of two, then allocated range is also guaranteed to be aligned to same 6690 * nr_pages (e.g. 1GB request would be aligned to 1GB). 6691 * 6692 * Allocated pages can be freed with free_contig_range() or by manually calling 6693 * __free_page() on each allocated page. 6694 * 6695 * Return: pointer to contiguous pages on success, or NULL if not successful. 
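 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	struct page *pages;
 *
 *	pages = alloc_contig_pages(1024, GFP_KERNEL, numa_node_id(), NULL);
 *	if (pages)
 *		free_contig_range(page_to_pfn(pages), 1024);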
6696 */ 6697 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 6698 int nid, nodemask_t *nodemask) 6699 { 6700 unsigned long ret, pfn, flags; 6701 struct zonelist *zonelist; 6702 struct zone *zone; 6703 struct zoneref *z; 6704 6705 zonelist = node_zonelist(nid, gfp_mask); 6706 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6707 gfp_zone(gfp_mask), nodemask) { 6708 spin_lock_irqsave(&zone->lock, flags); 6709 6710 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6711 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6712 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6713 /* 6714 * We release the zone lock here because 6715 * alloc_contig_range() will also lock the zone 6716 * at some point. If there's an allocation 6717 * spinning on this lock, it may win the race 6718 * and cause alloc_contig_range() to fail... 6719 */ 6720 spin_unlock_irqrestore(&zone->lock, flags); 6721 ret = __alloc_contig_pages(pfn, nr_pages, 6722 gfp_mask); 6723 if (!ret) 6724 return pfn_to_page(pfn); 6725 spin_lock_irqsave(&zone->lock, flags); 6726 } 6727 pfn += nr_pages; 6728 } 6729 spin_unlock_irqrestore(&zone->lock, flags); 6730 } 6731 return NULL; 6732 } 6733 #endif /* CONFIG_CONTIG_ALLOC */ 6734 6735 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6736 { 6737 unsigned long count = 0; 6738 struct folio *folio = pfn_folio(pfn); 6739 6740 if (folio_test_large(folio)) { 6741 int expected = folio_nr_pages(folio); 6742 6743 if (nr_pages == expected) 6744 folio_put(folio); 6745 else 6746 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n", 6747 pfn, nr_pages, expected); 6748 return; 6749 } 6750 6751 for (; nr_pages--; pfn++) { 6752 struct page *page = pfn_to_page(pfn); 6753 6754 count += page_count(page) != 1; 6755 __free_page(page); 6756 } 6757 WARN(count != 0, "%lu pages are still in use!\n", count); 6758 } 6759 EXPORT_SYMBOL(free_contig_range); 6760 6761 /* 6762 * Effectively disable pcplists for the zone by setting the high limit to 0 6763 * and draining all cpus. A concurrent page freeing on another CPU that's about 6764 * to put the page on pcplist will either finish before the drain and the page 6765 * will be drained, or observe the new high limit and skip the pcplist. 6766 * 6767 * Must be paired with a call to zone_pcp_enable(). 6768 */ 6769 void zone_pcp_disable(struct zone *zone) 6770 { 6771 mutex_lock(&pcp_batch_high_lock); 6772 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 6773 __drain_all_pages(zone, true); 6774 } 6775 6776 void zone_pcp_enable(struct zone *zone) 6777 { 6778 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 6779 zone->pageset_high_max, zone->pageset_batch); 6780 mutex_unlock(&pcp_batch_high_lock); 6781 } 6782 6783 void zone_pcp_reset(struct zone *zone) 6784 { 6785 int cpu; 6786 struct per_cpu_zonestat *pzstats; 6787 6788 if (zone->per_cpu_pageset != &boot_pageset) { 6789 for_each_online_cpu(cpu) { 6790 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6791 drain_zonestat(zone, pzstats); 6792 } 6793 free_percpu(zone->per_cpu_pageset); 6794 zone->per_cpu_pageset = &boot_pageset; 6795 if (zone->per_cpu_zonestats != &boot_zonestats) { 6796 free_percpu(zone->per_cpu_zonestats); 6797 zone->per_cpu_zonestats = &boot_zonestats; 6798 } 6799 } 6800 } 6801 6802 #ifdef CONFIG_MEMORY_HOTREMOVE 6803 /* 6804 * All pages in the range must be in a single zone, must not contain holes, 6805 * must span full sections, and must be isolated before calling this function. 
6806 * 6807 * Returns the number of managed (non-PageOffline()) pages in the range: the 6808 * number of pages for which memory offlining code must adjust managed page 6809 * counters using adjust_managed_page_count(). 6810 */ 6811 unsigned long __offline_isolated_pages(unsigned long start_pfn, 6812 unsigned long end_pfn) 6813 { 6814 unsigned long already_offline = 0, flags; 6815 unsigned long pfn = start_pfn; 6816 struct page *page; 6817 struct zone *zone; 6818 unsigned int order; 6819 6820 offline_mem_sections(pfn, end_pfn); 6821 zone = page_zone(pfn_to_page(pfn)); 6822 spin_lock_irqsave(&zone->lock, flags); 6823 while (pfn < end_pfn) { 6824 page = pfn_to_page(pfn); 6825 /* 6826 * The HWPoisoned page may be not in buddy system, and 6827 * page_count() is not 0. 6828 */ 6829 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6830 pfn++; 6831 continue; 6832 } 6833 /* 6834 * At this point all remaining PageOffline() pages have a 6835 * reference count of 0 and can simply be skipped. 6836 */ 6837 if (PageOffline(page)) { 6838 BUG_ON(page_count(page)); 6839 BUG_ON(PageBuddy(page)); 6840 already_offline++; 6841 pfn++; 6842 continue; 6843 } 6844 6845 BUG_ON(page_count(page)); 6846 BUG_ON(!PageBuddy(page)); 6847 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 6848 order = buddy_order(page); 6849 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 6850 pfn += (1 << order); 6851 } 6852 spin_unlock_irqrestore(&zone->lock, flags); 6853 6854 return end_pfn - start_pfn - already_offline; 6855 } 6856 #endif 6857 6858 /* 6859 * This function returns a stable result only if called under zone lock. 6860 */ 6861 bool is_free_buddy_page(const struct page *page) 6862 { 6863 unsigned long pfn = page_to_pfn(page); 6864 unsigned int order; 6865 6866 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6867 const struct page *head = page - (pfn & ((1 << order) - 1)); 6868 6869 if (PageBuddy(head) && 6870 buddy_order_unsafe(head) >= order) 6871 break; 6872 } 6873 6874 return order <= MAX_PAGE_ORDER; 6875 } 6876 EXPORT_SYMBOL(is_free_buddy_page); 6877 6878 #ifdef CONFIG_MEMORY_FAILURE 6879 static inline void add_to_free_list(struct page *page, struct zone *zone, 6880 unsigned int order, int migratetype, 6881 bool tail) 6882 { 6883 __add_to_free_list(page, zone, order, migratetype, tail); 6884 account_freepages(zone, 1 << order, migratetype); 6885 } 6886 6887 /* 6888 * Break down a higher-order page in sub-pages, and keep our target out of 6889 * buddy allocator. 6890 */ 6891 static void break_down_buddy_pages(struct zone *zone, struct page *page, 6892 struct page *target, int low, int high, 6893 int migratetype) 6894 { 6895 unsigned long size = 1 << high; 6896 struct page *current_buddy; 6897 6898 while (high > low) { 6899 high--; 6900 size >>= 1; 6901 6902 if (target >= &page[size]) { 6903 current_buddy = page; 6904 page = page + size; 6905 } else { 6906 current_buddy = page + size; 6907 } 6908 6909 if (set_page_guard(zone, current_buddy, high)) 6910 continue; 6911 6912 add_to_free_list(current_buddy, zone, high, migratetype, false); 6913 set_buddy_order(current_buddy, high); 6914 } 6915 } 6916 6917 /* 6918 * Take a page that will be marked as poisoned off the buddy allocator. 
6919 */ 6920 bool take_page_off_buddy(struct page *page) 6921 { 6922 struct zone *zone = page_zone(page); 6923 unsigned long pfn = page_to_pfn(page); 6924 unsigned long flags; 6925 unsigned int order; 6926 bool ret = false; 6927 6928 spin_lock_irqsave(&zone->lock, flags); 6929 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6930 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6931 int page_order = buddy_order(page_head); 6932 6933 if (PageBuddy(page_head) && page_order >= order) { 6934 unsigned long pfn_head = page_to_pfn(page_head); 6935 int migratetype = get_pfnblock_migratetype(page_head, 6936 pfn_head); 6937 6938 del_page_from_free_list(page_head, zone, page_order, 6939 migratetype); 6940 break_down_buddy_pages(zone, page_head, page, 0, 6941 page_order, migratetype); 6942 SetPageHWPoisonTakenOff(page); 6943 ret = true; 6944 break; 6945 } 6946 if (page_count(page_head) > 0) 6947 break; 6948 } 6949 spin_unlock_irqrestore(&zone->lock, flags); 6950 return ret; 6951 } 6952 6953 /* 6954 * Cancel takeoff done by take_page_off_buddy(). 6955 */ 6956 bool put_page_back_buddy(struct page *page) 6957 { 6958 struct zone *zone = page_zone(page); 6959 unsigned long flags; 6960 bool ret = false; 6961 6962 spin_lock_irqsave(&zone->lock, flags); 6963 if (put_page_testzero(page)) { 6964 unsigned long pfn = page_to_pfn(page); 6965 int migratetype = get_pfnblock_migratetype(page, pfn); 6966 6967 ClearPageHWPoisonTakenOff(page); 6968 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 6969 if (TestClearPageHWPoison(page)) { 6970 ret = true; 6971 } 6972 } 6973 spin_unlock_irqrestore(&zone->lock, flags); 6974 6975 return ret; 6976 } 6977 #endif 6978 6979 #ifdef CONFIG_ZONE_DMA 6980 bool has_managed_dma(void) 6981 { 6982 struct pglist_data *pgdat; 6983 6984 for_each_online_pgdat(pgdat) { 6985 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 6986 6987 if (managed_zone(zone)) 6988 return true; 6989 } 6990 return false; 6991 } 6992 #endif /* CONFIG_ZONE_DMA */ 6993 6994 #ifdef CONFIG_UNACCEPTED_MEMORY 6995 6996 /* Counts number of zones with unaccepted pages. 
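 * A static key keeps the allocator fast path cheap: once every zone's
 * unaccepted list is empty, cond_accept_memory() reduces to a single
 * patched-out branch.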
*/ 6997 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 6998 6999 static bool lazy_accept = true; 7000 7001 static int __init accept_memory_parse(char *p) 7002 { 7003 if (!strcmp(p, "lazy")) { 7004 lazy_accept = true; 7005 return 0; 7006 } else if (!strcmp(p, "eager")) { 7007 lazy_accept = false; 7008 return 0; 7009 } else { 7010 return -EINVAL; 7011 } 7012 } 7013 early_param("accept_memory", accept_memory_parse); 7014 7015 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7016 { 7017 phys_addr_t start = page_to_phys(page); 7018 7019 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 7020 } 7021 7022 static void __accept_page(struct zone *zone, unsigned long *flags, 7023 struct page *page) 7024 { 7025 bool last; 7026 7027 list_del(&page->lru); 7028 last = list_empty(&zone->unaccepted_pages); 7029 7030 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7031 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7032 __ClearPageUnaccepted(page); 7033 spin_unlock_irqrestore(&zone->lock, *flags); 7034 7035 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 7036 7037 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 7038 7039 if (last) 7040 static_branch_dec(&zones_with_unaccepted_pages); 7041 } 7042 7043 void accept_page(struct page *page) 7044 { 7045 struct zone *zone = page_zone(page); 7046 unsigned long flags; 7047 7048 spin_lock_irqsave(&zone->lock, flags); 7049 if (!PageUnaccepted(page)) { 7050 spin_unlock_irqrestore(&zone->lock, flags); 7051 return; 7052 } 7053 7054 /* Unlocks zone->lock */ 7055 __accept_page(zone, &flags, page); 7056 } 7057 7058 static bool try_to_accept_memory_one(struct zone *zone) 7059 { 7060 unsigned long flags; 7061 struct page *page; 7062 7063 spin_lock_irqsave(&zone->lock, flags); 7064 page = list_first_entry_or_null(&zone->unaccepted_pages, 7065 struct page, lru); 7066 if (!page) { 7067 spin_unlock_irqrestore(&zone->lock, flags); 7068 return false; 7069 } 7070 7071 /* Unlocks zone->lock */ 7072 __accept_page(zone, &flags, page); 7073 7074 return true; 7075 } 7076 7077 static inline bool has_unaccepted_memory(void) 7078 { 7079 return static_branch_unlikely(&zones_with_unaccepted_pages); 7080 } 7081 7082 static bool cond_accept_memory(struct zone *zone, unsigned int order) 7083 { 7084 long to_accept, wmark; 7085 bool ret = false; 7086 7087 if (!has_unaccepted_memory()) 7088 return false; 7089 7090 if (list_empty(&zone->unaccepted_pages)) 7091 return false; 7092 7093 wmark = promo_wmark_pages(zone); 7094 7095 /* 7096 * Watermarks have not been initialized yet. 7097 * 7098 * Accepting one MAX_ORDER page to ensure progress. 7099 */ 7100 if (!wmark) 7101 return try_to_accept_memory_one(zone); 7102 7103 /* How much to accept to get to promo watermark? 
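 * Hypothetical numbers: wmark = 60000 pages, NR_FREE_PAGES = 50000,
	 * unusable free pages = 2000 and NR_UNACCEPTED = 20000 give
	 * to_accept = 60000 - (50000 - 2000 - 20000) = 32000 pages, so we keep
	 * accepting MAX_ORDER blocks until roughly that many pages are in.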
*/ 7104 to_accept = wmark - 7105 (zone_page_state(zone, NR_FREE_PAGES) - 7106 __zone_watermark_unusable_free(zone, order, 0) - 7107 zone_page_state(zone, NR_UNACCEPTED)); 7108 7109 while (to_accept > 0) { 7110 if (!try_to_accept_memory_one(zone)) 7111 break; 7112 ret = true; 7113 to_accept -= MAX_ORDER_NR_PAGES; 7114 } 7115 7116 return ret; 7117 } 7118 7119 static bool __free_unaccepted(struct page *page) 7120 { 7121 struct zone *zone = page_zone(page); 7122 unsigned long flags; 7123 bool first = false; 7124 7125 if (!lazy_accept) 7126 return false; 7127 7128 spin_lock_irqsave(&zone->lock, flags); 7129 first = list_empty(&zone->unaccepted_pages); 7130 list_add_tail(&page->lru, &zone->unaccepted_pages); 7131 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7132 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7133 __SetPageUnaccepted(page); 7134 spin_unlock_irqrestore(&zone->lock, flags); 7135 7136 if (first) 7137 static_branch_inc(&zones_with_unaccepted_pages); 7138 7139 return true; 7140 } 7141 7142 #else 7143 7144 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7145 { 7146 return false; 7147 } 7148 7149 static bool cond_accept_memory(struct zone *zone, unsigned int order) 7150 { 7151 return false; 7152 } 7153 7154 static bool __free_unaccepted(struct page *page) 7155 { 7156 BUILD_BUG(); 7157 return false; 7158 } 7159 7160 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7161