1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17 #include <linux/stddef.h> 18 #include <linux/mm.h> 19 #include <linux/highmem.h> 20 #include <linux/interrupt.h> 21 #include <linux/jiffies.h> 22 #include <linux/compiler.h> 23 #include <linux/kernel.h> 24 #include <linux/kasan.h> 25 #include <linux/kmsan.h> 26 #include <linux/module.h> 27 #include <linux/suspend.h> 28 #include <linux/ratelimit.h> 29 #include <linux/oom.h> 30 #include <linux/topology.h> 31 #include <linux/sysctl.h> 32 #include <linux/cpu.h> 33 #include <linux/cpuset.h> 34 #include <linux/folio_batch.h> 35 #include <linux/memory_hotplug.h> 36 #include <linux/nodemask.h> 37 #include <linux/vmstat.h> 38 #include <linux/fault-inject.h> 39 #include <linux/compaction.h> 40 #include <trace/events/kmem.h> 41 #include <trace/events/oom.h> 42 #include <linux/prefetch.h> 43 #include <linux/mm_inline.h> 44 #include <linux/mmu_notifier.h> 45 #include <linux/migrate.h> 46 #include <linux/sched/mm.h> 47 #include <linux/page_owner.h> 48 #include <linux/page_table_check.h> 49 #include <linux/memcontrol.h> 50 #include <linux/ftrace.h> 51 #include <linux/lockdep.h> 52 #include <linux/psi.h> 53 #include <linux/khugepaged.h> 54 #include <linux/delayacct.h> 55 #include <linux/cacheinfo.h> 56 #include <linux/pgalloc_tag.h> 57 #include <asm/div64.h> 58 #include "internal.h" 59 #include "shuffle.h" 60 #include "page_reporting.h" 61 62 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 63 typedef int __bitwise fpi_t; 64 65 /* No special request */ 66 #define FPI_NONE ((__force fpi_t)0) 67 68 /* 69 * Skip free page reporting notification for the (possibly merged) page. 70 * This does not hinder free page reporting from grabbing the page, 71 * reporting it and marking it "reported" - it only skips notifying 72 * the free page reporting infrastructure about a newly freed page. For 73 * example, used when temporarily pulling a page from a freelist and 74 * putting it back unmodified. 75 */ 76 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 77 78 /* 79 * Place the (possibly merged) page to the tail of the freelist. Will ignore 80 * page shuffling (relevant code - e.g., memory onlining - is expected to 81 * shuffle the whole zone). 82 * 83 * Note: No code should rely on this flag for correctness - it's purely 84 * to allow for optimizations when handing back either fresh pages 85 * (memory onlining) or untouched pages (page isolation, free page 86 * reporting). 87 */ 88 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 89 90 /* Free the page without taking locks. Rely on trylock only. */ 91 #define FPI_TRYLOCK ((__force fpi_t)BIT(2)) 92 93 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 94 static DEFINE_MUTEX(pcp_batch_high_lock); 95 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 96 97 /* 98 * Locking a pcp requires a PCP lookup followed by a spinlock. 
To avoid 99 * a migration causing the wrong PCP to be locked and remote memory being 100 * potentially allocated, pin the task to the CPU for the lookup+lock. 101 * preempt_disable is used on !RT because it is faster than migrate_disable. 102 * migrate_disable is used on RT because otherwise RT spinlock usage is 103 * interfered with and a high priority task cannot preempt the allocator. 104 */ 105 #ifndef CONFIG_PREEMPT_RT 106 #define pcpu_task_pin() preempt_disable() 107 #define pcpu_task_unpin() preempt_enable() 108 #else 109 #define pcpu_task_pin() migrate_disable() 110 #define pcpu_task_unpin() migrate_enable() 111 #endif 112 113 /* 114 * A helper to lookup and trylock pcp with embedded spinlock. 115 * The return value should be used with the unlock helper. 116 * NULL return value means the trylock failed. 117 */ 118 #ifdef CONFIG_SMP 119 #define pcp_spin_trylock(ptr) \ 120 ({ \ 121 struct per_cpu_pages *_ret; \ 122 pcpu_task_pin(); \ 123 _ret = this_cpu_ptr(ptr); \ 124 if (!spin_trylock(&_ret->lock)) { \ 125 pcpu_task_unpin(); \ 126 _ret = NULL; \ 127 } \ 128 _ret; \ 129 }) 130 131 #define pcp_spin_unlock(ptr) \ 132 ({ \ 133 spin_unlock(&ptr->lock); \ 134 pcpu_task_unpin(); \ 135 }) 136 137 /* 138 * On CONFIG_SMP=n the UP implementation of spin_trylock() never fails and thus 139 * is not compatible with our locking scheme. However we do not need pcp for 140 * scalability in the first place, so just make all the trylocks fail and take 141 * the slow path unconditionally. 142 */ 143 #else 144 #define pcp_spin_trylock(ptr) \ 145 NULL 146 147 #define pcp_spin_unlock(ptr) \ 148 BUG_ON(1) 149 #endif 150 151 /* 152 * In some cases we do not need to pin the task to the CPU because we are 153 * already given a specific cpu's pcp pointer. 154 */ 155 #define pcp_spin_lock_nopin(ptr) \ 156 spin_lock(&(ptr)->lock) 157 #define pcp_spin_unlock_nopin(ptr) \ 158 spin_unlock(&(ptr)->lock) 159 160 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 161 DEFINE_PER_CPU(int, numa_node); 162 EXPORT_PER_CPU_SYMBOL(numa_node); 163 #endif 164 165 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 166 167 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 168 /* 169 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 170 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 171 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 172 * defined in <linux/topology.h>. 173 */ 174 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 175 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 176 #endif 177 178 static DEFINE_MUTEX(pcpu_drain_mutex); 179 180 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 181 volatile unsigned long latent_entropy __latent_entropy; 182 EXPORT_SYMBOL(latent_entropy); 183 #endif 184 185 /* 186 * Array of node states. 
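 *
 * Each entry is a nodemask recording which nodes are currently in that
 * state (see the N_* initializers below). Callers typically test or walk
 * these via the <linux/nodemask.h> helpers, e.g. node_state(nid, N_MEMORY)
 * or for_each_node_state(nid, N_MEMORY), rather than indexing the array
 * directly.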
187 */ 188 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 189 [N_POSSIBLE] = NODE_MASK_ALL, 190 [N_ONLINE] = { { [0] = 1UL } }, 191 #ifndef CONFIG_NUMA 192 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 193 #ifdef CONFIG_HIGHMEM 194 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 195 #endif 196 [N_MEMORY] = { { [0] = 1UL } }, 197 [N_CPU] = { { [0] = 1UL } }, 198 #endif /* NUMA */ 199 }; 200 EXPORT_SYMBOL(node_states); 201 202 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 203 204 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 205 unsigned int pageblock_order __read_mostly; 206 #endif 207 208 static void __free_pages_ok(struct page *page, unsigned int order, 209 fpi_t fpi_flags); 210 211 /* 212 * results with 256, 32 in the lowmem_reserve sysctl: 213 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 214 * 1G machine -> (16M dma, 784M normal, 224M high) 215 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 216 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 217 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 218 * 219 * TBD: should special case ZONE_DMA32 machines here - in those we normally 220 * don't need any ZONE_NORMAL reservation 221 */ 222 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 223 #ifdef CONFIG_ZONE_DMA 224 [ZONE_DMA] = 256, 225 #endif 226 #ifdef CONFIG_ZONE_DMA32 227 [ZONE_DMA32] = 256, 228 #endif 229 [ZONE_NORMAL] = 32, 230 #ifdef CONFIG_HIGHMEM 231 [ZONE_HIGHMEM] = 0, 232 #endif 233 [ZONE_MOVABLE] = 0, 234 }; 235 236 char * const zone_names[MAX_NR_ZONES] = { 237 #ifdef CONFIG_ZONE_DMA 238 "DMA", 239 #endif 240 #ifdef CONFIG_ZONE_DMA32 241 "DMA32", 242 #endif 243 "Normal", 244 #ifdef CONFIG_HIGHMEM 245 "HighMem", 246 #endif 247 "Movable", 248 #ifdef CONFIG_ZONE_DEVICE 249 "Device", 250 #endif 251 }; 252 253 const char * const migratetype_names[MIGRATE_TYPES] = { 254 "Unmovable", 255 "Movable", 256 "Reclaimable", 257 "HighAtomic", 258 #ifdef CONFIG_CMA 259 "CMA", 260 #endif 261 #ifdef CONFIG_MEMORY_ISOLATION 262 "Isolate", 263 #endif 264 }; 265 266 int min_free_kbytes = 1024; 267 int user_min_free_kbytes = -1; 268 static int watermark_boost_factor __read_mostly = 15000; 269 static int watermark_scale_factor = 10; 270 int defrag_mode; 271 272 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 273 int movable_zone; 274 EXPORT_SYMBOL(movable_zone); 275 276 #if MAX_NUMNODES > 1 277 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 278 unsigned int nr_online_nodes __read_mostly = 1; 279 EXPORT_SYMBOL(nr_node_ids); 280 EXPORT_SYMBOL(nr_online_nodes); 281 #endif 282 283 static bool page_contains_unaccepted(struct page *page, unsigned int order); 284 static bool cond_accept_memory(struct zone *zone, unsigned int order, 285 int alloc_flags); 286 static bool __free_unaccepted(struct page *page); 287 288 int page_group_by_mobility_disabled __read_mostly; 289 290 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 291 /* 292 * During boot we initialize deferred pages on-demand, as needed, but once 293 * page_alloc_init_late() has finished, the deferred pages are all initialized, 294 * and we can permanently disable that path. 295 */ 296 DEFINE_STATIC_KEY_TRUE(deferred_pages); 297 298 static inline bool deferred_pages_enabled(void) 299 { 300 return static_branch_unlikely(&deferred_pages); 301 } 302 303 /* 304 * deferred_grow_zone() is __init, but it is called from 305 * get_page_from_freelist() during early boot until deferred_pages permanently 306 * disables this call. 
This is why we have refdata wrapper to avoid warning, 307 * and to ensure that the function body gets unloaded. 308 */ 309 static bool __ref 310 _deferred_grow_zone(struct zone *zone, unsigned int order) 311 { 312 return deferred_grow_zone(zone, order); 313 } 314 #else 315 static inline bool deferred_pages_enabled(void) 316 { 317 return false; 318 } 319 320 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) 321 { 322 return false; 323 } 324 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 325 326 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 327 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 328 unsigned long pfn) 329 { 330 #ifdef CONFIG_SPARSEMEM 331 return section_to_usemap(__pfn_to_section(pfn)); 332 #else 333 return page_zone(page)->pageblock_flags; 334 #endif /* CONFIG_SPARSEMEM */ 335 } 336 337 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 338 { 339 #ifdef CONFIG_SPARSEMEM 340 pfn &= (PAGES_PER_SECTION-1); 341 #else 342 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 343 #endif /* CONFIG_SPARSEMEM */ 344 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 345 } 346 347 static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit) 348 { 349 return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS; 350 } 351 352 static __always_inline void 353 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn, 354 unsigned long **bitmap_word, unsigned long *bitidx) 355 { 356 unsigned long *bitmap; 357 unsigned long word_bitidx; 358 359 #ifdef CONFIG_MEMORY_ISOLATION 360 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8); 361 #else 362 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 363 #endif 364 BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK); 365 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 366 367 bitmap = get_pageblock_bitmap(page, pfn); 368 *bitidx = pfn_to_bitidx(page, pfn); 369 word_bitidx = *bitidx / BITS_PER_LONG; 370 *bitidx &= (BITS_PER_LONG - 1); 371 *bitmap_word = &bitmap[word_bitidx]; 372 } 373 374 375 /** 376 * __get_pfnblock_flags_mask - Return the requested group of flags for 377 * a pageblock_nr_pages block of pages 378 * @page: The page within the block of interest 379 * @pfn: The target page frame number 380 * @mask: mask of bits that the caller is interested in 381 * 382 * Return: pageblock_bits flags 383 */ 384 static unsigned long __get_pfnblock_flags_mask(const struct page *page, 385 unsigned long pfn, 386 unsigned long mask) 387 { 388 unsigned long *bitmap_word; 389 unsigned long bitidx; 390 unsigned long word; 391 392 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 393 /* 394 * This races, without locks, with set_pfnblock_migratetype(). Ensure 395 * a consistent read of the memory array, so that results, even though 396 * racy, are not corrupted. 
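 *
 * Roughly, the lockless read below is just:
 *
 *   word = READ_ONCE(*bitmap_word);
 *   return (word >> bitidx) & mask;
 *
 * while writers update the same word with try_cmpxchg() in
 * __set_pfnblock_flags_mask(), so a racing read sees either the old or the
 * new flags, never a torn mix.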
397 */ 398 word = READ_ONCE(*bitmap_word); 399 return (word >> bitidx) & mask; 400 } 401 402 /** 403 * get_pfnblock_bit - Check if a standalone bit of a pageblock is set 404 * @page: The page within the block of interest 405 * @pfn: The target page frame number 406 * @pb_bit: pageblock bit to check 407 * 408 * Return: true if the bit is set, otherwise false 409 */ 410 bool get_pfnblock_bit(const struct page *page, unsigned long pfn, 411 enum pageblock_bits pb_bit) 412 { 413 unsigned long *bitmap_word; 414 unsigned long bitidx; 415 416 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 417 return false; 418 419 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 420 421 return test_bit(bitidx + pb_bit, bitmap_word); 422 } 423 424 /** 425 * get_pfnblock_migratetype - Return the migratetype of a pageblock 426 * @page: The page within the block of interest 427 * @pfn: The target page frame number 428 * 429 * Return: The migratetype of the pageblock 430 * 431 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn 432 * to save a call to page_to_pfn(). 433 */ 434 __always_inline enum migratetype 435 get_pfnblock_migratetype(const struct page *page, unsigned long pfn) 436 { 437 unsigned long mask = MIGRATETYPE_AND_ISO_MASK; 438 unsigned long flags; 439 440 flags = __get_pfnblock_flags_mask(page, pfn, mask); 441 442 #ifdef CONFIG_MEMORY_ISOLATION 443 if (flags & BIT(PB_migrate_isolate)) 444 return MIGRATE_ISOLATE; 445 #endif 446 return flags & MIGRATETYPE_MASK; 447 } 448 449 /** 450 * __set_pfnblock_flags_mask - Set the requested group of flags for 451 * a pageblock_nr_pages block of pages 452 * @page: The page within the block of interest 453 * @pfn: The target page frame number 454 * @flags: The flags to set 455 * @mask: mask of bits that the caller is interested in 456 */ 457 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn, 458 unsigned long flags, unsigned long mask) 459 { 460 unsigned long *bitmap_word; 461 unsigned long bitidx; 462 unsigned long word; 463 464 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 465 466 mask <<= bitidx; 467 flags <<= bitidx; 468 469 word = READ_ONCE(*bitmap_word); 470 do { 471 } while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags)); 472 } 473 474 /** 475 * set_pfnblock_bit - Set a standalone bit of a pageblock 476 * @page: The page within the block of interest 477 * @pfn: The target page frame number 478 * @pb_bit: pageblock bit to set 479 */ 480 void set_pfnblock_bit(const struct page *page, unsigned long pfn, 481 enum pageblock_bits pb_bit) 482 { 483 unsigned long *bitmap_word; 484 unsigned long bitidx; 485 486 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 487 return; 488 489 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 490 491 set_bit(bitidx + pb_bit, bitmap_word); 492 } 493 494 /** 495 * clear_pfnblock_bit - Clear a standalone bit of a pageblock 496 * @page: The page within the block of interest 497 * @pfn: The target page frame number 498 * @pb_bit: pageblock bit to clear 499 */ 500 void clear_pfnblock_bit(const struct page *page, unsigned long pfn, 501 enum pageblock_bits pb_bit) 502 { 503 unsigned long *bitmap_word; 504 unsigned long bitidx; 505 506 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 507 return; 508 509 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 510 511 clear_bit(bitidx + pb_bit, bitmap_word); 512 } 513 514 /** 515 * set_pageblock_migratetype - Set the migratetype of a pageblock 516 * @page: The page within the block of 
interest 517 * @migratetype: migratetype to set 518 */ 519 static void set_pageblock_migratetype(struct page *page, 520 enum migratetype migratetype) 521 { 522 if (unlikely(page_group_by_mobility_disabled && 523 migratetype < MIGRATE_PCPTYPES)) 524 migratetype = MIGRATE_UNMOVABLE; 525 526 #ifdef CONFIG_MEMORY_ISOLATION 527 if (migratetype == MIGRATE_ISOLATE) { 528 VM_WARN_ONCE(1, 529 "Use set_pageblock_isolate() for pageblock isolation"); 530 return; 531 } 532 VM_WARN_ONCE(get_pageblock_isolate(page), 533 "Use clear_pageblock_isolate() to unisolate pageblock"); 534 /* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */ 535 #endif 536 __set_pfnblock_flags_mask(page, page_to_pfn(page), 537 (unsigned long)migratetype, 538 MIGRATETYPE_AND_ISO_MASK); 539 } 540 541 void __meminit init_pageblock_migratetype(struct page *page, 542 enum migratetype migratetype, 543 bool isolate) 544 { 545 unsigned long flags; 546 547 if (unlikely(page_group_by_mobility_disabled && 548 migratetype < MIGRATE_PCPTYPES)) 549 migratetype = MIGRATE_UNMOVABLE; 550 551 flags = migratetype; 552 553 #ifdef CONFIG_MEMORY_ISOLATION 554 if (migratetype == MIGRATE_ISOLATE) { 555 VM_WARN_ONCE( 556 1, 557 "Set isolate=true to isolate pageblock with a migratetype"); 558 return; 559 } 560 if (isolate) 561 flags |= BIT(PB_migrate_isolate); 562 #endif 563 __set_pfnblock_flags_mask(page, page_to_pfn(page), flags, 564 MIGRATETYPE_AND_ISO_MASK); 565 } 566 567 #ifdef CONFIG_DEBUG_VM 568 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 569 { 570 int ret; 571 unsigned seq; 572 unsigned long pfn = page_to_pfn(page); 573 unsigned long sp, start_pfn; 574 575 do { 576 seq = zone_span_seqbegin(zone); 577 start_pfn = zone->zone_start_pfn; 578 sp = zone->spanned_pages; 579 ret = !zone_spans_pfn(zone, pfn); 580 } while (zone_span_seqretry(zone, seq)); 581 582 if (ret) 583 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 584 pfn, zone_to_nid(zone), zone->name, 585 start_pfn, start_pfn + sp); 586 587 return ret; 588 } 589 590 /* 591 * Temporary debugging check for pages not lying within a given zone. 592 */ 593 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 594 { 595 if (page_outside_zone_boundaries(zone, page)) 596 return true; 597 if (zone != page_zone(page)) 598 return true; 599 600 return false; 601 } 602 #else 603 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 604 { 605 return false; 606 } 607 #endif 608 609 static void bad_page(struct page *page, const char *reason) 610 { 611 static unsigned long resume; 612 static unsigned long nr_shown; 613 static unsigned long nr_unshown; 614 615 /* 616 * Allow a burst of 60 reports, then keep quiet for that minute; 617 * or allow a steady drip of one report per second. 
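 *
 * For example: the first report arms a 60 second window (resume); once 60
 * reports have been shown, further reports inside the window only bump
 * nr_unshown, and the next report after the window expires prints the
 * suppressed total and starts a new burst.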
 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		 current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	if (PageBuddy(page))
		__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bool movable;

	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != HPAGE_PMD_ORDER);

		movable = migratetype == MIGRATE_MOVABLE;

		return NR_LOWORDER_PCP_LISTS + movable;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex >= NR_LOWORDER_PCP_LISTS)
		order = HPAGE_PMD_ORDER;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == HPAGE_PMD_ORDER)
		return true;
#endif
	return false;
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock
	 * unless compaction is also requesting movable pages.
748 * This might let an unmovable request use a reclaimable pageblock 749 * and vice-versa but no more than normal fallback logic which can 750 * have trouble finding a high-order free page. 751 */ 752 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 753 capc->cc->migratetype != MIGRATE_MOVABLE) 754 return false; 755 756 if (migratetype != capc->cc->migratetype) 757 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, 758 capc->cc->migratetype, migratetype); 759 760 capc->page = page; 761 return true; 762 } 763 764 #else 765 static inline struct capture_control *task_capc(struct zone *zone) 766 { 767 return NULL; 768 } 769 770 static inline bool 771 compaction_capture(struct capture_control *capc, struct page *page, 772 int order, int migratetype) 773 { 774 return false; 775 } 776 #endif /* CONFIG_COMPACTION */ 777 778 static inline void account_freepages(struct zone *zone, int nr_pages, 779 int migratetype) 780 { 781 lockdep_assert_held(&zone->lock); 782 783 if (is_migrate_isolate(migratetype)) 784 return; 785 786 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 787 788 if (is_migrate_cma(migratetype)) 789 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 790 else if (migratetype == MIGRATE_HIGHATOMIC) 791 WRITE_ONCE(zone->nr_free_highatomic, 792 zone->nr_free_highatomic + nr_pages); 793 } 794 795 /* Used for pages not on another list */ 796 static inline void __add_to_free_list(struct page *page, struct zone *zone, 797 unsigned int order, int migratetype, 798 bool tail) 799 { 800 struct free_area *area = &zone->free_area[order]; 801 int nr_pages = 1 << order; 802 803 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 804 "page type is %d, passed migratetype is %d (nr=%d)\n", 805 get_pageblock_migratetype(page), migratetype, nr_pages); 806 807 if (tail) 808 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 809 else 810 list_add(&page->buddy_list, &area->free_list[migratetype]); 811 area->nr_free++; 812 813 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 814 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 815 } 816 817 /* 818 * Used for pages which are on another list. Move the pages to the tail 819 * of the list - so the moved pages won't immediately be considered for 820 * allocation again (e.g., optimization for memory onlining). 
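 *
 * For example, __move_freepages_block() below walks a pageblock whose type
 * is changing and calls move_to_free_list(page, zone, order, old_mt, new_mt)
 * for each buddy it finds; the freepage accounting is transferred from
 * old_mt to new_mt here as well.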
821 */ 822 static inline void move_to_free_list(struct page *page, struct zone *zone, 823 unsigned int order, int old_mt, int new_mt) 824 { 825 struct free_area *area = &zone->free_area[order]; 826 int nr_pages = 1 << order; 827 828 /* Free page moving can fail, so it happens before the type update */ 829 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, 830 "page type is %d, passed migratetype is %d (nr=%d)\n", 831 get_pageblock_migratetype(page), old_mt, nr_pages); 832 833 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); 834 835 account_freepages(zone, -nr_pages, old_mt); 836 account_freepages(zone, nr_pages, new_mt); 837 838 if (order >= pageblock_order && 839 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { 840 if (!is_migrate_isolate(old_mt)) 841 nr_pages = -nr_pages; 842 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 843 } 844 } 845 846 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, 847 unsigned int order, int migratetype) 848 { 849 int nr_pages = 1 << order; 850 851 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 852 "page type is %d, passed migratetype is %d (nr=%d)\n", 853 get_pageblock_migratetype(page), migratetype, nr_pages); 854 855 /* clear reported state and update reported page count */ 856 if (page_reported(page)) 857 __ClearPageReported(page); 858 859 list_del(&page->buddy_list); 860 __ClearPageBuddy(page); 861 set_page_private(page, 0); 862 zone->free_area[order].nr_free--; 863 864 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 865 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); 866 } 867 868 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 869 unsigned int order, int migratetype) 870 { 871 __del_page_from_free_list(page, zone, order, migratetype); 872 account_freepages(zone, -(1 << order), migratetype); 873 } 874 875 static inline struct page *get_page_from_free_area(struct free_area *area, 876 int migratetype) 877 { 878 return list_first_entry_or_null(&area->free_list[migratetype], 879 struct page, buddy_list); 880 } 881 882 /* 883 * If this is less than the 2nd largest possible page, check if the buddy 884 * of the next-higher order is free. If it is, it's possible 885 * that pages are being freed that will coalesce soon. In case, 886 * that is happening, add the free page to the tail of the list 887 * so it's less likely to be used soon and more likely to be merged 888 * as a 2-level higher order page 889 */ 890 static inline bool 891 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 892 struct page *page, unsigned int order) 893 { 894 unsigned long higher_page_pfn; 895 struct page *higher_page; 896 897 if (order >= MAX_PAGE_ORDER - 1) 898 return false; 899 900 higher_page_pfn = buddy_pfn & pfn; 901 higher_page = page + (higher_page_pfn - pfn); 902 903 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 904 NULL) != NULL; 905 } 906 907 static void change_pageblock_range(struct page *pageblock_page, 908 int start_order, int migratetype) 909 { 910 int nr_pageblocks = 1 << (start_order - pageblock_order); 911 912 while (nr_pageblocks--) { 913 set_pageblock_migratetype(pageblock_page, migratetype); 914 pageblock_page += pageblock_nr_pages; 915 } 916 } 917 918 /* 919 * Freeing function for a buddy system allocator. 920 * 921 * The concept of a buddy system is to maintain direct-mapped table 922 * (containing bit values) for memory blocks of various "orders". 
923 * The bottom level table contains the map for the smallest allocatable 924 * units of memory (here, pages), and each level above it describes 925 * pairs of units from the levels below, hence, "buddies". 926 * At a high level, all that happens here is marking the table entry 927 * at the bottom level available, and propagating the changes upward 928 * as necessary, plus some accounting needed to play nicely with other 929 * parts of the VM system. 930 * At each level, we keep a list of pages, which are heads of continuous 931 * free pages of length of (1 << order) and marked with PageBuddy. 932 * Page's order is recorded in page_private(page) field. 933 * So when we are allocating or freeing one, we can derive the state of the 934 * other. That is, if we allocate a small block, and both were 935 * free, the remainder of the region must be split into blocks. 936 * If a block is freed, and its buddy is also free, then this 937 * triggers coalescing into a block of larger size. 938 * 939 * -- nyc 940 */ 941 942 static inline void __free_one_page(struct page *page, 943 unsigned long pfn, 944 struct zone *zone, unsigned int order, 945 int migratetype, fpi_t fpi_flags) 946 { 947 struct capture_control *capc = task_capc(zone); 948 unsigned long buddy_pfn = 0; 949 unsigned long combined_pfn; 950 struct page *buddy; 951 bool to_tail; 952 953 VM_BUG_ON(!zone_is_initialized(zone)); 954 VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page); 955 956 VM_BUG_ON(migratetype == -1); 957 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 958 VM_BUG_ON_PAGE(bad_range(zone, page), page); 959 960 account_freepages(zone, 1 << order, migratetype); 961 962 while (order < MAX_PAGE_ORDER) { 963 int buddy_mt = migratetype; 964 965 if (compaction_capture(capc, page, order, migratetype)) { 966 account_freepages(zone, -(1 << order), migratetype); 967 return; 968 } 969 970 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 971 if (!buddy) 972 goto done_merging; 973 974 if (unlikely(order >= pageblock_order)) { 975 /* 976 * We want to prevent merge between freepages on pageblock 977 * without fallbacks and normal pageblock. Without this, 978 * pageblock isolation could cause incorrect freepage or CMA 979 * accounting or HIGHATOMIC accounting. 980 */ 981 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 982 983 if (migratetype != buddy_mt && 984 (!migratetype_is_mergeable(migratetype) || 985 !migratetype_is_mergeable(buddy_mt))) 986 goto done_merging; 987 } 988 989 /* 990 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 991 * merge with it and move up one order. 992 */ 993 if (page_is_guard(buddy)) 994 clear_page_guard(zone, buddy, order); 995 else 996 __del_page_from_free_list(buddy, zone, order, buddy_mt); 997 998 if (unlikely(buddy_mt != migratetype)) { 999 /* 1000 * Match buddy type. This ensures that an 1001 * expand() down the line puts the sub-blocks 1002 * on the right freelists. 
1003 */ 1004 change_pageblock_range(buddy, order, migratetype); 1005 } 1006 1007 combined_pfn = buddy_pfn & pfn; 1008 page = page + (combined_pfn - pfn); 1009 pfn = combined_pfn; 1010 order++; 1011 } 1012 1013 done_merging: 1014 set_buddy_order(page, order); 1015 1016 if (fpi_flags & FPI_TO_TAIL) 1017 to_tail = true; 1018 else if (is_shuffle_order(order)) 1019 to_tail = shuffle_pick_tail(); 1020 else 1021 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 1022 1023 __add_to_free_list(page, zone, order, migratetype, to_tail); 1024 1025 /* Notify page reporting subsystem of freed page */ 1026 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 1027 page_reporting_notify_free(order); 1028 } 1029 1030 /* 1031 * A bad page could be due to a number of fields. Instead of multiple branches, 1032 * try and check multiple fields with one check. The caller must do a detailed 1033 * check if necessary. 1034 */ 1035 static inline bool page_expected_state(struct page *page, 1036 unsigned long check_flags) 1037 { 1038 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1039 return false; 1040 1041 if (unlikely((unsigned long)page->mapping | 1042 page_ref_count(page) | 1043 #ifdef CONFIG_MEMCG 1044 page->memcg_data | 1045 #endif 1046 (page->flags.f & check_flags))) 1047 return false; 1048 1049 return true; 1050 } 1051 1052 static const char *page_bad_reason(struct page *page, unsigned long flags) 1053 { 1054 const char *bad_reason = NULL; 1055 1056 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1057 bad_reason = "nonzero mapcount"; 1058 if (unlikely(page->mapping != NULL)) 1059 bad_reason = "non-NULL mapping"; 1060 if (unlikely(page_ref_count(page) != 0)) 1061 bad_reason = "nonzero _refcount"; 1062 if (unlikely(page->flags.f & flags)) { 1063 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 1064 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 1065 else 1066 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 1067 } 1068 #ifdef CONFIG_MEMCG 1069 if (unlikely(page->memcg_data)) 1070 bad_reason = "page still charged to cgroup"; 1071 #endif 1072 return bad_reason; 1073 } 1074 1075 static inline bool free_page_is_bad(struct page *page) 1076 { 1077 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 1078 return false; 1079 1080 /* Something has gone sideways, find it */ 1081 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 1082 return true; 1083 } 1084 1085 static inline bool is_check_pages_enabled(void) 1086 { 1087 return static_branch_unlikely(&check_pages_enabled); 1088 } 1089 1090 static int free_tail_page_prepare(struct page *head_page, struct page *page) 1091 { 1092 struct folio *folio = (struct folio *)head_page; 1093 int ret = 1; 1094 1095 /* 1096 * We rely page->lru.next never has bit 0 set, unless the page 1097 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
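 *
 * (PageTail() is encoded in bit 0 of the compound_head word, which overlays
 * lru.next in struct page; the BUILD_BUG_ON() below ensures the list poison
 * value can never be mistaken for a tail-page encoding.)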
1098 */ 1099 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 1100 1101 if (!is_check_pages_enabled()) { 1102 ret = 0; 1103 goto out; 1104 } 1105 switch (page - head_page) { 1106 case 1: 1107 /* the first tail page: these may be in place of ->mapping */ 1108 if (unlikely(folio_large_mapcount(folio))) { 1109 bad_page(page, "nonzero large_mapcount"); 1110 goto out; 1111 } 1112 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && 1113 unlikely(atomic_read(&folio->_nr_pages_mapped))) { 1114 bad_page(page, "nonzero nr_pages_mapped"); 1115 goto out; 1116 } 1117 if (IS_ENABLED(CONFIG_MM_ID)) { 1118 if (unlikely(folio->_mm_id_mapcount[0] != -1)) { 1119 bad_page(page, "nonzero mm mapcount 0"); 1120 goto out; 1121 } 1122 if (unlikely(folio->_mm_id_mapcount[1] != -1)) { 1123 bad_page(page, "nonzero mm mapcount 1"); 1124 goto out; 1125 } 1126 } 1127 if (IS_ENABLED(CONFIG_64BIT)) { 1128 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1129 bad_page(page, "nonzero entire_mapcount"); 1130 goto out; 1131 } 1132 if (unlikely(atomic_read(&folio->_pincount))) { 1133 bad_page(page, "nonzero pincount"); 1134 goto out; 1135 } 1136 } 1137 break; 1138 case 2: 1139 /* the second tail page: deferred_list overlaps ->mapping */ 1140 if (unlikely(!list_empty(&folio->_deferred_list))) { 1141 bad_page(page, "on deferred list"); 1142 goto out; 1143 } 1144 if (!IS_ENABLED(CONFIG_64BIT)) { 1145 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1146 bad_page(page, "nonzero entire_mapcount"); 1147 goto out; 1148 } 1149 if (unlikely(atomic_read(&folio->_pincount))) { 1150 bad_page(page, "nonzero pincount"); 1151 goto out; 1152 } 1153 } 1154 break; 1155 case 3: 1156 /* the third tail page: hugetlb specifics overlap ->mappings */ 1157 if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) 1158 break; 1159 fallthrough; 1160 default: 1161 if (page->mapping != TAIL_MAPPING) { 1162 bad_page(page, "corrupted mapping in tail page"); 1163 goto out; 1164 } 1165 break; 1166 } 1167 if (unlikely(!PageTail(page))) { 1168 bad_page(page, "PageTail not set"); 1169 goto out; 1170 } 1171 if (unlikely(compound_head(page) != head_page)) { 1172 bad_page(page, "compound_head not consistent"); 1173 goto out; 1174 } 1175 ret = 0; 1176 out: 1177 page->mapping = NULL; 1178 clear_compound_head(page); 1179 return ret; 1180 } 1181 1182 /* 1183 * Skip KASAN memory poisoning when either: 1184 * 1185 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1186 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1187 * using page tags instead (see below). 1188 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1189 * that error detection is disabled for accesses via the page address. 1190 * 1191 * Pages will have match-all tags in the following circumstances: 1192 * 1193 * 1. Pages are being initialized for the first time, including during deferred 1194 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1195 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1196 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1197 * 3. The allocation was excluded from being checked due to sampling, 1198 * see the call to kasan_unpoison_pages. 1199 * 1200 * Poisoning pages during deferred memory init will greatly lengthen the 1201 * process and cause problem in large memory systems as the deferred pages 1202 * initialization is done with interrupt disabled. 
1203 * 1204 * Assuming that there will be no reference to those newly initialized 1205 * pages before they are ever allocated, this should have no effect on 1206 * KASAN memory tracking as the poison will be properly inserted at page 1207 * allocation time. The only corner case is when pages are allocated by 1208 * on-demand allocation and then freed again before the deferred pages 1209 * initialization is done, but this is not likely to happen. 1210 */ 1211 static inline bool should_skip_kasan_poison(struct page *page) 1212 { 1213 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1214 return deferred_pages_enabled(); 1215 1216 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1217 } 1218 1219 static void kernel_init_pages(struct page *page, int numpages) 1220 { 1221 int i; 1222 1223 /* s390's use of memset() could override KASAN redzones. */ 1224 kasan_disable_current(); 1225 for (i = 0; i < numpages; i++) 1226 clear_highpage_kasan_tagged(page + i); 1227 kasan_enable_current(); 1228 } 1229 1230 #ifdef CONFIG_MEM_ALLOC_PROFILING 1231 1232 /* Should be called only if mem_alloc_profiling_enabled() */ 1233 void __clear_page_tag_ref(struct page *page) 1234 { 1235 union pgtag_ref_handle handle; 1236 union codetag_ref ref; 1237 1238 if (get_page_tag_ref(page, &ref, &handle)) { 1239 set_codetag_empty(&ref); 1240 update_page_tag_ref(handle, &ref); 1241 put_page_tag_ref(handle); 1242 } 1243 } 1244 1245 /* Should be called only if mem_alloc_profiling_enabled() */ 1246 static noinline 1247 void __pgalloc_tag_add(struct page *page, struct task_struct *task, 1248 unsigned int nr) 1249 { 1250 union pgtag_ref_handle handle; 1251 union codetag_ref ref; 1252 1253 if (get_page_tag_ref(page, &ref, &handle)) { 1254 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); 1255 update_page_tag_ref(handle, &ref); 1256 put_page_tag_ref(handle); 1257 } 1258 } 1259 1260 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1261 unsigned int nr) 1262 { 1263 if (mem_alloc_profiling_enabled()) 1264 __pgalloc_tag_add(page, task, nr); 1265 } 1266 1267 /* Should be called only if mem_alloc_profiling_enabled() */ 1268 static noinline 1269 void __pgalloc_tag_sub(struct page *page, unsigned int nr) 1270 { 1271 union pgtag_ref_handle handle; 1272 union codetag_ref ref; 1273 1274 if (get_page_tag_ref(page, &ref, &handle)) { 1275 alloc_tag_sub(&ref, PAGE_SIZE * nr); 1276 update_page_tag_ref(handle, &ref); 1277 put_page_tag_ref(handle); 1278 } 1279 } 1280 1281 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) 1282 { 1283 if (mem_alloc_profiling_enabled()) 1284 __pgalloc_tag_sub(page, nr); 1285 } 1286 1287 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */ 1288 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) 1289 { 1290 if (tag) 1291 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1292 } 1293 1294 #else /* CONFIG_MEM_ALLOC_PROFILING */ 1295 1296 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1297 unsigned int nr) {} 1298 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1299 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} 1300 1301 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1302 1303 __always_inline bool __free_pages_prepare(struct page *page, 1304 unsigned int order, fpi_t fpi_flags) 1305 { 1306 int bad = 0; 1307 bool skip_kasan_poison = should_skip_kasan_poison(page); 1308 bool init = want_init_on_free(); 1309 bool compound = PageCompound(page); 1310 struct 
folio *folio = page_folio(page); 1311 1312 VM_BUG_ON_PAGE(PageTail(page), page); 1313 1314 trace_mm_page_free(page, order); 1315 kmsan_free_page(page, order); 1316 1317 if (memcg_kmem_online() && PageMemcgKmem(page)) 1318 __memcg_kmem_uncharge_page(page, order); 1319 1320 /* 1321 * In rare cases, when truncation or holepunching raced with 1322 * munlock after VM_LOCKED was cleared, Mlocked may still be 1323 * found set here. This does not indicate a problem, unless 1324 * "unevictable_pgs_cleared" appears worryingly large. 1325 */ 1326 if (unlikely(folio_test_mlocked(folio))) { 1327 long nr_pages = folio_nr_pages(folio); 1328 1329 __folio_clear_mlocked(folio); 1330 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1331 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1332 } 1333 1334 if (unlikely(PageHWPoison(page)) && !order) { 1335 /* Do not let hwpoison pages hit pcplists/buddy */ 1336 reset_page_owner(page, order); 1337 page_table_check_free(page, order); 1338 pgalloc_tag_sub(page, 1 << order); 1339 1340 /* 1341 * The page is isolated and accounted for. 1342 * Mark the codetag as empty to avoid accounting error 1343 * when the page is freed by unpoison_memory(). 1344 */ 1345 clear_page_tag_ref(page); 1346 return false; 1347 } 1348 1349 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1350 1351 /* 1352 * Check tail pages before head page information is cleared to 1353 * avoid checking PageCompound for order-0 pages. 1354 */ 1355 if (unlikely(order)) { 1356 int i; 1357 1358 if (compound) { 1359 page[1].flags.f &= ~PAGE_FLAGS_SECOND; 1360 #ifdef NR_PAGES_IN_LARGE_FOLIO 1361 folio->_nr_pages = 0; 1362 #endif 1363 } 1364 for (i = 1; i < (1 << order); i++) { 1365 if (compound) 1366 bad += free_tail_page_prepare(page, page + i); 1367 if (is_check_pages_enabled()) { 1368 if (free_page_is_bad(page + i)) { 1369 bad++; 1370 continue; 1371 } 1372 } 1373 (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1374 } 1375 } 1376 if (folio_test_anon(folio)) { 1377 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1378 folio->mapping = NULL; 1379 } 1380 if (unlikely(page_has_type(page))) { 1381 /* networking expects to clear its page type before releasing */ 1382 if (is_check_pages_enabled()) { 1383 if (unlikely(PageNetpp(page))) { 1384 bad_page(page, "page_pool leak"); 1385 return false; 1386 } 1387 } 1388 /* Reset the page_type (which overlays _mapcount) */ 1389 page->page_type = UINT_MAX; 1390 } 1391 1392 if (is_check_pages_enabled()) { 1393 if (free_page_is_bad(page)) 1394 bad++; 1395 if (bad) 1396 return false; 1397 } 1398 1399 page_cpupid_reset_last(page); 1400 page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1401 page->private = 0; 1402 reset_page_owner(page, order); 1403 page_table_check_free(page, order); 1404 pgalloc_tag_sub(page, 1 << order); 1405 1406 if (!PageHighMem(page) && !(fpi_flags & FPI_TRYLOCK)) { 1407 debug_check_no_locks_freed(page_address(page), 1408 PAGE_SIZE << order); 1409 debug_check_no_obj_freed(page_address(page), 1410 PAGE_SIZE << order); 1411 } 1412 1413 kernel_poison_pages(page, 1 << order); 1414 1415 /* 1416 * As memory initialization might be integrated into KASAN, 1417 * KASAN poisoning and memory initialization code must be 1418 * kept together to avoid discrepancies in behavior. 1419 * 1420 * With hardware tag-based KASAN, memory tags must be set before the 1421 * page becomes unavailable via debug_pagealloc or arch_free_page. 
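 *
 * Sketch of the ordering below: kasan_poison_pages(page, order, init) may
 * initialize the memory itself when KASAN has integrated init, in which
 * case 'init' is cleared and the explicit kernel_init_pages() call is
 * skipped.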
1422 */ 1423 if (!skip_kasan_poison) { 1424 kasan_poison_pages(page, order, init); 1425 1426 /* Memory is already initialized if KASAN did it internally. */ 1427 if (kasan_has_integrated_init()) 1428 init = false; 1429 } 1430 if (init) 1431 kernel_init_pages(page, 1 << order); 1432 1433 /* 1434 * arch_free_page() can make the page's contents inaccessible. s390 1435 * does this. So nothing which can access the page's contents should 1436 * happen after this. 1437 */ 1438 arch_free_page(page, order); 1439 1440 debug_pagealloc_unmap_pages(page, 1 << order); 1441 1442 return true; 1443 } 1444 1445 bool free_pages_prepare(struct page *page, unsigned int order) 1446 { 1447 return __free_pages_prepare(page, order, FPI_NONE); 1448 } 1449 1450 /* 1451 * Frees a number of pages from the PCP lists 1452 * Assumes all pages on list are in same zone. 1453 * count is the number of pages to free. 1454 */ 1455 static void free_pcppages_bulk(struct zone *zone, int count, 1456 struct per_cpu_pages *pcp, 1457 int pindex) 1458 { 1459 unsigned long flags; 1460 unsigned int order; 1461 struct page *page; 1462 1463 /* 1464 * Ensure proper count is passed which otherwise would stuck in the 1465 * below while (list_empty(list)) loop. 1466 */ 1467 count = min(pcp->count, count); 1468 1469 /* Ensure requested pindex is drained first. */ 1470 pindex = pindex - 1; 1471 1472 spin_lock_irqsave(&zone->lock, flags); 1473 1474 while (count > 0) { 1475 struct list_head *list; 1476 int nr_pages; 1477 1478 /* Remove pages from lists in a round-robin fashion. */ 1479 do { 1480 if (++pindex > NR_PCP_LISTS - 1) 1481 pindex = 0; 1482 list = &pcp->lists[pindex]; 1483 } while (list_empty(list)); 1484 1485 order = pindex_to_order(pindex); 1486 nr_pages = 1 << order; 1487 do { 1488 unsigned long pfn; 1489 int mt; 1490 1491 page = list_last_entry(list, struct page, pcp_list); 1492 pfn = page_to_pfn(page); 1493 mt = get_pfnblock_migratetype(page, pfn); 1494 1495 /* must delete to avoid corrupting pcp list */ 1496 list_del(&page->pcp_list); 1497 count -= nr_pages; 1498 pcp->count -= nr_pages; 1499 1500 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); 1501 trace_mm_page_pcpu_drain(page, order, mt); 1502 } while (count > 0 && !list_empty(list)); 1503 } 1504 1505 spin_unlock_irqrestore(&zone->lock, flags); 1506 } 1507 1508 /* Split a multi-block free page into its individual pageblocks. */ 1509 static void split_large_buddy(struct zone *zone, struct page *page, 1510 unsigned long pfn, int order, fpi_t fpi) 1511 { 1512 unsigned long end = pfn + (1 << order); 1513 1514 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); 1515 /* Caller removed page from freelist, buddy info cleared! 
*/ 1516 VM_WARN_ON_ONCE(PageBuddy(page)); 1517 1518 if (order > pageblock_order) 1519 order = pageblock_order; 1520 1521 do { 1522 int mt = get_pfnblock_migratetype(page, pfn); 1523 1524 __free_one_page(page, pfn, zone, order, mt, fpi); 1525 pfn += 1 << order; 1526 if (pfn == end) 1527 break; 1528 page = pfn_to_page(pfn); 1529 } while (1); 1530 } 1531 1532 static void add_page_to_zone_llist(struct zone *zone, struct page *page, 1533 unsigned int order) 1534 { 1535 /* Remember the order */ 1536 page->private = order; 1537 /* Add the page to the free list */ 1538 llist_add(&page->pcp_llist, &zone->trylock_free_pages); 1539 } 1540 1541 static void free_one_page(struct zone *zone, struct page *page, 1542 unsigned long pfn, unsigned int order, 1543 fpi_t fpi_flags) 1544 { 1545 struct llist_head *llhead; 1546 unsigned long flags; 1547 1548 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 1549 if (!spin_trylock_irqsave(&zone->lock, flags)) { 1550 add_page_to_zone_llist(zone, page, order); 1551 return; 1552 } 1553 } else { 1554 spin_lock_irqsave(&zone->lock, flags); 1555 } 1556 1557 /* The lock succeeded. Process deferred pages. */ 1558 llhead = &zone->trylock_free_pages; 1559 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) { 1560 struct llist_node *llnode; 1561 struct page *p, *tmp; 1562 1563 llnode = llist_del_all(llhead); 1564 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) { 1565 unsigned int p_order = p->private; 1566 1567 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags); 1568 __count_vm_events(PGFREE, 1 << p_order); 1569 } 1570 } 1571 split_large_buddy(zone, page, pfn, order, fpi_flags); 1572 spin_unlock_irqrestore(&zone->lock, flags); 1573 1574 __count_vm_events(PGFREE, 1 << order); 1575 } 1576 1577 static void __free_pages_ok(struct page *page, unsigned int order, 1578 fpi_t fpi_flags) 1579 { 1580 unsigned long pfn = page_to_pfn(page); 1581 struct zone *zone = page_zone(page); 1582 1583 if (__free_pages_prepare(page, order, fpi_flags)) 1584 free_one_page(zone, page, pfn, order, fpi_flags); 1585 } 1586 1587 void __meminit __free_pages_core(struct page *page, unsigned int order, 1588 enum meminit_context context) 1589 { 1590 unsigned int nr_pages = 1 << order; 1591 struct page *p = page; 1592 unsigned int loop; 1593 1594 /* 1595 * When initializing the memmap, __init_single_page() sets the refcount 1596 * of all pages to 1 ("allocated"/"not free"). We have to set the 1597 * refcount of all involved pages to 0. 1598 * 1599 * Note that hotplugged memory pages are initialized to PageOffline(). 1600 * Pages freed from memblock might be marked as reserved. 1601 */ 1602 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1603 unlikely(context == MEMINIT_HOTPLUG)) { 1604 for (loop = 0; loop < nr_pages; loop++, p++) { 1605 VM_WARN_ON_ONCE(PageReserved(p)); 1606 __ClearPageOffline(p); 1607 set_page_count(p, 0); 1608 } 1609 1610 adjust_managed_page_count(page, nr_pages); 1611 } else { 1612 for (loop = 0; loop < nr_pages; loop++, p++) { 1613 __ClearPageReserved(p); 1614 set_page_count(p, 0); 1615 } 1616 1617 /* memblock adjusts totalram_pages() manually. */ 1618 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1619 } 1620 1621 if (page_contains_unaccepted(page, order)) { 1622 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1623 return; 1624 1625 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1626 } 1627 1628 /* 1629 * Bypass PCP and place fresh pages right to the tail, primarily 1630 * relevant for memory onlining. 
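 *
 * Concretely, the call below is __free_pages_ok(page, order, FPI_TO_TAIL)
 * rather than the per-cpu free path, so freshly onlined memory lands at the
 * freelist tail and is handed out last.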
1631 */ 1632 __free_pages_ok(page, order, FPI_TO_TAIL); 1633 } 1634 1635 /* 1636 * Check that the whole (or subset of) a pageblock given by the interval of 1637 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1638 * with the migration of free compaction scanner. 1639 * 1640 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1641 * 1642 * It's possible on some configurations to have a setup like node0 node1 node0 1643 * i.e. it's possible that all pages within a zones range of pages do not 1644 * belong to a single zone. We assume that a border between node0 and node1 1645 * can occur within a single pageblock, but not a node0 node1 node0 1646 * interleaving within a single pageblock. It is therefore sufficient to check 1647 * the first and last page of a pageblock and avoid checking each individual 1648 * page in a pageblock. 1649 * 1650 * Note: the function may return non-NULL struct page even for a page block 1651 * which contains a memory hole (i.e. there is no physical memory for a subset 1652 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which 1653 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1654 * even though the start pfn is online and valid. This should be safe most of 1655 * the time because struct pages are still initialized via init_unavailable_range() 1656 * and pfn walkers shouldn't touch any physical memory range for which they do 1657 * not recognize any specific metadata in struct pages. 1658 */ 1659 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1660 unsigned long end_pfn, struct zone *zone) 1661 { 1662 struct page *start_page; 1663 struct page *end_page; 1664 1665 /* end_pfn is one past the range we are checking */ 1666 end_pfn--; 1667 1668 if (!pfn_valid(end_pfn)) 1669 return NULL; 1670 1671 start_page = pfn_to_online_page(start_pfn); 1672 if (!start_page) 1673 return NULL; 1674 1675 if (page_zone(start_page) != zone) 1676 return NULL; 1677 1678 end_page = pfn_to_page(end_pfn); 1679 1680 /* This gives a shorter code than deriving page_zone(end_page) */ 1681 if (page_zone_id(start_page) != page_zone_id(end_page)) 1682 return NULL; 1683 1684 return start_page; 1685 } 1686 1687 /* 1688 * The order of subdivision here is critical for the IO subsystem. 1689 * Please do not alter this order without good reasons and regression 1690 * testing. Specifically, as large blocks of memory are subdivided, 1691 * the order in which smaller blocks are delivered depends on the order 1692 * they're subdivided in this function. This is the primary factor 1693 * influencing the order in which pages are delivered to the IO 1694 * subsystem according to empirical testing, and this is also justified 1695 * by considering the behavior of a buddy system containing a single 1696 * large block of memory acted on by a series of small allocations. 1697 * This behavior is a critical factor in sglist merging's success. 1698 * 1699 * -- nyc 1700 */ 1701 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1702 int high, int migratetype) 1703 { 1704 unsigned int size = 1 << high; 1705 unsigned int nr_added = 0; 1706 1707 while (high > low) { 1708 high--; 1709 size >>= 1; 1710 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1711 1712 /* 1713 * Mark as guard pages (or page), that will allow to 1714 * merge back to allocator when buddy will be freed. 
1715 * Corresponding page table entries will not be touched, 1716 * pages will stay not present in virtual address space 1717 */ 1718 if (set_page_guard(zone, &page[size], high)) 1719 continue; 1720 1721 __add_to_free_list(&page[size], zone, high, migratetype, false); 1722 set_buddy_order(&page[size], high); 1723 nr_added += size; 1724 } 1725 1726 return nr_added; 1727 } 1728 1729 static __always_inline void page_del_and_expand(struct zone *zone, 1730 struct page *page, int low, 1731 int high, int migratetype) 1732 { 1733 int nr_pages = 1 << high; 1734 1735 __del_page_from_free_list(page, zone, high, migratetype); 1736 nr_pages -= expand(zone, page, low, high, migratetype); 1737 account_freepages(zone, -nr_pages, migratetype); 1738 } 1739 1740 static void check_new_page_bad(struct page *page) 1741 { 1742 if (unlikely(PageHWPoison(page))) { 1743 /* Don't complain about hwpoisoned pages */ 1744 if (PageBuddy(page)) 1745 __ClearPageBuddy(page); 1746 return; 1747 } 1748 1749 bad_page(page, 1750 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1751 } 1752 1753 /* 1754 * This page is about to be returned from the page allocator 1755 */ 1756 static bool check_new_page(struct page *page) 1757 { 1758 if (likely(page_expected_state(page, 1759 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1760 return false; 1761 1762 check_new_page_bad(page); 1763 return true; 1764 } 1765 1766 static inline bool check_new_pages(struct page *page, unsigned int order) 1767 { 1768 if (is_check_pages_enabled()) { 1769 for (int i = 0; i < (1 << order); i++) { 1770 struct page *p = page + i; 1771 1772 if (check_new_page(p)) 1773 return true; 1774 } 1775 } 1776 1777 return false; 1778 } 1779 1780 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1781 { 1782 /* Don't skip if a software KASAN mode is enabled. */ 1783 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1784 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1785 return false; 1786 1787 /* Skip, if hardware tag-based KASAN is not enabled. */ 1788 if (!kasan_hw_tags_enabled()) 1789 return true; 1790 1791 /* 1792 * With hardware tag-based KASAN enabled, skip if this has been 1793 * requested via __GFP_SKIP_KASAN. 1794 */ 1795 return flags & __GFP_SKIP_KASAN; 1796 } 1797 1798 static inline bool should_skip_init(gfp_t flags) 1799 { 1800 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1801 if (!kasan_hw_tags_enabled()) 1802 return false; 1803 1804 /* For hardware tag-based KASAN, skip if requested. */ 1805 return (flags & __GFP_SKIP_ZERO); 1806 } 1807 1808 inline void post_alloc_hook(struct page *page, unsigned int order, 1809 gfp_t gfp_flags) 1810 { 1811 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1812 !should_skip_init(gfp_flags); 1813 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1814 int i; 1815 1816 set_page_private(page, 0); 1817 1818 arch_alloc_page(page, order); 1819 debug_pagealloc_map_pages(page, 1 << order); 1820 1821 /* 1822 * Page unpoisoning must happen before memory initialization. 1823 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1824 * allocations and the page unpoisoning code will complain. 1825 */ 1826 kernel_unpoison_pages(page, 1 << order); 1827 1828 /* 1829 * As memory initialization might be integrated into KASAN, 1830 * KASAN unpoisoning and memory initialization code must be 1831 * kept together to avoid discrepancies in behavior. 1832 */ 1833 1834 /* 1835 * If memory tags should be zeroed 1836 * (which happens only when memory should be initialized as well). 
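 * In that case the data is cleared together with the memory tags by
 * tag_clear_highpages() below, and 'init' is updated so the memory is not
 * initialized a second time by kernel_init_pages().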
1837 */ 1838 if (zero_tags) 1839 init = !tag_clear_highpages(page, 1 << order); 1840 1841 if (!should_skip_kasan_unpoison(gfp_flags) && 1842 kasan_unpoison_pages(page, order, init)) { 1843 /* Take note that memory was initialized by KASAN. */ 1844 if (kasan_has_integrated_init()) 1845 init = false; 1846 } else { 1847 /* 1848 * If memory tags have not been set by KASAN, reset the page 1849 * tags to ensure page_address() dereferencing does not fault. 1850 */ 1851 for (i = 0; i != 1 << order; ++i) 1852 page_kasan_tag_reset(page + i); 1853 } 1854 /* If memory is still not initialized, initialize it now. */ 1855 if (init) 1856 kernel_init_pages(page, 1 << order); 1857 1858 set_page_owner(page, order, gfp_flags); 1859 page_table_check_alloc(page, order); 1860 pgalloc_tag_add(page, current, 1 << order); 1861 } 1862 1863 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1864 unsigned int alloc_flags) 1865 { 1866 post_alloc_hook(page, order, gfp_flags); 1867 1868 if (order && (gfp_flags & __GFP_COMP)) 1869 prep_compound_page(page, order); 1870 1871 /* 1872 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1873 * allocate the page. The expectation is that the caller is taking 1874 * steps that will free more memory. The caller should avoid the page 1875 * being used for !PFMEMALLOC purposes. 1876 */ 1877 if (alloc_flags & ALLOC_NO_WATERMARKS) 1878 set_page_pfmemalloc(page); 1879 else 1880 clear_page_pfmemalloc(page); 1881 } 1882 1883 /* 1884 * Go through the free lists for the given migratetype and remove 1885 * the smallest available page from the freelists 1886 */ 1887 static __always_inline 1888 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1889 int migratetype) 1890 { 1891 unsigned int current_order; 1892 struct free_area *area; 1893 struct page *page; 1894 1895 /* Find a page of the appropriate size in the preferred list */ 1896 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1897 area = &(zone->free_area[current_order]); 1898 page = get_page_from_free_area(area, migratetype); 1899 if (!page) 1900 continue; 1901 1902 page_del_and_expand(zone, page, order, current_order, 1903 migratetype); 1904 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1905 pcp_allowed_order(order) && 1906 migratetype < MIGRATE_PCPTYPES); 1907 return page; 1908 } 1909 1910 return NULL; 1911 } 1912 1913 1914 /* 1915 * This array describes the order lists are fallen back to when 1916 * the free lists for the desirable migrate type are depleted 1917 * 1918 * The other migratetypes do not have fallbacks. 1919 */ 1920 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1921 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1922 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1923 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1924 }; 1925 1926 #ifdef CONFIG_CMA 1927 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1928 unsigned int order) 1929 { 1930 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1931 } 1932 #else 1933 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1934 unsigned int order) { return NULL; } 1935 #endif 1936 1937 /* 1938 * Move all free pages of a block to new type's freelist. Caller needs to 1939 * change the block type. 
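* Returns the number of pages moved. Only pages currently in the buddy freelists are touched; allocated pages within the block are left where they are.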
1940 */ 1941 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1942 int old_mt, int new_mt) 1943 { 1944 struct page *page; 1945 unsigned long pfn, end_pfn; 1946 unsigned int order; 1947 int pages_moved = 0; 1948 1949 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1950 end_pfn = pageblock_end_pfn(start_pfn); 1951 1952 for (pfn = start_pfn; pfn < end_pfn;) { 1953 page = pfn_to_page(pfn); 1954 if (!PageBuddy(page)) { 1955 pfn++; 1956 continue; 1957 } 1958 1959 /* Make sure we are not inadvertently changing nodes */ 1960 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1961 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1962 1963 order = buddy_order(page); 1964 1965 move_to_free_list(page, zone, order, old_mt, new_mt); 1966 1967 pfn += 1 << order; 1968 pages_moved += 1 << order; 1969 } 1970 1971 return pages_moved; 1972 } 1973 1974 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 1975 unsigned long *start_pfn, 1976 int *num_free, int *num_movable) 1977 { 1978 unsigned long pfn, start, end; 1979 1980 pfn = page_to_pfn(page); 1981 start = pageblock_start_pfn(pfn); 1982 end = pageblock_end_pfn(pfn); 1983 1984 /* 1985 * The caller only has the lock for @zone, don't touch ranges 1986 * that straddle into other zones. While we could move part of 1987 * the range that's inside the zone, this call is usually 1988 * accompanied by other operations such as migratetype updates 1989 * which also should be locked. 1990 */ 1991 if (!zone_spans_pfn(zone, start)) 1992 return false; 1993 if (!zone_spans_pfn(zone, end - 1)) 1994 return false; 1995 1996 *start_pfn = start; 1997 1998 if (num_free) { 1999 *num_free = 0; 2000 *num_movable = 0; 2001 for (pfn = start; pfn < end;) { 2002 page = pfn_to_page(pfn); 2003 if (PageBuddy(page)) { 2004 int nr = 1 << buddy_order(page); 2005 2006 *num_free += nr; 2007 pfn += nr; 2008 continue; 2009 } 2010 /* 2011 * We assume that pages that could be isolated for 2012 * migration are movable. But we don't actually try 2013 * isolating, as that would be expensive. 2014 */ 2015 if (PageLRU(page) || page_has_movable_ops(page)) 2016 (*num_movable)++; 2017 pfn++; 2018 } 2019 } 2020 2021 return true; 2022 } 2023 2024 static int move_freepages_block(struct zone *zone, struct page *page, 2025 int old_mt, int new_mt) 2026 { 2027 unsigned long start_pfn; 2028 int res; 2029 2030 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2031 return -1; 2032 2033 res = __move_freepages_block(zone, start_pfn, old_mt, new_mt); 2034 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 2035 2036 return res; 2037 2038 } 2039 2040 #ifdef CONFIG_MEMORY_ISOLATION 2041 /* Look for a buddy that straddles start_pfn */ 2042 static unsigned long find_large_buddy(unsigned long start_pfn) 2043 { 2044 /* 2045 * If start_pfn is not an order-0 PageBuddy, next PageBuddy containing 2046 * start_pfn has minimal order of __ffs(start_pfn) + 1. Start checking 2047 * the order with __ffs(start_pfn). If start_pfn is order-0 PageBuddy, 2048 * the starting order does not matter. 2049 */ 2050 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER; 2051 struct page *page; 2052 unsigned long pfn = start_pfn; 2053 2054 while (!PageBuddy(page = pfn_to_page(pfn))) { 2055 /* Nothing found */ 2056 if (++order > MAX_PAGE_ORDER) 2057 return start_pfn; 2058 pfn &= ~0UL << order; 2059 } 2060 2061 /* 2062 * Found a preceding buddy, but does it straddle? 
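* For example (illustrative pfns, pageblock_order 9): for start_pfn 0x1600, an order-10 buddy at pfn 0x1400 covers pfns 0x1400-0x17ff and thus straddles the block starting at 0x1600, so 0x1400 is returned.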
2063 */ 2064 if (pfn + (1 << buddy_order(page)) > start_pfn) 2065 return pfn; 2066 2067 /* Nothing found */ 2068 return start_pfn; 2069 } 2070 2071 static inline void toggle_pageblock_isolate(struct page *page, bool isolate) 2072 { 2073 if (isolate) 2074 set_pageblock_isolate(page); 2075 else 2076 clear_pageblock_isolate(page); 2077 } 2078 2079 /** 2080 * __move_freepages_block_isolate - move free pages in block for page isolation 2081 * @zone: the zone 2082 * @page: the pageblock page 2083 * @isolate: to isolate the given pageblock or unisolate it 2084 * 2085 * This is similar to move_freepages_block(), but handles the special 2086 * case encountered in page isolation, where the block of interest 2087 * might be part of a larger buddy spanning multiple pageblocks. 2088 * 2089 * Unlike the regular page allocator path, which moves pages while 2090 * stealing buddies off the freelist, page isolation is interested in 2091 * arbitrary pfn ranges that may have overlapping buddies on both ends. 2092 * 2093 * This function handles that. Straddling buddies are split into 2094 * individual pageblocks. Only the block of interest is moved. 2095 * 2096 * Returns %true if pages could be moved, %false otherwise. 2097 */ 2098 static bool __move_freepages_block_isolate(struct zone *zone, 2099 struct page *page, bool isolate) 2100 { 2101 unsigned long start_pfn, buddy_pfn; 2102 int from_mt; 2103 int to_mt; 2104 struct page *buddy; 2105 2106 if (isolate == get_pageblock_isolate(page)) { 2107 VM_WARN_ONCE(1, "%s a pageblock that is already in that state", 2108 isolate ? "Isolate" : "Unisolate"); 2109 return false; 2110 } 2111 2112 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2113 return false; 2114 2115 /* No splits needed if buddies can't span multiple blocks */ 2116 if (pageblock_order == MAX_PAGE_ORDER) 2117 goto move; 2118 2119 buddy_pfn = find_large_buddy(start_pfn); 2120 buddy = pfn_to_page(buddy_pfn); 2121 /* We're a part of a larger buddy */ 2122 if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) { 2123 int order = buddy_order(buddy); 2124 2125 del_page_from_free_list(buddy, zone, order, 2126 get_pfnblock_migratetype(buddy, buddy_pfn)); 2127 toggle_pageblock_isolate(page, isolate); 2128 split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE); 2129 return true; 2130 } 2131 2132 move: 2133 /* Use MIGRATETYPE_MASK to get non-isolate migratetype */ 2134 if (isolate) { 2135 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2136 MIGRATETYPE_MASK); 2137 to_mt = MIGRATE_ISOLATE; 2138 } else { 2139 from_mt = MIGRATE_ISOLATE; 2140 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2141 MIGRATETYPE_MASK); 2142 } 2143 2144 __move_freepages_block(zone, start_pfn, from_mt, to_mt); 2145 toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate); 2146 2147 return true; 2148 } 2149 2150 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page) 2151 { 2152 return __move_freepages_block_isolate(zone, page, true); 2153 } 2154 2155 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page) 2156 { 2157 return __move_freepages_block_isolate(zone, page, false); 2158 } 2159 2160 #endif /* CONFIG_MEMORY_ISOLATION */ 2161 2162 static inline bool boost_watermark(struct zone *zone) 2163 { 2164 unsigned long max_boost; 2165 2166 if (!watermark_boost_factor) 2167 return false; 2168 /* 2169 * Don't bother in zones that are unlikely to produce results. 
2170 * On small machines, including kdump capture kernels running 2171 * in a small area, boosting the watermark can cause an out of 2172 * memory situation immediately. 2173 */ 2174 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2175 return false; 2176 2177 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2178 watermark_boost_factor, 10000); 2179 2180 /* 2181 * The high watermark may be uninitialised if fragmentation occurs 2182 * very early in boot so do not boost. We do not fall 2183 * through and boost by pageblock_nr_pages as failing 2184 * allocations that early means that reclaim is not going 2185 * to help and it may even be impossible to reclaim the 2186 * boosted watermark resulting in a hang. 2187 */ 2188 if (!max_boost) 2189 return false; 2190 2191 max_boost = max(pageblock_nr_pages, max_boost); 2192 2193 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2194 max_boost); 2195 2196 return true; 2197 } 2198 2199 /* 2200 * When we are falling back to another migratetype during allocation, should we 2201 * try to claim an entire block to satisfy further allocations, instead of 2202 * polluting multiple pageblocks? 2203 */ 2204 static bool should_try_claim_block(unsigned int order, int start_mt) 2205 { 2206 /* 2207 * This order check is intentionally kept even though the next 2208 * check is more relaxed. The reason is that when this condition is 2209 * met we are guaranteed to be able to claim the whole pageblock, 2210 * whereas the check below is only a heuristic and could be 2211 * changed at any time. 2212 */ 2213 if (order >= pageblock_order) 2214 return true; 2215 2216 /* 2217 * Above a certain threshold, always try to claim, as it's likely there 2218 * will be more free pages in the pageblock. 2219 */ 2220 if (order >= pageblock_order / 2) 2221 return true; 2222 2223 /* 2224 * Unmovable/reclaimable allocations would cause permanent 2225 * fragmentation if they fell back to allocating from a movable block 2226 * (polluting it), so we try to claim the whole block regardless of the 2227 * allocation size. Later movable allocations can always steal from this 2228 * block, which is less problematic. 2229 */ 2230 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE) 2231 return true; 2232 2233 if (page_group_by_mobility_disabled) 2234 return true; 2235 2236 /* 2237 * Movable pages won't cause permanent fragmentation, so for small 2238 * allocations we just need to temporarily steal unmovable or 2239 * reclaimable pages that are closest to the request size. After a 2240 * while, memory compaction may occur to form large contiguous pages, 2241 * and the next movable allocation may not need to steal. 2242 */ 2243 return false; 2244 } 2245 2246 /* 2247 * Check whether there is a suitable fallback freepage with the requested order. 2248 * If claimable is true, this function returns fallback_mt only if 2249 * we would do this whole-block claiming. This helps to reduce 2250 * fragmentation due to mixed migratetype pages in one pageblock.
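* Returns the fallback migratetype to take pages from, -1 if no fallback type has free pages of this order, or -2 if claimable was set but whole-block claiming is not worthwhile for this order and migratetype.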
2251 */ 2252 int find_suitable_fallback(struct free_area *area, unsigned int order, 2253 int migratetype, bool claimable) 2254 { 2255 int i; 2256 2257 if (claimable && !should_try_claim_block(order, migratetype)) 2258 return -2; 2259 2260 if (area->nr_free == 0) 2261 return -1; 2262 2263 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 2264 int fallback_mt = fallbacks[migratetype][i]; 2265 2266 if (!free_area_empty(area, fallback_mt)) 2267 return fallback_mt; 2268 } 2269 2270 return -1; 2271 } 2272 2273 /* 2274 * This function implements actual block claiming behaviour. If order is large 2275 * enough, we can claim the whole pageblock for the requested migratetype. If 2276 * not, we check the pageblock for constituent pages; if at least half of the 2277 * pages are free or compatible, we can still claim the whole block, so pages 2278 * freed in the future will be put on the correct free list. 2279 */ 2280 static struct page * 2281 try_to_claim_block(struct zone *zone, struct page *page, 2282 int current_order, int order, int start_type, 2283 int block_type, unsigned int alloc_flags) 2284 { 2285 int free_pages, movable_pages, alike_pages; 2286 unsigned long start_pfn; 2287 2288 /* Take ownership for orders >= pageblock_order */ 2289 if (current_order >= pageblock_order) { 2290 unsigned int nr_added; 2291 2292 del_page_from_free_list(page, zone, current_order, block_type); 2293 change_pageblock_range(page, current_order, start_type); 2294 nr_added = expand(zone, page, order, current_order, start_type); 2295 account_freepages(zone, nr_added, start_type); 2296 return page; 2297 } 2298 2299 /* 2300 * Boost watermarks to increase reclaim pressure to reduce the 2301 * likelihood of future fallbacks. Wake kswapd now as the node 2302 * may be balanced overall and kswapd will not wake naturally. 2303 */ 2304 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2305 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2306 2307 /* moving whole block can fail due to zone boundary conditions */ 2308 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 2309 &movable_pages)) 2310 return NULL; 2311 2312 /* 2313 * Determine how many pages are compatible with our allocation. 2314 * For movable allocation, it's the number of movable pages which 2315 * we just obtained. For other types it's a bit more tricky. 2316 */ 2317 if (start_type == MIGRATE_MOVABLE) { 2318 alike_pages = movable_pages; 2319 } else { 2320 /* 2321 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2322 * to MOVABLE pageblock, consider all non-movable pages as 2323 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2324 * vice versa, be conservative since we can't distinguish the 2325 * exact migratetype of non-movable pages. 2326 */ 2327 if (block_type == MIGRATE_MOVABLE) 2328 alike_pages = pageblock_nr_pages 2329 - (free_pages + movable_pages); 2330 else 2331 alike_pages = 0; 2332 } 2333 /* 2334 * If a sufficient number of pages in the block are either free or of 2335 * compatible migratability as our allocation, claim the whole block. 2336 */ 2337 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2338 page_group_by_mobility_disabled) { 2339 __move_freepages_block(zone, start_pfn, block_type, start_type); 2340 set_pageblock_migratetype(pfn_to_page(start_pfn), start_type); 2341 return __rmqueue_smallest(zone, order, start_type); 2342 } 2343 2344 return NULL; 2345 } 2346 2347 /* 2348 * Try to allocate from some fallback migratetype by claiming the entire block, 2349 * i.e. 
converting it to the allocation's start migratetype. 2350 * 2351 * The use of signed ints for order and current_order is a deliberate 2352 * deviation from the rest of this file, to make the for loop 2353 * condition simpler. 2354 */ 2355 static __always_inline struct page * 2356 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, 2357 unsigned int alloc_flags) 2358 { 2359 struct free_area *area; 2360 int current_order; 2361 int min_order = order; 2362 struct page *page; 2363 int fallback_mt; 2364 2365 /* 2366 * Do not steal pages from freelists belonging to other pageblocks 2367 * i.e. orders < pageblock_order. If there are no local zones free, 2368 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2369 */ 2370 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2371 min_order = pageblock_order; 2372 2373 /* 2374 * Find the largest available free page in the other list. This roughly 2375 * approximates finding the pageblock with the most free pages, which 2376 * would be too costly to do exactly. 2377 */ 2378 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2379 --current_order) { 2380 area = &(zone->free_area[current_order]); 2381 fallback_mt = find_suitable_fallback(area, current_order, 2382 start_migratetype, true); 2383 2384 /* No block in that order */ 2385 if (fallback_mt == -1) 2386 continue; 2387 2388 /* Advanced into orders too low to claim, abort */ 2389 if (fallback_mt == -2) 2390 break; 2391 2392 page = get_page_from_free_area(area, fallback_mt); 2393 page = try_to_claim_block(zone, page, current_order, order, 2394 start_migratetype, fallback_mt, 2395 alloc_flags); 2396 if (page) { 2397 trace_mm_page_alloc_extfrag(page, order, current_order, 2398 start_migratetype, fallback_mt); 2399 return page; 2400 } 2401 } 2402 2403 return NULL; 2404 } 2405 2406 /* 2407 * Try to steal a single page from some fallback migratetype. Leave the rest of 2408 * the block as its current migratetype, potentially causing fragmentation. 2409 */ 2410 static __always_inline struct page * 2411 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) 2412 { 2413 struct free_area *area; 2414 int current_order; 2415 struct page *page; 2416 int fallback_mt; 2417 2418 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2419 area = &(zone->free_area[current_order]); 2420 fallback_mt = find_suitable_fallback(area, current_order, 2421 start_migratetype, false); 2422 if (fallback_mt == -1) 2423 continue; 2424 2425 page = get_page_from_free_area(area, fallback_mt); 2426 page_del_and_expand(zone, page, order, current_order, fallback_mt); 2427 trace_mm_page_alloc_extfrag(page, order, current_order, 2428 start_migratetype, fallback_mt); 2429 return page; 2430 } 2431 2432 return NULL; 2433 } 2434 2435 enum rmqueue_mode { 2436 RMQUEUE_NORMAL, 2437 RMQUEUE_CMA, 2438 RMQUEUE_CLAIM, 2439 RMQUEUE_STEAL, 2440 }; 2441 2442 /* 2443 * Do the hard work of removing an element from the buddy allocator. 2444 * Call me with the zone->lock already held. 2445 */ 2446 static __always_inline struct page * 2447 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2448 unsigned int alloc_flags, enum rmqueue_mode *mode) 2449 { 2450 struct page *page; 2451 2452 if (IS_ENABLED(CONFIG_CMA)) { 2453 /* 2454 * Balance movable allocations between regular and CMA areas by 2455 * allocating from CMA when over half of the zone's free memory 2456 * is in the CMA area. 
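* For example (illustrative numbers): with 1GB of free memory in the zone, of which 600MB is free CMA, a movable request is first tried from CMA, preserving the regular area for allocations that cannot use it.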
2457 */ 2458 if (alloc_flags & ALLOC_CMA && 2459 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2460 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2461 page = __rmqueue_cma_fallback(zone, order); 2462 if (page) 2463 return page; 2464 } 2465 } 2466 2467 /* 2468 * First try the freelists of the requested migratetype, then try 2469 * fallbacks modes with increasing levels of fragmentation risk. 2470 * 2471 * The fallback logic is expensive and rmqueue_bulk() calls in 2472 * a loop with the zone->lock held, meaning the freelists are 2473 * not subject to any outside changes. Remember in *mode where 2474 * we found pay dirt, to save us the search on the next call. 2475 */ 2476 switch (*mode) { 2477 case RMQUEUE_NORMAL: 2478 page = __rmqueue_smallest(zone, order, migratetype); 2479 if (page) 2480 return page; 2481 fallthrough; 2482 case RMQUEUE_CMA: 2483 if (alloc_flags & ALLOC_CMA) { 2484 page = __rmqueue_cma_fallback(zone, order); 2485 if (page) { 2486 *mode = RMQUEUE_CMA; 2487 return page; 2488 } 2489 } 2490 fallthrough; 2491 case RMQUEUE_CLAIM: 2492 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); 2493 if (page) { 2494 /* Replenished preferred freelist, back to normal mode. */ 2495 *mode = RMQUEUE_NORMAL; 2496 return page; 2497 } 2498 fallthrough; 2499 case RMQUEUE_STEAL: 2500 if (!(alloc_flags & ALLOC_NOFRAGMENT)) { 2501 page = __rmqueue_steal(zone, order, migratetype); 2502 if (page) { 2503 *mode = RMQUEUE_STEAL; 2504 return page; 2505 } 2506 } 2507 } 2508 return NULL; 2509 } 2510 2511 /* 2512 * Obtain a specified number of elements from the buddy allocator, all under 2513 * a single hold of the lock, for efficiency. Add them to the supplied list. 2514 * Returns the number of new pages which were placed at *list. 2515 */ 2516 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2517 unsigned long count, struct list_head *list, 2518 int migratetype, unsigned int alloc_flags) 2519 { 2520 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 2521 unsigned long flags; 2522 int i; 2523 2524 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 2525 if (!spin_trylock_irqsave(&zone->lock, flags)) 2526 return 0; 2527 } else { 2528 spin_lock_irqsave(&zone->lock, flags); 2529 } 2530 for (i = 0; i < count; ++i) { 2531 struct page *page = __rmqueue(zone, order, migratetype, 2532 alloc_flags, &rmqm); 2533 if (unlikely(page == NULL)) 2534 break; 2535 2536 /* 2537 * Split buddy pages returned by expand() are received here in 2538 * physical page order. The page is added to the tail of 2539 * caller's list. From the callers perspective, the linked list 2540 * is ordered by page number under some conditions. This is 2541 * useful for IO devices that can forward direction from the 2542 * head, thus also in the physical page order. This is useful 2543 * for IO devices that can merge IO requests if the physical 2544 * pages are ordered properly. 2545 */ 2546 list_add_tail(&page->pcp_list, list); 2547 } 2548 spin_unlock_irqrestore(&zone->lock, flags); 2549 2550 return i; 2551 } 2552 2553 /* 2554 * Called from the vmstat counter updater to decay the PCP high. 2555 * Return whether there are addition works to do. 2556 */ 2557 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) 2558 { 2559 int high_min, to_drain, to_drain_batched, batch; 2560 bool todo = false; 2561 2562 high_min = READ_ONCE(pcp->high_min); 2563 batch = READ_ONCE(pcp->batch); 2564 /* 2565 * Decrease pcp->high periodically to try to free possible 2566 * idle PCP pages. And, avoid to free too many pages to 2567 * control latency. 
This caps pcp->high decrement too. 2568 */ 2569 if (pcp->high > high_min) { 2570 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2571 pcp->high - (pcp->high >> 3), high_min); 2572 if (pcp->high > high_min) 2573 todo = true; 2574 } 2575 2576 to_drain = pcp->count - pcp->high; 2577 while (to_drain > 0) { 2578 to_drain_batched = min(to_drain, batch); 2579 pcp_spin_lock_nopin(pcp); 2580 free_pcppages_bulk(zone, to_drain_batched, pcp, 0); 2581 pcp_spin_unlock_nopin(pcp); 2582 todo = true; 2583 2584 to_drain -= to_drain_batched; 2585 } 2586 2587 return todo; 2588 } 2589 2590 #ifdef CONFIG_NUMA 2591 /* 2592 * Called from the vmstat counter updater to drain pagesets of this 2593 * currently executing processor on remote nodes after they have 2594 * expired. 2595 */ 2596 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2597 { 2598 int to_drain, batch; 2599 2600 batch = READ_ONCE(pcp->batch); 2601 to_drain = min(pcp->count, batch); 2602 if (to_drain > 0) { 2603 pcp_spin_lock_nopin(pcp); 2604 free_pcppages_bulk(zone, to_drain, pcp, 0); 2605 pcp_spin_unlock_nopin(pcp); 2606 } 2607 } 2608 #endif 2609 2610 /* 2611 * Drain pcplists of the indicated processor and zone. 2612 */ 2613 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2614 { 2615 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2616 int count; 2617 2618 do { 2619 pcp_spin_lock_nopin(pcp); 2620 count = pcp->count; 2621 if (count) { 2622 int to_drain = min(count, 2623 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2624 2625 free_pcppages_bulk(zone, to_drain, pcp, 0); 2626 count -= to_drain; 2627 } 2628 pcp_spin_unlock_nopin(pcp); 2629 } while (count); 2630 } 2631 2632 /* 2633 * Drain pcplists of all zones on the indicated processor. 2634 */ 2635 static void drain_pages(unsigned int cpu) 2636 { 2637 struct zone *zone; 2638 2639 for_each_populated_zone(zone) { 2640 drain_pages_zone(cpu, zone); 2641 } 2642 } 2643 2644 /* 2645 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2646 */ 2647 void drain_local_pages(struct zone *zone) 2648 { 2649 int cpu = smp_processor_id(); 2650 2651 if (zone) 2652 drain_pages_zone(cpu, zone); 2653 else 2654 drain_pages(cpu); 2655 } 2656 2657 /* 2658 * The implementation of drain_all_pages(), exposing an extra parameter to 2659 * drain on all cpus. 2660 * 2661 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2662 * not empty. The check for non-emptiness can however race with a free to 2663 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2664 * that need the guarantee that every CPU has drained can disable the 2665 * optimizing racy check. 2666 */ 2667 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2668 { 2669 int cpu; 2670 2671 /* 2672 * Allocate in the BSS so we won't require allocation in 2673 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2674 */ 2675 static cpumask_t cpus_with_pcps; 2676 2677 /* 2678 * Do not drain if one is already in progress unless it's specific to 2679 * a zone. Such callers are primarily CMA and memory hotplug and need 2680 * the drain to be complete when the call returns. 
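* Hence, when a drain is already running, zone-specific callers block on the mutex while callers without a zone simply return.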
2681 */ 2682 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2683 if (!zone) 2684 return; 2685 mutex_lock(&pcpu_drain_mutex); 2686 } 2687 2688 /* 2689 * We don't care about racing with CPU hotplug event 2690 * as offline notification will cause the notified 2691 * cpu to drain that CPU pcps and on_each_cpu_mask 2692 * disables preemption as part of its processing 2693 */ 2694 for_each_online_cpu(cpu) { 2695 struct per_cpu_pages *pcp; 2696 struct zone *z; 2697 bool has_pcps = false; 2698 2699 if (force_all_cpus) { 2700 /* 2701 * The pcp.count check is racy, some callers need a 2702 * guarantee that no cpu is missed. 2703 */ 2704 has_pcps = true; 2705 } else if (zone) { 2706 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2707 if (pcp->count) 2708 has_pcps = true; 2709 } else { 2710 for_each_populated_zone(z) { 2711 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2712 if (pcp->count) { 2713 has_pcps = true; 2714 break; 2715 } 2716 } 2717 } 2718 2719 if (has_pcps) 2720 cpumask_set_cpu(cpu, &cpus_with_pcps); 2721 else 2722 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2723 } 2724 2725 for_each_cpu(cpu, &cpus_with_pcps) { 2726 if (zone) 2727 drain_pages_zone(cpu, zone); 2728 else 2729 drain_pages(cpu); 2730 } 2731 2732 mutex_unlock(&pcpu_drain_mutex); 2733 } 2734 2735 /* 2736 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2737 * 2738 * When zone parameter is non-NULL, spill just the single zone's pages. 2739 */ 2740 void drain_all_pages(struct zone *zone) 2741 { 2742 __drain_all_pages(zone, false); 2743 } 2744 2745 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2746 { 2747 int min_nr_free, max_nr_free; 2748 2749 /* Free as much as possible if batch freeing high-order pages. */ 2750 if (unlikely(free_high)) 2751 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2752 2753 /* Check for PCP disabled or boot pageset */ 2754 if (unlikely(high < batch)) 2755 return 1; 2756 2757 /* Leave at least pcp->batch pages on the list */ 2758 min_nr_free = batch; 2759 max_nr_free = high - batch; 2760 2761 /* 2762 * Increase the batch number to the number of the consecutive 2763 * freed pages to reduce zone lock contention. 
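* For example (illustrative values): with batch 63 and high 512, the number freed follows pcp->free_count but is clamped to the range [63, 449].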
2764 */ 2765 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2766 2767 return batch; 2768 } 2769 2770 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2771 int batch, bool free_high) 2772 { 2773 int high, high_min, high_max; 2774 2775 high_min = READ_ONCE(pcp->high_min); 2776 high_max = READ_ONCE(pcp->high_max); 2777 high = pcp->high = clamp(pcp->high, high_min, high_max); 2778 2779 if (unlikely(!high)) 2780 return 0; 2781 2782 if (unlikely(free_high)) { 2783 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2784 high_min); 2785 return 0; 2786 } 2787 2788 /* 2789 * If reclaim is active, limit the number of pages that can be 2790 * stored on pcp lists 2791 */ 2792 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2793 int free_count = max_t(int, pcp->free_count, batch); 2794 2795 pcp->high = max(high - free_count, high_min); 2796 return min(batch << 2, pcp->high); 2797 } 2798 2799 if (high_min == high_max) 2800 return high; 2801 2802 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2803 int free_count = max_t(int, pcp->free_count, batch); 2804 2805 pcp->high = max(high - free_count, high_min); 2806 high = max(pcp->count, high_min); 2807 } else if (pcp->count >= high) { 2808 int need_high = pcp->free_count + batch; 2809 2810 /* pcp->high should be large enough to hold batch freed pages */ 2811 if (pcp->high < need_high) 2812 pcp->high = clamp(need_high, high_min, high_max); 2813 } 2814 2815 return high; 2816 } 2817 2818 /* 2819 * Tune pcp alloc factor and adjust count & free_count. Free pages to bring the 2820 * pcp's watermarks below high. 2821 * 2822 * May return a freed pcp, if during page freeing the pcp spinlock cannot be 2823 * reacquired. Return true if pcp is locked, false otherwise. 2824 */ 2825 static bool free_frozen_page_commit(struct zone *zone, 2826 struct per_cpu_pages *pcp, struct page *page, int migratetype, 2827 unsigned int order, fpi_t fpi_flags) 2828 { 2829 int high, batch; 2830 int to_free, to_free_batched; 2831 int pindex; 2832 int cpu = smp_processor_id(); 2833 int ret = true; 2834 bool free_high = false; 2835 2836 /* 2837 * On freeing, reduce the number of pages that are batch allocated. 2838 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2839 * allocations. 2840 */ 2841 pcp->alloc_factor >>= 1; 2842 __count_vm_events(PGFREE, 1 << order); 2843 pindex = order_to_pindex(migratetype, order); 2844 list_add(&page->pcp_list, &pcp->lists[pindex]); 2845 pcp->count += 1 << order; 2846 2847 batch = READ_ONCE(pcp->batch); 2848 /* 2849 * As high-order pages other than THP's stored on PCP can contribute 2850 * to fragmentation, limit the number stored when PCP is heavily 2851 * freeing without allocation. The remainder after bulk freeing 2852 * stops will be drained from vmstat refresh context. 2853 */ 2854 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2855 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) && 2856 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2857 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2858 pcp->count >= batch)); 2859 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2860 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2861 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2862 } 2863 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2864 pcp->free_count += (1 << order); 2865 2866 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 2867 /* 2868 * Do not attempt to take a zone lock. Let pcp->count get 2869 * over high mark temporarily. 
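* The overshoot is trimmed later from vmstat refresh context, see decay_pcp_high().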
2870 */ 2871 return true; 2872 } 2873 2874 high = nr_pcp_high(pcp, zone, batch, free_high); 2875 if (pcp->count < high) 2876 return true; 2877 2878 to_free = nr_pcp_free(pcp, batch, high, free_high); 2879 while (to_free > 0 && pcp->count > 0) { 2880 to_free_batched = min(to_free, batch); 2881 free_pcppages_bulk(zone, to_free_batched, pcp, pindex); 2882 to_free -= to_free_batched; 2883 2884 if (to_free == 0 || pcp->count == 0) 2885 break; 2886 2887 pcp_spin_unlock(pcp); 2888 2889 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2890 if (!pcp) { 2891 ret = false; 2892 break; 2893 } 2894 2895 /* 2896 * Check if this thread has been migrated to a different CPU. 2897 * If that is the case, give up and indicate that the pcp is 2898 * returned in an unlocked state. 2899 */ 2900 if (smp_processor_id() != cpu) { 2901 pcp_spin_unlock(pcp); 2902 ret = false; 2903 break; 2904 } 2905 } 2906 2907 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2908 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2909 ZONE_MOVABLE, 0)) { 2910 struct pglist_data *pgdat = zone->zone_pgdat; 2911 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2912 2913 /* 2914 * Assume that memory pressure on this node is gone and may be 2915 * in a reclaimable state. If a memory fallback node exists, 2916 * direct reclaim may not have been triggered, causing a 2917 * 'hopeless node' to stay in that state for a while. Let 2918 * kswapd work again by resetting kswapd_failures. 2919 */ 2920 if (kswapd_test_hopeless(pgdat) && 2921 next_memory_node(pgdat->node_id) < MAX_NUMNODES) 2922 kswapd_clear_hopeless(pgdat, KSWAPD_CLEAR_HOPELESS_PCP); 2923 } 2924 return ret; 2925 } 2926 2927 /* 2928 * Free a pcp page 2929 */ 2930 static void __free_frozen_pages(struct page *page, unsigned int order, 2931 fpi_t fpi_flags) 2932 { 2933 struct per_cpu_pages *pcp; 2934 struct zone *zone; 2935 unsigned long pfn = page_to_pfn(page); 2936 int migratetype; 2937 2938 if (!pcp_allowed_order(order)) { 2939 __free_pages_ok(page, order, fpi_flags); 2940 return; 2941 } 2942 2943 if (!__free_pages_prepare(page, order, fpi_flags)) 2944 return; 2945 2946 /* 2947 * We only track unmovable, reclaimable and movable on pcp lists. 2948 * Place ISOLATE pages on the isolated list because they are being 2949 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2950 * get those areas back if necessary. 
Otherwise, we may have to free 2951 * excessively into the page allocator 2952 */ 2953 zone = page_zone(page); 2954 migratetype = get_pfnblock_migratetype(page, pfn); 2955 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2956 if (unlikely(is_migrate_isolate(migratetype))) { 2957 free_one_page(zone, page, pfn, order, fpi_flags); 2958 return; 2959 } 2960 migratetype = MIGRATE_MOVABLE; 2961 } 2962 2963 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT) 2964 && (in_nmi() || in_hardirq()))) { 2965 add_page_to_zone_llist(zone, page, order); 2966 return; 2967 } 2968 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2969 if (pcp) { 2970 if (!free_frozen_page_commit(zone, pcp, page, migratetype, 2971 order, fpi_flags)) 2972 return; 2973 pcp_spin_unlock(pcp); 2974 } else { 2975 free_one_page(zone, page, pfn, order, fpi_flags); 2976 } 2977 } 2978 2979 void free_frozen_pages(struct page *page, unsigned int order) 2980 { 2981 __free_frozen_pages(page, order, FPI_NONE); 2982 } 2983 2984 void free_frozen_pages_nolock(struct page *page, unsigned int order) 2985 { 2986 __free_frozen_pages(page, order, FPI_TRYLOCK); 2987 } 2988 2989 /* 2990 * Free a batch of folios 2991 */ 2992 void free_unref_folios(struct folio_batch *folios) 2993 { 2994 struct per_cpu_pages *pcp = NULL; 2995 struct zone *locked_zone = NULL; 2996 int i, j; 2997 2998 /* Prepare folios for freeing */ 2999 for (i = 0, j = 0; i < folios->nr; i++) { 3000 struct folio *folio = folios->folios[i]; 3001 unsigned long pfn = folio_pfn(folio); 3002 unsigned int order = folio_order(folio); 3003 3004 if (!__free_pages_prepare(&folio->page, order, FPI_NONE)) 3005 continue; 3006 /* 3007 * Free orders not handled on the PCP directly to the 3008 * allocator. 3009 */ 3010 if (!pcp_allowed_order(order)) { 3011 free_one_page(folio_zone(folio), &folio->page, 3012 pfn, order, FPI_NONE); 3013 continue; 3014 } 3015 folio->private = (void *)(unsigned long)order; 3016 if (j != i) 3017 folios->folios[j] = folio; 3018 j++; 3019 } 3020 folios->nr = j; 3021 3022 for (i = 0; i < folios->nr; i++) { 3023 struct folio *folio = folios->folios[i]; 3024 struct zone *zone = folio_zone(folio); 3025 unsigned long pfn = folio_pfn(folio); 3026 unsigned int order = (unsigned long)folio->private; 3027 int migratetype; 3028 3029 folio->private = NULL; 3030 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 3031 3032 /* Different zone requires a different pcp lock */ 3033 if (zone != locked_zone || 3034 is_migrate_isolate(migratetype)) { 3035 if (pcp) { 3036 pcp_spin_unlock(pcp); 3037 locked_zone = NULL; 3038 pcp = NULL; 3039 } 3040 3041 /* 3042 * Free isolated pages directly to the 3043 * allocator, see comment in free_frozen_pages. 3044 */ 3045 if (is_migrate_isolate(migratetype)) { 3046 free_one_page(zone, &folio->page, pfn, 3047 order, FPI_NONE); 3048 continue; 3049 } 3050 3051 /* 3052 * trylock is necessary as folios may be getting freed 3053 * from IRQ or SoftIRQ context after an IO completion. 3054 */ 3055 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3056 if (unlikely(!pcp)) { 3057 free_one_page(zone, &folio->page, pfn, 3058 order, FPI_NONE); 3059 continue; 3060 } 3061 locked_zone = zone; 3062 } 3063 3064 /* 3065 * Non-isolated types over MIGRATE_PCPTYPES get added 3066 * to the MIGRATE_MOVABLE pcp list. 
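* (i.e. HIGHATOMIC and CMA pages, matching the policy in __free_frozen_pages().)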
3067 */ 3068 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3069 migratetype = MIGRATE_MOVABLE; 3070 3071 trace_mm_page_free_batched(&folio->page); 3072 if (!free_frozen_page_commit(zone, pcp, &folio->page, 3073 migratetype, order, FPI_NONE)) { 3074 pcp = NULL; 3075 locked_zone = NULL; 3076 } 3077 } 3078 3079 if (pcp) 3080 pcp_spin_unlock(pcp); 3081 folio_batch_reinit(folios); 3082 } 3083 3084 static void __split_page(struct page *page, unsigned int order) 3085 { 3086 VM_WARN_ON_PAGE(PageCompound(page), page); 3087 3088 split_page_owner(page, order, 0); 3089 pgalloc_tag_split(page_folio(page), order, 0); 3090 split_page_memcg(page, order); 3091 } 3092 3093 /* 3094 * split_page takes a non-compound higher-order page, and splits it into 3095 * n (1<<order) sub-pages: page[0..n] 3096 * Each sub-page must be freed individually. 3097 * 3098 * Note: this is probably too low level an operation for use in drivers. 3099 * Please consult with lkml before using this in your driver. 3100 */ 3101 void split_page(struct page *page, unsigned int order) 3102 { 3103 int i; 3104 3105 VM_WARN_ON_PAGE(!page_count(page), page); 3106 3107 for (i = 1; i < (1 << order); i++) 3108 set_page_refcounted(page + i); 3109 3110 __split_page(page, order); 3111 } 3112 EXPORT_SYMBOL_GPL(split_page); 3113 3114 int __isolate_free_page(struct page *page, unsigned int order) 3115 { 3116 struct zone *zone = page_zone(page); 3117 int mt = get_pageblock_migratetype(page); 3118 3119 if (!is_migrate_isolate(mt)) { 3120 unsigned long watermark; 3121 /* 3122 * Obey watermarks as if the page was being allocated. We can 3123 * emulate a high-order watermark check with a raised order-0 3124 * watermark, because we already know our high-order page 3125 * exists. 3126 */ 3127 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3128 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3129 return 0; 3130 } 3131 3132 del_page_from_free_list(page, zone, order, mt); 3133 3134 /* 3135 * Set the pageblock if the isolated page is at least half of a 3136 * pageblock 3137 */ 3138 if (order >= pageblock_order - 1) { 3139 struct page *endpage = page + (1 << order) - 1; 3140 for (; page < endpage; page += pageblock_nr_pages) { 3141 int mt = get_pageblock_migratetype(page); 3142 /* 3143 * Only change normal pageblocks (i.e., they can merge 3144 * with others) 3145 */ 3146 if (migratetype_is_mergeable(mt)) 3147 move_freepages_block(zone, page, mt, 3148 MIGRATE_MOVABLE); 3149 } 3150 } 3151 3152 return 1UL << order; 3153 } 3154 3155 /** 3156 * __putback_isolated_page - Return a now-isolated page back where we got it 3157 * @page: Page that was isolated 3158 * @order: Order of the isolated page 3159 * @mt: The page's pageblock's migratetype 3160 * 3161 * This function is meant to return a page pulled from the free lists via 3162 * __isolate_free_page back to the free lists they were pulled from. 3163 */ 3164 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3165 { 3166 struct zone *zone = page_zone(page); 3167 3168 /* zone lock should be held when this function is called */ 3169 lockdep_assert_held(&zone->lock); 3170 3171 /* Return isolated page to tail of freelist. 
*/ 3172 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3173 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3174 } 3175 3176 /* 3177 * Update NUMA hit/miss statistics 3178 */ 3179 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3180 long nr_account) 3181 { 3182 #ifdef CONFIG_NUMA 3183 enum numa_stat_item local_stat = NUMA_LOCAL; 3184 3185 /* skip numa counters update if numa stats is disabled */ 3186 if (!static_branch_likely(&vm_numa_stat_key)) 3187 return; 3188 3189 if (zone_to_nid(z) != numa_node_id()) 3190 local_stat = NUMA_OTHER; 3191 3192 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3193 __count_numa_events(z, NUMA_HIT, nr_account); 3194 else { 3195 __count_numa_events(z, NUMA_MISS, nr_account); 3196 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3197 } 3198 __count_numa_events(z, local_stat, nr_account); 3199 #endif 3200 } 3201 3202 static __always_inline 3203 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 3204 unsigned int order, unsigned int alloc_flags, 3205 int migratetype) 3206 { 3207 struct page *page; 3208 unsigned long flags; 3209 3210 do { 3211 page = NULL; 3212 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 3213 if (!spin_trylock_irqsave(&zone->lock, flags)) 3214 return NULL; 3215 } else { 3216 spin_lock_irqsave(&zone->lock, flags); 3217 } 3218 if (alloc_flags & ALLOC_HIGHATOMIC) 3219 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3220 if (!page) { 3221 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 3222 3223 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); 3224 3225 /* 3226 * If the allocation fails, allow OOM handling and 3227 * order-0 (atomic) allocs access to HIGHATOMIC 3228 * reserves as failing now is worse than failing a 3229 * high-order atomic allocation in the future. 3230 */ 3231 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 3232 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3233 3234 if (!page) { 3235 spin_unlock_irqrestore(&zone->lock, flags); 3236 return NULL; 3237 } 3238 } 3239 spin_unlock_irqrestore(&zone->lock, flags); 3240 } while (check_new_pages(page, order)); 3241 3242 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3243 zone_statistics(preferred_zone, zone, 1); 3244 3245 return page; 3246 } 3247 3248 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 3249 { 3250 int high, base_batch, batch, max_nr_alloc; 3251 int high_max, high_min; 3252 3253 base_batch = READ_ONCE(pcp->batch); 3254 high_min = READ_ONCE(pcp->high_min); 3255 high_max = READ_ONCE(pcp->high_max); 3256 high = pcp->high = clamp(pcp->high, high_min, high_max); 3257 3258 /* Check for PCP disabled or boot pageset */ 3259 if (unlikely(high < base_batch)) 3260 return 1; 3261 3262 if (order) 3263 batch = base_batch; 3264 else 3265 batch = (base_batch << pcp->alloc_factor); 3266 3267 /* 3268 * If we had larger pcp->high, we could avoid to allocate from 3269 * zone. 3270 */ 3271 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3272 high = pcp->high = min(high + batch, high_max); 3273 3274 if (!order) { 3275 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 3276 /* 3277 * Double the number of pages allocated each time there is 3278 * subsequent allocation of order-0 pages without any freeing. 
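* With a base batch of 63 (illustrative), successive refills request roughly 63, 126, 252, ... pages until alloc_factor reaches CONFIG_PCP_BATCH_SCALE_MAX or max_nr_alloc caps the batch.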
3279 */ 3280 if (batch <= max_nr_alloc && 3281 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 3282 pcp->alloc_factor++; 3283 batch = min(batch, max_nr_alloc); 3284 } 3285 3286 /* 3287 * Scale batch relative to order if batch implies free pages 3288 * can be stored on the PCP. Batch can be 1 for small zones or 3289 * for boot pagesets which should never store free pages as 3290 * the pages may belong to arbitrary zones. 3291 */ 3292 if (batch > 1) 3293 batch = max(batch >> order, 2); 3294 3295 return batch; 3296 } 3297 3298 /* Remove page from the per-cpu list, caller must protect the list */ 3299 static inline 3300 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3301 int migratetype, 3302 unsigned int alloc_flags, 3303 struct per_cpu_pages *pcp, 3304 struct list_head *list) 3305 { 3306 struct page *page; 3307 3308 do { 3309 if (list_empty(list)) { 3310 int batch = nr_pcp_alloc(pcp, zone, order); 3311 int alloced; 3312 3313 alloced = rmqueue_bulk(zone, order, 3314 batch, list, 3315 migratetype, alloc_flags); 3316 3317 pcp->count += alloced << order; 3318 if (unlikely(list_empty(list))) 3319 return NULL; 3320 } 3321 3322 page = list_first_entry(list, struct page, pcp_list); 3323 list_del(&page->pcp_list); 3324 pcp->count -= 1 << order; 3325 } while (check_new_pages(page, order)); 3326 3327 return page; 3328 } 3329 3330 /* Lock and remove page from the per-cpu list */ 3331 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3332 struct zone *zone, unsigned int order, 3333 int migratetype, unsigned int alloc_flags) 3334 { 3335 struct per_cpu_pages *pcp; 3336 struct list_head *list; 3337 struct page *page; 3338 3339 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 3340 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3341 if (!pcp) 3342 return NULL; 3343 3344 /* 3345 * On allocation, reduce the number of pages that are batch freed. 3346 * See nr_pcp_free() where free_factor is increased for subsequent 3347 * frees. 3348 */ 3349 pcp->free_count >>= 1; 3350 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3351 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3352 pcp_spin_unlock(pcp); 3353 if (page) { 3354 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3355 zone_statistics(preferred_zone, zone, 1); 3356 } 3357 return page; 3358 } 3359 3360 /* 3361 * Allocate a page from the given zone. 3362 * Use pcplists for THP or "cheap" high-order allocations. 3363 */ 3364 3365 /* 3366 * Do not instrument rmqueue() with KMSAN. This function may call 3367 * __msan_poison_alloca() through a call to set_pfnblock_migratetype(). 3368 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3369 * may call rmqueue() again, which will result in a deadlock. 
3370 */ 3371 __no_sanitize_memory 3372 static inline 3373 struct page *rmqueue(struct zone *preferred_zone, 3374 struct zone *zone, unsigned int order, 3375 gfp_t gfp_flags, unsigned int alloc_flags, 3376 int migratetype) 3377 { 3378 struct page *page; 3379 3380 if (likely(pcp_allowed_order(order))) { 3381 page = rmqueue_pcplist(preferred_zone, zone, order, 3382 migratetype, alloc_flags); 3383 if (likely(page)) 3384 goto out; 3385 } 3386 3387 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3388 migratetype); 3389 3390 out: 3391 /* Separate test+clear to avoid unnecessary atomics */ 3392 if ((alloc_flags & ALLOC_KSWAPD) && 3393 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3394 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3395 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3396 } 3397 3398 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3399 return page; 3400 } 3401 3402 /* 3403 * Reserve the pageblock(s) surrounding an allocation request for 3404 * exclusive use of high-order atomic allocations if there are no 3405 * empty page blocks that contain a page with a suitable order 3406 */ 3407 static void reserve_highatomic_pageblock(struct page *page, int order, 3408 struct zone *zone) 3409 { 3410 int mt; 3411 unsigned long max_managed, flags; 3412 3413 /* 3414 * The number reserved as: minimum is 1 pageblock, maximum is 3415 * roughly 1% of a zone. But if 1% of a zone falls below a 3416 * pageblock size, then don't reserve any pageblocks. 3417 * Check is race-prone but harmless. 3418 */ 3419 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 3420 return; 3421 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 3422 if (zone->nr_reserved_highatomic >= max_managed) 3423 return; 3424 3425 spin_lock_irqsave(&zone->lock, flags); 3426 3427 /* Recheck the nr_reserved_highatomic limit under the lock */ 3428 if (zone->nr_reserved_highatomic >= max_managed) 3429 goto out_unlock; 3430 3431 /* Yoink! */ 3432 mt = get_pageblock_migratetype(page); 3433 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 3434 if (!migratetype_is_mergeable(mt)) 3435 goto out_unlock; 3436 3437 if (order < pageblock_order) { 3438 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 3439 goto out_unlock; 3440 zone->nr_reserved_highatomic += pageblock_nr_pages; 3441 } else { 3442 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 3443 zone->nr_reserved_highatomic += 1 << order; 3444 } 3445 3446 out_unlock: 3447 spin_unlock_irqrestore(&zone->lock, flags); 3448 } 3449 3450 /* 3451 * Used when an allocation is about to fail under memory pressure. This 3452 * potentially hurts the reliability of high-order allocations when under 3453 * intense memory pressure but failed atomic allocations should be easier 3454 * to recover from than an OOM. 3455 * 3456 * If @force is true, try to unreserve pageblocks even though highatomic 3457 * pageblock is exhausted. 3458 */ 3459 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 3460 bool force) 3461 { 3462 struct zonelist *zonelist = ac->zonelist; 3463 unsigned long flags; 3464 struct zoneref *z; 3465 struct zone *zone; 3466 struct page *page; 3467 int order; 3468 int ret; 3469 3470 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 3471 ac->nodemask) { 3472 /* 3473 * Preserve at least one pageblock unless memory pressure 3474 * is really high. 
3475 */ 3476 if (!force && zone->nr_reserved_highatomic <= 3477 pageblock_nr_pages) 3478 continue; 3479 3480 spin_lock_irqsave(&zone->lock, flags); 3481 for (order = 0; order < NR_PAGE_ORDERS; order++) { 3482 struct free_area *area = &(zone->free_area[order]); 3483 unsigned long size; 3484 3485 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 3486 if (!page) 3487 continue; 3488 3489 size = max(pageblock_nr_pages, 1UL << order); 3490 /* 3491 * It should never happen but changes to 3492 * locking could inadvertently allow a per-cpu 3493 * drain to add pages to MIGRATE_HIGHATOMIC 3494 * while unreserving so be safe and watch for 3495 * underflows. 3496 */ 3497 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) 3498 size = zone->nr_reserved_highatomic; 3499 zone->nr_reserved_highatomic -= size; 3500 3501 /* 3502 * Convert to ac->migratetype and avoid the normal 3503 * pageblock stealing heuristics. Minimally, the caller 3504 * is doing the work and needs the pages. More 3505 * importantly, if the block was always converted to 3506 * MIGRATE_UNMOVABLE or another type then the number 3507 * of pageblocks that cannot be completely freed 3508 * may increase. 3509 */ 3510 if (order < pageblock_order) 3511 ret = move_freepages_block(zone, page, 3512 MIGRATE_HIGHATOMIC, 3513 ac->migratetype); 3514 else { 3515 move_to_free_list(page, zone, order, 3516 MIGRATE_HIGHATOMIC, 3517 ac->migratetype); 3518 change_pageblock_range(page, order, 3519 ac->migratetype); 3520 ret = 1; 3521 } 3522 /* 3523 * Reserving the block(s) already succeeded, 3524 * so this should not fail on zone boundaries. 3525 */ 3526 WARN_ON_ONCE(ret == -1); 3527 if (ret > 0) { 3528 spin_unlock_irqrestore(&zone->lock, flags); 3529 return ret; 3530 } 3531 } 3532 spin_unlock_irqrestore(&zone->lock, flags); 3533 } 3534 3535 return false; 3536 } 3537 3538 static inline long __zone_watermark_unusable_free(struct zone *z, 3539 unsigned int order, unsigned int alloc_flags) 3540 { 3541 long unusable_free = (1 << order) - 1; 3542 3543 /* 3544 * If the caller does not have rights to reserves below the min 3545 * watermark then subtract the free pages reserved for highatomic. 3546 */ 3547 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3548 unusable_free += READ_ONCE(z->nr_free_highatomic); 3549 3550 #ifdef CONFIG_CMA 3551 /* If allocation can't use CMA areas don't use free CMA pages */ 3552 if (!(alloc_flags & ALLOC_CMA)) 3553 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3554 #endif 3555 3556 return unusable_free; 3557 } 3558 3559 /* 3560 * Return true if free base pages are above 'mark'. For high-order checks it 3561 * will return true of the order-0 watermark is reached and there is at least 3562 * one free page of a suitable size. Checking now avoids taking the zone lock 3563 * to check in the allocation paths if no pages are free. 3564 */ 3565 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3566 int highest_zoneidx, unsigned int alloc_flags, 3567 long free_pages) 3568 { 3569 long min = mark; 3570 int o; 3571 3572 /* free_pages may go negative - that's OK */ 3573 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3574 3575 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3576 /* 3577 * __GFP_HIGH allows access to 50% of the min reserve as well 3578 * as OOM. 3579 */ 3580 if (alloc_flags & ALLOC_MIN_RESERVE) { 3581 min -= min / 2; 3582 3583 /* 3584 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3585 * access more reserves than just __GFP_HIGH. 
Other 3586 * non-blocking allocations requests such as GFP_NOWAIT 3587 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3588 * access to the min reserve. 3589 */ 3590 if (alloc_flags & ALLOC_NON_BLOCK) 3591 min -= min / 4; 3592 } 3593 3594 /* 3595 * OOM victims can try even harder than the normal reserve 3596 * users on the grounds that it's definitely going to be in 3597 * the exit path shortly and free memory. Any allocation it 3598 * makes during the free path will be small and short-lived. 3599 */ 3600 if (alloc_flags & ALLOC_OOM) 3601 min -= min / 2; 3602 } 3603 3604 /* 3605 * Check watermarks for an order-0 allocation request. If these 3606 * are not met, then a high-order request also cannot go ahead 3607 * even if a suitable page happened to be free. 3608 */ 3609 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3610 return false; 3611 3612 /* If this is an order-0 request then the watermark is fine */ 3613 if (!order) 3614 return true; 3615 3616 /* For a high-order request, check at least one suitable page is free */ 3617 for (o = order; o < NR_PAGE_ORDERS; o++) { 3618 struct free_area *area = &z->free_area[o]; 3619 int mt; 3620 3621 if (!area->nr_free) 3622 continue; 3623 3624 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3625 if (!free_area_empty(area, mt)) 3626 return true; 3627 } 3628 3629 #ifdef CONFIG_CMA 3630 if ((alloc_flags & ALLOC_CMA) && 3631 !free_area_empty(area, MIGRATE_CMA)) { 3632 return true; 3633 } 3634 #endif 3635 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3636 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3637 return true; 3638 } 3639 } 3640 return false; 3641 } 3642 3643 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3644 int highest_zoneidx, unsigned int alloc_flags) 3645 { 3646 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3647 zone_page_state(z, NR_FREE_PAGES)); 3648 } 3649 3650 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3651 unsigned long mark, int highest_zoneidx, 3652 unsigned int alloc_flags, gfp_t gfp_mask) 3653 { 3654 long free_pages; 3655 3656 free_pages = zone_page_state(z, NR_FREE_PAGES); 3657 3658 /* 3659 * Fast check for order-0 only. If this fails then the reserves 3660 * need to be calculated. 3661 */ 3662 if (!order) { 3663 long usable_free; 3664 long reserved; 3665 3666 usable_free = free_pages; 3667 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3668 3669 /* reserved may over estimate high-atomic reserves. */ 3670 usable_free -= min(usable_free, reserved); 3671 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3672 return true; 3673 } 3674 3675 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3676 free_pages)) 3677 return true; 3678 3679 /* 3680 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3681 * when checking the min watermark. The min watermark is the 3682 * point where boosting is ignored so that kswapd is woken up 3683 * when below the low watermark. 
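* In that case the check is redone below against the unboosted WMARK_MIN, so watermark boosting alone cannot fail an ALLOC_MIN_RESERVE order-0 request.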
3684 */ 3685 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3686 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3687 mark = z->_watermark[WMARK_MIN]; 3688 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3689 alloc_flags, free_pages); 3690 } 3691 3692 return false; 3693 } 3694 3695 #ifdef CONFIG_NUMA 3696 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3697 3698 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3699 { 3700 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3701 node_reclaim_distance; 3702 } 3703 #else /* CONFIG_NUMA */ 3704 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3705 { 3706 return true; 3707 } 3708 #endif /* CONFIG_NUMA */ 3709 3710 /* 3711 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3712 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3713 * premature use of a lower zone may cause lowmem pressure problems that 3714 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3715 * probably too small. It only makes sense to spread allocations to avoid 3716 * fragmentation between the Normal and DMA32 zones. 3717 */ 3718 static inline unsigned int 3719 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3720 { 3721 unsigned int alloc_flags; 3722 3723 /* 3724 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3725 * to save a branch. 3726 */ 3727 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3728 3729 if (defrag_mode) { 3730 alloc_flags |= ALLOC_NOFRAGMENT; 3731 return alloc_flags; 3732 } 3733 3734 #ifdef CONFIG_ZONE_DMA32 3735 if (!zone) 3736 return alloc_flags; 3737 3738 if (zone_idx(zone) != ZONE_NORMAL) 3739 return alloc_flags; 3740 3741 /* 3742 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3743 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3744 * on UMA that if Normal is populated then so is DMA32. 3745 */ 3746 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3747 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3748 return alloc_flags; 3749 3750 alloc_flags |= ALLOC_NOFRAGMENT; 3751 #endif /* CONFIG_ZONE_DMA32 */ 3752 return alloc_flags; 3753 } 3754 3755 /* Must be called after current_gfp_context() which can change gfp_mask */ 3756 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3757 unsigned int alloc_flags) 3758 { 3759 #ifdef CONFIG_CMA 3760 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3761 alloc_flags |= ALLOC_CMA; 3762 #endif 3763 return alloc_flags; 3764 } 3765 3766 /* 3767 * get_page_from_freelist goes through the zonelist trying to allocate 3768 * a page. 3769 */ 3770 static struct page * 3771 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3772 const struct alloc_context *ac) 3773 { 3774 struct zoneref *z; 3775 struct zone *zone; 3776 struct pglist_data *last_pgdat = NULL; 3777 bool last_pgdat_dirty_ok = false; 3778 bool no_fallback; 3779 bool skip_kswapd_nodes = nr_online_nodes > 1; 3780 bool skipped_kswapd_nodes = false; 3781 3782 retry: 3783 /* 3784 * Scan zonelist, looking for a zone with enough free. 3785 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c. 
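 *
 * Roughly, each candidate zone is filtered in this order: cpuset
 * permission, the per-node dirty limit (for __GFP_WRITE writers), the
 * ALLOC_NOFRAGMENT locality check, the busy-kswapd skip, and finally
 * the watermark checks, with node_reclaim() as a last resort before
 * moving on to the next zone.
 *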
3786 */ 3787 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3788 z = ac->preferred_zoneref; 3789 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3790 ac->nodemask) { 3791 struct page *page; 3792 unsigned long mark; 3793 3794 if (cpusets_enabled() && 3795 (alloc_flags & ALLOC_CPUSET) && 3796 !__cpuset_zone_allowed(zone, gfp_mask)) 3797 continue; 3798 /* 3799 * When allocating a page cache page for writing, we 3800 * want to get it from a node that is within its dirty 3801 * limit, such that no single node holds more than its 3802 * proportional share of globally allowed dirty pages. 3803 * The dirty limits take into account the node's 3804 * lowmem reserves and high watermark so that kswapd 3805 * should be able to balance it without having to 3806 * write pages from its LRU list. 3807 * 3808 * XXX: For now, allow allocations to potentially 3809 * exceed the per-node dirty limit in the slowpath 3810 * (spread_dirty_pages unset) before going into reclaim, 3811 * which is important when on a NUMA setup the allowed 3812 * nodes are together not big enough to reach the 3813 * global limit. The proper fix for these situations 3814 * will require awareness of nodes in the 3815 * dirty-throttling and the flusher threads. 3816 */ 3817 if (ac->spread_dirty_pages) { 3818 if (last_pgdat != zone->zone_pgdat) { 3819 last_pgdat = zone->zone_pgdat; 3820 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3821 } 3822 3823 if (!last_pgdat_dirty_ok) 3824 continue; 3825 } 3826 3827 if (no_fallback && !defrag_mode && nr_online_nodes > 1 && 3828 zone != zonelist_zone(ac->preferred_zoneref)) { 3829 int local_nid; 3830 3831 /* 3832 * If moving to a remote node, retry but allow 3833 * fragmenting fallbacks. Locality is more important 3834 * than fragmentation avoidance. 3835 */ 3836 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3837 if (zone_to_nid(zone) != local_nid) { 3838 alloc_flags &= ~ALLOC_NOFRAGMENT; 3839 goto retry; 3840 } 3841 } 3842 3843 /* 3844 * If kswapd is already active on a node, keep looking 3845 * for other nodes that might be idle. This can happen 3846 * if another process has NUMA bindings and is causing 3847 * kswapd wakeups on only some nodes. Avoid accidental 3848 * "node_reclaim_mode"-like behavior in this case. 3849 */ 3850 if (skip_kswapd_nodes && 3851 !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) { 3852 skipped_kswapd_nodes = true; 3853 continue; 3854 } 3855 3856 cond_accept_memory(zone, order, alloc_flags); 3857 3858 /* 3859 * Detect whether the number of free pages is below high 3860 * watermark. If so, we will decrease pcp->high and free 3861 * PCP pages in free path to reduce the possibility of 3862 * premature page reclaiming. Detection is done here to 3863 * avoid to do that in hotter free path. 3864 */ 3865 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3866 goto check_alloc_wmark; 3867 3868 mark = high_wmark_pages(zone); 3869 if (zone_watermark_fast(zone, order, mark, 3870 ac->highest_zoneidx, alloc_flags, 3871 gfp_mask)) 3872 goto try_this_zone; 3873 else 3874 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3875 3876 check_alloc_wmark: 3877 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3878 if (!zone_watermark_fast(zone, order, mark, 3879 ac->highest_zoneidx, alloc_flags, 3880 gfp_mask)) { 3881 int ret; 3882 3883 if (cond_accept_memory(zone, order, alloc_flags)) 3884 goto try_this_zone; 3885 3886 /* 3887 * Watermark failed for this zone, but see if we can 3888 * grow this zone if it contains deferred pages. 
3889 */ 3890 if (deferred_pages_enabled()) { 3891 if (_deferred_grow_zone(zone, order)) 3892 goto try_this_zone; 3893 } 3894 /* Checked here to keep the fast path fast */ 3895 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3896 if (alloc_flags & ALLOC_NO_WATERMARKS) 3897 goto try_this_zone; 3898 3899 if (!node_reclaim_enabled() || 3900 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3901 continue; 3902 3903 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3904 switch (ret) { 3905 case NODE_RECLAIM_NOSCAN: 3906 /* did not scan */ 3907 continue; 3908 case NODE_RECLAIM_FULL: 3909 /* scanned but unreclaimable */ 3910 continue; 3911 default: 3912 /* did we reclaim enough */ 3913 if (zone_watermark_ok(zone, order, mark, 3914 ac->highest_zoneidx, alloc_flags)) 3915 goto try_this_zone; 3916 3917 continue; 3918 } 3919 } 3920 3921 try_this_zone: 3922 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3923 gfp_mask, alloc_flags, ac->migratetype); 3924 if (page) { 3925 prep_new_page(page, order, gfp_mask, alloc_flags); 3926 3927 /* 3928 * If this is a high-order atomic allocation then check 3929 * if the pageblock should be reserved for the future 3930 */ 3931 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3932 reserve_highatomic_pageblock(page, order, zone); 3933 3934 return page; 3935 } else { 3936 if (cond_accept_memory(zone, order, alloc_flags)) 3937 goto try_this_zone; 3938 3939 /* Try again if zone has deferred pages */ 3940 if (deferred_pages_enabled()) { 3941 if (_deferred_grow_zone(zone, order)) 3942 goto try_this_zone; 3943 } 3944 } 3945 } 3946 3947 /* 3948 * If we skipped over nodes with active kswapds and found no 3949 * idle nodes, retry and place anywhere the watermarks permit. 3950 */ 3951 if (skip_kswapd_nodes && skipped_kswapd_nodes) { 3952 skip_kswapd_nodes = false; 3953 goto retry; 3954 } 3955 3956 /* 3957 * It's possible on a UMA machine to get through all zones that are 3958 * fragmented. If avoiding fragmentation, reset and try again. 3959 */ 3960 if (no_fallback && !defrag_mode) { 3961 alloc_flags &= ~ALLOC_NOFRAGMENT; 3962 goto retry; 3963 } 3964 3965 return NULL; 3966 } 3967 3968 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3969 { 3970 unsigned int filter = SHOW_MEM_FILTER_NODES; 3971 3972 /* 3973 * This documents exceptions given to allocations in certain 3974 * contexts that are allowed to allocate outside current's set 3975 * of allowed nodes. 3976 */ 3977 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3978 if (tsk_is_oom_victim(current) || 3979 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3980 filter &= ~SHOW_MEM_FILTER_NODES; 3981 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3982 filter &= ~SHOW_MEM_FILTER_NODES; 3983 3984 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3985 mem_cgroup_show_protected_memory(NULL); 3986 } 3987 3988 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3989 { 3990 struct va_format vaf; 3991 va_list args; 3992 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3993 3994 if ((gfp_mask & __GFP_NOWARN) || 3995 !__ratelimit(&nopage_rs) || 3996 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3997 return; 3998 3999 va_start(args, fmt); 4000 vaf.fmt = fmt; 4001 vaf.va = &args; 4002 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4003 current->comm, &vaf, gfp_mask, &gfp_mask, 4004 nodemask_pr_args(nodemask)); 4005 va_end(args); 4006 4007 cpuset_print_current_mems_allowed(); 4008 pr_cont("\n"); 4009 dump_stack(); 4010 warn_alloc_show_mem(gfp_mask, nodemask); 4011 } 4012 4013 static inline struct page * 4014 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4015 unsigned int alloc_flags, 4016 const struct alloc_context *ac) 4017 { 4018 struct page *page; 4019 4020 page = get_page_from_freelist(gfp_mask, order, 4021 alloc_flags|ALLOC_CPUSET, ac); 4022 /* 4023 * fallback to ignore cpuset restriction if our nodes 4024 * are depleted 4025 */ 4026 if (!page) 4027 page = get_page_from_freelist(gfp_mask, order, 4028 alloc_flags, ac); 4029 return page; 4030 } 4031 4032 static inline struct page * 4033 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4034 const struct alloc_context *ac, unsigned long *did_some_progress) 4035 { 4036 struct oom_control oc = { 4037 .zonelist = ac->zonelist, 4038 .nodemask = ac->nodemask, 4039 .memcg = NULL, 4040 .gfp_mask = gfp_mask, 4041 .order = order, 4042 }; 4043 struct page *page; 4044 4045 *did_some_progress = 0; 4046 4047 /* 4048 * Acquire the oom lock. If that fails, somebody else is 4049 * making progress for us. 4050 */ 4051 if (!mutex_trylock(&oom_lock)) { 4052 *did_some_progress = 1; 4053 schedule_timeout_uninterruptible(1); 4054 return NULL; 4055 } 4056 4057 /* 4058 * Go through the zonelist yet one more time, keep very high watermark 4059 * here, this is only to catch a parallel oom killing, we must fail if 4060 * we're still under heavy pressure. But make sure that this reclaim 4061 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4062 * allocation which will never fail due to oom_lock already held. 4063 */ 4064 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4065 ~__GFP_DIRECT_RECLAIM, order, 4066 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4067 if (page) 4068 goto out; 4069 4070 /* Coredumps can quickly deplete all memory reserves */ 4071 if (current->flags & PF_DUMPCORE) 4072 goto out; 4073 /* The OOM killer will not help higher order allocs */ 4074 if (order > PAGE_ALLOC_COSTLY_ORDER) 4075 goto out; 4076 /* 4077 * We have already exhausted all our reclaim opportunities without any 4078 * success so it is time to admit defeat. We will skip the OOM killer 4079 * because it is very likely that the caller has a more reasonable 4080 * fallback than shooting a random task. 4081 * 4082 * The OOM killer may not free memory on a specific node. 4083 */ 4084 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4085 goto out; 4086 /* The OOM killer does not needlessly kill tasks for lowmem */ 4087 if (ac->highest_zoneidx < ZONE_NORMAL) 4088 goto out; 4089 if (pm_suspended_storage()) 4090 goto out; 4091 /* 4092 * XXX: GFP_NOFS allocations should rather fail than rely on 4093 * other request to make a forward progress. 4094 * We are in an unfortunate situation where out_of_memory cannot 4095 * do much for this context but let's try it to at least get 4096 * access to memory reserved if the current task is killed (see 4097 * out_of_memory). 
Once filesystems are ready to handle allocation 4098 * failures more gracefully we should just bail out here. 4099 */ 4100 4101 /* Exhausted what can be done so it's blame time */ 4102 if (out_of_memory(&oc) || 4103 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 4104 *did_some_progress = 1; 4105 4106 /* 4107 * Help non-failing allocations by giving them access to memory 4108 * reserves 4109 */ 4110 if (gfp_mask & __GFP_NOFAIL) 4111 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4112 ALLOC_NO_WATERMARKS, ac); 4113 } 4114 out: 4115 mutex_unlock(&oom_lock); 4116 return page; 4117 } 4118 4119 /* 4120 * Maximum number of compaction retries with a progress before OOM 4121 * killer is consider as the only way to move forward. 4122 */ 4123 #define MAX_COMPACT_RETRIES 16 4124 4125 #ifdef CONFIG_COMPACTION 4126 /* Try memory compaction for high-order allocations before reclaim */ 4127 static struct page * 4128 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4129 unsigned int alloc_flags, const struct alloc_context *ac, 4130 enum compact_priority prio, enum compact_result *compact_result) 4131 { 4132 struct page *page = NULL; 4133 unsigned long pflags; 4134 unsigned int noreclaim_flag; 4135 4136 if (!order) 4137 return NULL; 4138 4139 psi_memstall_enter(&pflags); 4140 delayacct_compact_start(); 4141 noreclaim_flag = memalloc_noreclaim_save(); 4142 4143 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4144 prio, &page); 4145 4146 memalloc_noreclaim_restore(noreclaim_flag); 4147 psi_memstall_leave(&pflags); 4148 delayacct_compact_end(); 4149 4150 if (*compact_result == COMPACT_SKIPPED) 4151 return NULL; 4152 /* 4153 * At least in one zone compaction wasn't deferred or skipped, so let's 4154 * count a compaction stall 4155 */ 4156 count_vm_event(COMPACTSTALL); 4157 4158 /* Prep a captured page if available */ 4159 if (page) 4160 prep_new_page(page, order, gfp_mask, alloc_flags); 4161 4162 /* Try get a page from the freelist if available */ 4163 if (!page) 4164 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4165 4166 if (page) { 4167 struct zone *zone = page_zone(page); 4168 4169 zone->compact_blockskip_flush = false; 4170 compaction_defer_reset(zone, order, true); 4171 count_vm_event(COMPACTSUCCESS); 4172 return page; 4173 } 4174 4175 /* 4176 * It's bad if compaction run occurs and fails. The most likely reason 4177 * is that pages exist, but not enough to satisfy watermarks. 4178 */ 4179 count_vm_event(COMPACTFAIL); 4180 4181 cond_resched(); 4182 4183 return NULL; 4184 } 4185 4186 static inline bool 4187 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4188 enum compact_result compact_result, 4189 enum compact_priority *compact_priority, 4190 int *compaction_retries) 4191 { 4192 int max_retries = MAX_COMPACT_RETRIES; 4193 int min_priority; 4194 bool ret = false; 4195 int retries = *compaction_retries; 4196 enum compact_priority priority = *compact_priority; 4197 4198 if (!order) 4199 return false; 4200 4201 if (fatal_signal_pending(current)) 4202 return false; 4203 4204 /* 4205 * Compaction was skipped due to a lack of free order-0 4206 * migration targets. Continue if reclaim can help. 4207 */ 4208 if (compact_result == COMPACT_SKIPPED) { 4209 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4210 goto out; 4211 } 4212 4213 /* 4214 * Compaction managed to coalesce some page blocks, but the 4215 * allocation failed presumably due to a race. Retry some. 
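 *
 * For example, with MAX_COMPACT_RETRIES == 16, a costly request
 * (order > PAGE_ALLOC_COSTLY_ORDER) that keeps seeing COMPACT_SUCCESS
 * here is retried at most 16 / 4 == 4 times, while a !costly request
 * keeps the full 16 retries before the priority is raised.
 *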
4216 */ 4217 if (compact_result == COMPACT_SUCCESS) { 4218 /* 4219 * !costly requests are much more important than 4220 * __GFP_RETRY_MAYFAIL costly ones because they are de 4221 * facto nofail and invoke OOM killer to move on while 4222 * costly can fail and users are ready to cope with 4223 * that. 1/4 retries is rather arbitrary but we would 4224 * need much more detailed feedback from compaction to 4225 * make a better decision. 4226 */ 4227 if (order > PAGE_ALLOC_COSTLY_ORDER) 4228 max_retries /= 4; 4229 4230 if (++(*compaction_retries) <= max_retries) { 4231 ret = true; 4232 goto out; 4233 } 4234 } 4235 4236 /* 4237 * Compaction failed. Retry with increasing priority. 4238 */ 4239 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 4240 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4241 4242 if (*compact_priority > min_priority) { 4243 (*compact_priority)--; 4244 *compaction_retries = 0; 4245 ret = true; 4246 } 4247 out: 4248 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4249 return ret; 4250 } 4251 #else 4252 static inline struct page * 4253 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4254 unsigned int alloc_flags, const struct alloc_context *ac, 4255 enum compact_priority prio, enum compact_result *compact_result) 4256 { 4257 *compact_result = COMPACT_SKIPPED; 4258 return NULL; 4259 } 4260 4261 static inline bool 4262 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4263 enum compact_result compact_result, 4264 enum compact_priority *compact_priority, 4265 int *compaction_retries) 4266 { 4267 struct zone *zone; 4268 struct zoneref *z; 4269 4270 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4271 return false; 4272 4273 /* 4274 * There are setups with compaction disabled which would prefer to loop 4275 * inside the allocator rather than hit the oom killer prematurely. 4276 * Let's give them a good hope and keep retrying while the order-0 4277 * watermarks are OK. 
4278 */ 4279 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4280 ac->highest_zoneidx, ac->nodemask) { 4281 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4282 ac->highest_zoneidx, alloc_flags)) 4283 return true; 4284 } 4285 return false; 4286 } 4287 #endif /* CONFIG_COMPACTION */ 4288 4289 #ifdef CONFIG_LOCKDEP 4290 static struct lockdep_map __fs_reclaim_map = 4291 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4292 4293 static bool __need_reclaim(gfp_t gfp_mask) 4294 { 4295 /* no reclaim without waiting on it */ 4296 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4297 return false; 4298 4299 /* this guy won't enter reclaim */ 4300 if (current->flags & PF_MEMALLOC) 4301 return false; 4302 4303 if (gfp_mask & __GFP_NOLOCKDEP) 4304 return false; 4305 4306 return true; 4307 } 4308 4309 void __fs_reclaim_acquire(unsigned long ip) 4310 { 4311 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4312 } 4313 4314 void __fs_reclaim_release(unsigned long ip) 4315 { 4316 lock_release(&__fs_reclaim_map, ip); 4317 } 4318 4319 void fs_reclaim_acquire(gfp_t gfp_mask) 4320 { 4321 gfp_mask = current_gfp_context(gfp_mask); 4322 4323 if (__need_reclaim(gfp_mask)) { 4324 if (gfp_mask & __GFP_FS) 4325 __fs_reclaim_acquire(_RET_IP_); 4326 4327 #ifdef CONFIG_MMU_NOTIFIER 4328 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4329 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4330 #endif 4331 4332 } 4333 } 4334 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4335 4336 void fs_reclaim_release(gfp_t gfp_mask) 4337 { 4338 gfp_mask = current_gfp_context(gfp_mask); 4339 4340 if (__need_reclaim(gfp_mask)) { 4341 if (gfp_mask & __GFP_FS) 4342 __fs_reclaim_release(_RET_IP_); 4343 } 4344 } 4345 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4346 #endif 4347 4348 /* 4349 * Zonelists may change due to hotplug during allocation. Detect when zonelists 4350 * have been rebuilt so allocation retries. Reader side does not lock and 4351 * retries the allocation if zonelist changes. Writer side is protected by the 4352 * embedded spin_lock. 
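 *
 * Readers follow the usual seqlock retry pattern; as a sketch (the
 * real user is __alloc_pages_slowpath()):
 *
 *	cookie = zonelist_iter_begin();
 *	...scan the zonelist, possibly failing to allocate...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;
 *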
4353 */ 4354 static DEFINE_SEQLOCK(zonelist_update_seq); 4355 4356 static unsigned int zonelist_iter_begin(void) 4357 { 4358 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4359 return read_seqbegin(&zonelist_update_seq); 4360 4361 return 0; 4362 } 4363 4364 static unsigned int check_retry_zonelist(unsigned int seq) 4365 { 4366 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4367 return read_seqretry(&zonelist_update_seq, seq); 4368 4369 return seq; 4370 } 4371 4372 /* Perform direct synchronous page reclaim */ 4373 static unsigned long 4374 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4375 const struct alloc_context *ac) 4376 { 4377 unsigned int noreclaim_flag; 4378 unsigned long progress; 4379 4380 cond_resched(); 4381 4382 /* We now go into synchronous reclaim */ 4383 cpuset_memory_pressure_bump(); 4384 fs_reclaim_acquire(gfp_mask); 4385 noreclaim_flag = memalloc_noreclaim_save(); 4386 4387 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4388 ac->nodemask); 4389 4390 memalloc_noreclaim_restore(noreclaim_flag); 4391 fs_reclaim_release(gfp_mask); 4392 4393 cond_resched(); 4394 4395 return progress; 4396 } 4397 4398 /* The really slow allocator path where we enter direct reclaim */ 4399 static inline struct page * 4400 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4401 unsigned int alloc_flags, const struct alloc_context *ac, 4402 unsigned long *did_some_progress) 4403 { 4404 struct page *page = NULL; 4405 unsigned long pflags; 4406 bool drained = false; 4407 4408 psi_memstall_enter(&pflags); 4409 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4410 if (unlikely(!(*did_some_progress))) 4411 goto out; 4412 4413 retry: 4414 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4415 4416 /* 4417 * If an allocation failed after direct reclaim, it could be because 4418 * pages are pinned on the per-cpu lists or in high alloc reserves. 4419 * Shrink them and try again 4420 */ 4421 if (!page && !drained) { 4422 unreserve_highatomic_pageblock(ac, false); 4423 drain_all_pages(NULL); 4424 drained = true; 4425 goto retry; 4426 } 4427 out: 4428 psi_memstall_leave(&pflags); 4429 4430 return page; 4431 } 4432 4433 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4434 const struct alloc_context *ac) 4435 { 4436 struct zoneref *z; 4437 struct zone *zone; 4438 pg_data_t *last_pgdat = NULL; 4439 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4440 unsigned int reclaim_order; 4441 4442 if (defrag_mode) 4443 reclaim_order = max(order, pageblock_order); 4444 else 4445 reclaim_order = order; 4446 4447 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4448 ac->nodemask) { 4449 if (!managed_zone(zone)) 4450 continue; 4451 if (last_pgdat == zone->zone_pgdat) 4452 continue; 4453 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); 4454 last_pgdat = zone->zone_pgdat; 4455 } 4456 } 4457 4458 static inline unsigned int 4459 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4460 { 4461 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4462 4463 /* 4464 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4465 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4466 * to save two branches. 
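 *
 * That is, the bit values must be identical so that the
 * (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)) mask further down
 * can be OR-ed straight into alloc_flags; e.g. GFP_ATOMIC
 * (__GFP_HIGH | __GFP_KSWAPD_RECLAIM) maps directly to
 * ALLOC_MIN_RESERVE | ALLOC_KSWAPD without any conditionals. The
 * BUILD_BUG_ON()s enforce this at compile time.
 *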
4467 */ 4468 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4469 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4470 4471 /* 4472 * The caller may dip into page reserves a bit more if the caller 4473 * cannot run direct reclaim, or if the caller has realtime scheduling 4474 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4475 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 4476 */ 4477 alloc_flags |= (__force int) 4478 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4479 4480 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4481 /* 4482 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4483 * if it can't schedule. 4484 */ 4485 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4486 alloc_flags |= ALLOC_NON_BLOCK; 4487 4488 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE)) 4489 alloc_flags |= ALLOC_HIGHATOMIC; 4490 } 4491 4492 /* 4493 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4494 * GFP_ATOMIC) rather than fail, see the comment for 4495 * cpuset_current_node_allowed(). 4496 */ 4497 if (alloc_flags & ALLOC_MIN_RESERVE) 4498 alloc_flags &= ~ALLOC_CPUSET; 4499 } else if (unlikely(rt_or_dl_task(current)) && in_task()) 4500 alloc_flags |= ALLOC_MIN_RESERVE; 4501 4502 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4503 4504 if (defrag_mode) 4505 alloc_flags |= ALLOC_NOFRAGMENT; 4506 4507 return alloc_flags; 4508 } 4509 4510 static bool oom_reserves_allowed(struct task_struct *tsk) 4511 { 4512 if (!tsk_is_oom_victim(tsk)) 4513 return false; 4514 4515 /* 4516 * !MMU doesn't have oom reaper so give access to memory reserves 4517 * only to the thread with TIF_MEMDIE set 4518 */ 4519 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4520 return false; 4521 4522 return true; 4523 } 4524 4525 /* 4526 * Distinguish requests which really need access to full memory 4527 * reserves from oom victims which can live with a portion of it 4528 */ 4529 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4530 { 4531 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4532 return 0; 4533 if (gfp_mask & __GFP_MEMALLOC) 4534 return ALLOC_NO_WATERMARKS; 4535 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4536 return ALLOC_NO_WATERMARKS; 4537 if (!in_interrupt()) { 4538 if (current->flags & PF_MEMALLOC) 4539 return ALLOC_NO_WATERMARKS; 4540 else if (oom_reserves_allowed(current)) 4541 return ALLOC_OOM; 4542 } 4543 4544 return 0; 4545 } 4546 4547 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4548 { 4549 return !!__gfp_pfmemalloc_flags(gfp_mask); 4550 } 4551 4552 /* 4553 * Checks whether it makes sense to retry the reclaim to make a forward progress 4554 * for the given allocation request. 4555 * 4556 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4557 * without success, or when we couldn't even meet the watermark if we 4558 * reclaimed all remaining pages on the LRU lists. 4559 * 4560 * Returns true if a retry is viable or false to enter the oom path. 
4561 */ 4562 static inline bool 4563 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4564 struct alloc_context *ac, int alloc_flags, 4565 bool did_some_progress, int *no_progress_loops) 4566 { 4567 struct zone *zone; 4568 struct zoneref *z; 4569 bool ret = false; 4570 4571 /* 4572 * Costly allocations might have made a progress but this doesn't mean 4573 * their order will become available due to high fragmentation so 4574 * always increment the no progress counter for them 4575 */ 4576 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4577 *no_progress_loops = 0; 4578 else 4579 (*no_progress_loops)++; 4580 4581 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4582 goto out; 4583 4584 4585 /* 4586 * Keep reclaiming pages while there is a chance this will lead 4587 * somewhere. If none of the target zones can satisfy our allocation 4588 * request even if all reclaimable pages are considered then we are 4589 * screwed and have to go OOM. 4590 */ 4591 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4592 ac->highest_zoneidx, ac->nodemask) { 4593 unsigned long available; 4594 unsigned long reclaimable; 4595 unsigned long min_wmark = min_wmark_pages(zone); 4596 bool wmark; 4597 4598 if (cpusets_enabled() && 4599 (alloc_flags & ALLOC_CPUSET) && 4600 !__cpuset_zone_allowed(zone, gfp_mask)) 4601 continue; 4602 4603 available = reclaimable = zone_reclaimable_pages(zone); 4604 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4605 4606 /* 4607 * Would the allocation succeed if we reclaimed all 4608 * reclaimable pages? 4609 */ 4610 wmark = __zone_watermark_ok(zone, order, min_wmark, 4611 ac->highest_zoneidx, alloc_flags, available); 4612 trace_reclaim_retry_zone(z, order, reclaimable, 4613 available, min_wmark, *no_progress_loops, wmark); 4614 if (wmark) { 4615 ret = true; 4616 break; 4617 } 4618 } 4619 4620 /* 4621 * Memory allocation/reclaim might be called from a WQ context and the 4622 * current implementation of the WQ concurrency control doesn't 4623 * recognize that a particular WQ is congested if the worker thread is 4624 * looping without ever sleeping. Therefore we have to do a short sleep 4625 * here rather than calling cond_resched(). 4626 */ 4627 if (current->flags & PF_WQ_WORKER) 4628 schedule_timeout_uninterruptible(1); 4629 else 4630 cond_resched(); 4631 out: 4632 /* Before OOM, exhaust highatomic_reserve */ 4633 if (!ret) 4634 return unreserve_highatomic_pageblock(ac, true); 4635 4636 return ret; 4637 } 4638 4639 static inline bool 4640 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4641 { 4642 /* 4643 * It's possible that cpuset's mems_allowed and the nodemask from 4644 * mempolicy don't intersect. This should be normally dealt with by 4645 * policy_nodemask(), but it's possible to race with cpuset update in 4646 * such a way the check therein was true, and then it became false 4647 * before we got our cpuset_mems_cookie here. 4648 * This assumes that for all allocations, ac->nodemask can come only 4649 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4650 * when it does not intersect with the cpuset restrictions) or the 4651 * caller can deal with a violated nodemask. 
4652 */ 4653 if (cpusets_enabled() && ac->nodemask && 4654 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4655 ac->nodemask = NULL; 4656 return true; 4657 } 4658 4659 /* 4660 * When updating a task's mems_allowed or mempolicy nodemask, it is 4661 * possible to race with parallel threads in such a way that our 4662 * allocation can fail while the mask is being updated. If we are about 4663 * to fail, check if the cpuset changed during allocation and if so, 4664 * retry. 4665 */ 4666 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4667 return true; 4668 4669 return false; 4670 } 4671 4672 static inline struct page * 4673 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4674 struct alloc_context *ac) 4675 { 4676 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4677 bool can_compact = can_direct_reclaim && gfp_compaction_allowed(gfp_mask); 4678 bool nofail = gfp_mask & __GFP_NOFAIL; 4679 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4680 struct page *page = NULL; 4681 unsigned int alloc_flags; 4682 unsigned long did_some_progress; 4683 enum compact_priority compact_priority; 4684 enum compact_result compact_result; 4685 int compaction_retries; 4686 int no_progress_loops; 4687 unsigned int cpuset_mems_cookie; 4688 unsigned int zonelist_iter_cookie; 4689 int reserve_flags; 4690 bool compact_first = false; 4691 bool can_retry_reserves = true; 4692 4693 if (unlikely(nofail)) { 4694 /* 4695 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM, 4696 * otherwise, we may result in lockup. 4697 */ 4698 WARN_ON_ONCE(!can_direct_reclaim); 4699 /* 4700 * PF_MEMALLOC request from this context is rather bizarre 4701 * because we cannot reclaim anything and only can loop waiting 4702 * for somebody to do a work for us. 4703 */ 4704 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4705 } 4706 4707 restart: 4708 compaction_retries = 0; 4709 no_progress_loops = 0; 4710 compact_result = COMPACT_SKIPPED; 4711 compact_priority = DEF_COMPACT_PRIORITY; 4712 cpuset_mems_cookie = read_mems_allowed_begin(); 4713 zonelist_iter_cookie = zonelist_iter_begin(); 4714 4715 /* 4716 * For costly allocations, try direct compaction first, as it's likely 4717 * that we have enough base pages and don't need to reclaim. For non- 4718 * movable high-order allocations, do that as well, as compaction will 4719 * try prevent permanent fragmentation by migrating from blocks of the 4720 * same migratetype. 4721 */ 4722 if (can_compact && (costly_order || (order > 0 && 4723 ac->migratetype != MIGRATE_MOVABLE))) { 4724 compact_first = true; 4725 compact_priority = INIT_COMPACT_PRIORITY; 4726 } 4727 4728 /* 4729 * The fast path uses conservative alloc_flags to succeed only until 4730 * kswapd needs to be woken up, and to avoid the cost of setting up 4731 * alloc_flags precisely. So we do that now. 4732 */ 4733 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4734 4735 /* 4736 * We need to recalculate the starting point for the zonelist iterator 4737 * because we might have used different nodemask in the fast path, or 4738 * there was a cpuset modification and we are retrying - otherwise we 4739 * could end up iterating over non-eligible zones endlessly. 4740 */ 4741 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4742 ac->highest_zoneidx, ac->nodemask); 4743 if (!zonelist_zone(ac->preferred_zoneref)) 4744 goto nopage; 4745 4746 /* 4747 * Check for insane configurations where the cpuset doesn't contain 4748 * any suitable zone to satisfy the request - e.g. 
non-movable 4749 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4750 */ 4751 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4752 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4753 ac->highest_zoneidx, 4754 &cpuset_current_mems_allowed); 4755 if (!zonelist_zone(z)) 4756 goto nopage; 4757 } 4758 4759 retry: 4760 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4761 if (alloc_flags & ALLOC_KSWAPD) 4762 wake_all_kswapds(order, gfp_mask, ac); 4763 4764 /* 4765 * The adjusted alloc_flags might result in immediate success, so try 4766 * that first 4767 */ 4768 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4769 if (page) 4770 goto got_pg; 4771 4772 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4773 if (reserve_flags) 4774 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4775 (alloc_flags & ALLOC_KSWAPD); 4776 4777 /* 4778 * Reset the nodemask and zonelist iterators if memory policies can be 4779 * ignored. These allocations are high priority and system rather than 4780 * user oriented. 4781 */ 4782 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4783 ac->nodemask = NULL; 4784 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4785 ac->highest_zoneidx, ac->nodemask); 4786 4787 /* 4788 * The first time we adjust anything due to being allowed to 4789 * ignore memory policies or watermarks, retry immediately. This 4790 * allows us to keep the first allocation attempt optimistic so 4791 * it can succeed in a zone that is still above watermarks. 4792 */ 4793 if (can_retry_reserves) { 4794 can_retry_reserves = false; 4795 goto retry; 4796 } 4797 } 4798 4799 /* Caller is not willing to reclaim, we can't balance anything */ 4800 if (!can_direct_reclaim) 4801 goto nopage; 4802 4803 /* Avoid recursion of direct reclaim */ 4804 if (current->flags & PF_MEMALLOC) 4805 goto nopage; 4806 4807 /* Try direct reclaim and then allocating */ 4808 if (!compact_first) { 4809 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, 4810 ac, &did_some_progress); 4811 if (page) 4812 goto got_pg; 4813 } 4814 4815 /* Try direct compaction and then allocating */ 4816 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4817 compact_priority, &compact_result); 4818 if (page) 4819 goto got_pg; 4820 4821 if (compact_first) { 4822 /* 4823 * THP page faults may attempt local node only first, but are 4824 * then allowed to only compact, not reclaim, see 4825 * alloc_pages_mpol(). 4826 * 4827 * Compaction has failed above and we don't want such THP 4828 * allocations to put reclaim pressure on a single node in a 4829 * situation where other nodes might have plenty of available 4830 * memory. 4831 */ 4832 if (gfp_has_flags(gfp_mask, __GFP_NORETRY | __GFP_THISNODE)) 4833 goto nopage; 4834 4835 /* 4836 * For the initial compaction attempt we have lowered its 4837 * priority. Restore it for further retries, if those are 4838 * allowed. With __GFP_NORETRY there will be a single round of 4839 * reclaim and compaction with the lowered priority. 
4840 */ 4841 if (!(gfp_mask & __GFP_NORETRY)) 4842 compact_priority = DEF_COMPACT_PRIORITY; 4843 4844 compact_first = false; 4845 goto retry; 4846 } 4847 4848 /* Do not loop if specifically requested */ 4849 if (gfp_mask & __GFP_NORETRY) 4850 goto nopage; 4851 4852 /* 4853 * Do not retry costly high order allocations unless they are 4854 * __GFP_RETRY_MAYFAIL and we can compact 4855 */ 4856 if (costly_order && (!can_compact || 4857 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4858 goto nopage; 4859 4860 /* 4861 * Deal with possible cpuset update races or zonelist updates to avoid 4862 * infinite retries. No "goto retry;" can be placed above this check 4863 * unless it can execute just once. 4864 */ 4865 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4866 check_retry_zonelist(zonelist_iter_cookie)) 4867 goto restart; 4868 4869 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4870 did_some_progress > 0, &no_progress_loops)) 4871 goto retry; 4872 4873 /* 4874 * It doesn't make any sense to retry for the compaction if the order-0 4875 * reclaim is not able to make any progress because the current 4876 * implementation of the compaction depends on the sufficient amount 4877 * of free memory (see __compaction_suitable) 4878 */ 4879 if (did_some_progress > 0 && can_compact && 4880 should_compact_retry(ac, order, alloc_flags, 4881 compact_result, &compact_priority, 4882 &compaction_retries)) 4883 goto retry; 4884 4885 /* Reclaim/compaction failed to prevent the fallback */ 4886 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) { 4887 alloc_flags &= ~ALLOC_NOFRAGMENT; 4888 goto retry; 4889 } 4890 4891 /* 4892 * Deal with possible cpuset update races or zonelist updates to avoid 4893 * a unnecessary OOM kill. 4894 */ 4895 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4896 check_retry_zonelist(zonelist_iter_cookie)) 4897 goto restart; 4898 4899 /* Reclaim has failed us, start killing things */ 4900 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4901 if (page) 4902 goto got_pg; 4903 4904 /* Avoid allocations with no watermarks from looping endlessly */ 4905 if (tsk_is_oom_victim(current) && 4906 (alloc_flags & ALLOC_OOM || 4907 (gfp_mask & __GFP_NOMEMALLOC))) 4908 goto nopage; 4909 4910 /* Retry as long as the OOM killer is making progress */ 4911 if (did_some_progress) { 4912 no_progress_loops = 0; 4913 goto retry; 4914 } 4915 4916 nopage: 4917 /* 4918 * Deal with possible cpuset update races or zonelist updates to avoid 4919 * a unnecessary OOM kill. 4920 */ 4921 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4922 check_retry_zonelist(zonelist_iter_cookie)) 4923 goto restart; 4924 4925 /* 4926 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4927 * we always retry 4928 */ 4929 if (unlikely(nofail)) { 4930 /* 4931 * Lacking direct_reclaim we can't do anything to reclaim memory, 4932 * we disregard these unreasonable nofail requests and still 4933 * return NULL 4934 */ 4935 if (!can_direct_reclaim) 4936 goto fail; 4937 4938 /* 4939 * Help non-failing allocations by giving some access to memory 4940 * reserves normally used for high priority non-blocking 4941 * allocations but do not use ALLOC_NO_WATERMARKS because this 4942 * could deplete whole memory reserves which would just make 4943 * the situation worse. 
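 *
 * Hence the retry below passes ALLOC_MIN_RESERVE, i.e. the same 50%
 * dip into the min reserve that __GFP_HIGH callers get, rather than
 * bypassing the watermarks entirely.
 *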
4944 */ 4945 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4946 if (page) 4947 goto got_pg; 4948 4949 cond_resched(); 4950 goto retry; 4951 } 4952 fail: 4953 warn_alloc(gfp_mask, ac->nodemask, 4954 "page allocation failure: order:%u", order); 4955 got_pg: 4956 return page; 4957 } 4958 4959 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4960 int preferred_nid, nodemask_t *nodemask, 4961 struct alloc_context *ac, gfp_t *alloc_gfp, 4962 unsigned int *alloc_flags) 4963 { 4964 ac->highest_zoneidx = gfp_zone(gfp_mask); 4965 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4966 ac->nodemask = nodemask; 4967 ac->migratetype = gfp_migratetype(gfp_mask); 4968 4969 if (cpusets_enabled()) { 4970 *alloc_gfp |= __GFP_HARDWALL; 4971 /* 4972 * When we are in the interrupt context, it is irrelevant 4973 * to the current task context. It means that any node ok. 4974 */ 4975 if (in_task() && !ac->nodemask) 4976 ac->nodemask = &cpuset_current_mems_allowed; 4977 else 4978 *alloc_flags |= ALLOC_CPUSET; 4979 } 4980 4981 might_alloc(gfp_mask); 4982 4983 /* 4984 * Don't invoke should_fail logic, since it may call 4985 * get_random_u32() and printk() which need to spin_lock. 4986 */ 4987 if (!(*alloc_flags & ALLOC_TRYLOCK) && 4988 should_fail_alloc_page(gfp_mask, order)) 4989 return false; 4990 4991 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 4992 4993 /* Dirty zone balancing only done in the fast path */ 4994 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4995 4996 /* 4997 * The preferred zone is used for statistics but crucially it is 4998 * also used as the starting point for the zonelist iterator. It 4999 * may get reset for allocations that ignore memory policies. 5000 */ 5001 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5002 ac->highest_zoneidx, ac->nodemask); 5003 5004 return true; 5005 } 5006 5007 /* 5008 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array 5009 * @gfp: GFP flags for the allocation 5010 * @preferred_nid: The preferred NUMA node ID to allocate from 5011 * @nodemask: Set of nodes to allocate from, may be NULL 5012 * @nr_pages: The number of pages desired in the array 5013 * @page_array: Array to store the pages 5014 * 5015 * This is a batched version of the page allocator that attempts to allocate 5016 * @nr_pages quickly. Pages are added to @page_array. 5017 * 5018 * Note that only the elements in @page_array that were cleared to %NULL on 5019 * entry are populated with newly allocated pages. @nr_pages is the maximum 5020 * number of pages that will be stored in the array. 5021 * 5022 * Returns the number of pages in @page_array, including ones already 5023 * allocated on entry. This can be less than the number requested in @nr_pages, 5024 * but all empty slots are filled from the beginning. I.e., if all slots in 5025 * @page_array were set to %NULL on entry, the slots from 0 to the return value 5026 * - 1 will be filled. 
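 *
 * Illustrative sketch only (assuming the alloc_pages_bulk() wrapper,
 * which passes numa_mem_id() and a NULL nodemask, rather than calling
 * the _noprof variant directly):
 *
 *	struct page *pages[16] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk(GFP_KERNEL, ARRAY_SIZE(pages), pages);
 *	...use pages[0 .. filled - 1]; filled may be less than 16...
 *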
5027 */ 5028 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 5029 nodemask_t *nodemask, int nr_pages, 5030 struct page **page_array) 5031 { 5032 struct page *page; 5033 struct zone *zone; 5034 struct zoneref *z; 5035 struct per_cpu_pages *pcp; 5036 struct list_head *pcp_list; 5037 struct alloc_context ac; 5038 gfp_t alloc_gfp; 5039 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5040 int nr_populated = 0, nr_account = 0; 5041 5042 /* 5043 * Skip populated array elements to determine if any pages need 5044 * to be allocated before disabling IRQs. 5045 */ 5046 while (nr_populated < nr_pages && page_array[nr_populated]) 5047 nr_populated++; 5048 5049 /* No pages requested? */ 5050 if (unlikely(nr_pages <= 0)) 5051 goto out; 5052 5053 /* Already populated array? */ 5054 if (unlikely(nr_pages - nr_populated == 0)) 5055 goto out; 5056 5057 /* Bulk allocator does not support memcg accounting. */ 5058 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 5059 goto failed; 5060 5061 /* Use the single page allocator for one page. */ 5062 if (nr_pages - nr_populated == 1) 5063 goto failed; 5064 5065 #ifdef CONFIG_PAGE_OWNER 5066 /* 5067 * PAGE_OWNER may recurse into the allocator to allocate space to 5068 * save the stack with pagesets.lock held. Releasing/reacquiring 5069 * removes much of the performance benefit of bulk allocation so 5070 * force the caller to allocate one page at a time as it'll have 5071 * similar performance to added complexity to the bulk allocator. 5072 */ 5073 if (static_branch_unlikely(&page_owner_inited)) 5074 goto failed; 5075 #endif 5076 5077 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5078 gfp &= gfp_allowed_mask; 5079 alloc_gfp = gfp; 5080 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5081 goto out; 5082 gfp = alloc_gfp; 5083 5084 /* Find an allowed local zone that meets the low watermark. */ 5085 z = ac.preferred_zoneref; 5086 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 5087 unsigned long mark; 5088 5089 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5090 !__cpuset_zone_allowed(zone, gfp)) { 5091 continue; 5092 } 5093 5094 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 5095 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 5096 goto failed; 5097 } 5098 5099 cond_accept_memory(zone, 0, alloc_flags); 5100 retry_this_zone: 5101 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages - nr_populated; 5102 if (zone_watermark_fast(zone, 0, mark, 5103 zonelist_zone_idx(ac.preferred_zoneref), 5104 alloc_flags, gfp)) { 5105 break; 5106 } 5107 5108 if (cond_accept_memory(zone, 0, alloc_flags)) 5109 goto retry_this_zone; 5110 5111 /* Try again if zone has deferred pages */ 5112 if (deferred_pages_enabled()) { 5113 if (_deferred_grow_zone(zone, 0)) 5114 goto retry_this_zone; 5115 } 5116 } 5117 5118 /* 5119 * If there are no allowed local zones that meets the watermarks then 5120 * try to allocate a single page and reclaim if necessary. 5121 */ 5122 if (unlikely(!zone)) 5123 goto failed; 5124 5125 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
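 *
 * In that case fall back to the single-page allocator below (the
 * "failed:" path) instead of spinning on the pcp lock.
 *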
*/ 5126 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 5127 if (!pcp) 5128 goto failed; 5129 5130 /* Attempt the batch allocation */ 5131 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5132 while (nr_populated < nr_pages) { 5133 5134 /* Skip existing pages */ 5135 if (page_array[nr_populated]) { 5136 nr_populated++; 5137 continue; 5138 } 5139 5140 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5141 pcp, pcp_list); 5142 if (unlikely(!page)) { 5143 /* Try and allocate at least one page */ 5144 if (!nr_account) { 5145 pcp_spin_unlock(pcp); 5146 goto failed; 5147 } 5148 break; 5149 } 5150 nr_account++; 5151 5152 prep_new_page(page, 0, gfp, 0); 5153 set_page_refcounted(page); 5154 page_array[nr_populated++] = page; 5155 } 5156 5157 pcp_spin_unlock(pcp); 5158 5159 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5160 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 5161 5162 out: 5163 return nr_populated; 5164 5165 failed: 5166 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 5167 if (page) 5168 page_array[nr_populated++] = page; 5169 goto out; 5170 } 5171 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 5172 5173 /* 5174 * This is the 'heart' of the zoned buddy allocator. 5175 */ 5176 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, 5177 int preferred_nid, nodemask_t *nodemask) 5178 { 5179 struct page *page; 5180 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5181 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5182 struct alloc_context ac = { }; 5183 5184 /* 5185 * There are several places where we assume that the order value is sane 5186 * so bail out early if the request is out of bound. 5187 */ 5188 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 5189 return NULL; 5190 5191 gfp &= gfp_allowed_mask; 5192 /* 5193 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 5194 * resp. GFP_NOIO which has to be inherited for all allocation requests 5195 * from a particular context which has been marked by 5196 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5197 * movable zones are not used during allocation. 5198 */ 5199 gfp = current_gfp_context(gfp); 5200 alloc_gfp = gfp; 5201 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5202 &alloc_gfp, &alloc_flags)) 5203 return NULL; 5204 5205 /* 5206 * Forbid the first pass from falling back to types that fragment 5207 * memory until all local zones are considered. 5208 */ 5209 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 5210 5211 /* First allocation attempt */ 5212 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5213 if (likely(page)) 5214 goto out; 5215 5216 alloc_gfp = gfp; 5217 ac.spread_dirty_pages = false; 5218 5219 /* 5220 * Restore the original nodemask if it was potentially replaced with 5221 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
5222 */ 5223 ac.nodemask = nodemask; 5224 5225 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5226 5227 out: 5228 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 5229 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5230 free_frozen_pages(page, order); 5231 page = NULL; 5232 } 5233 5234 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5235 kmsan_alloc_page(page, order, alloc_gfp); 5236 5237 return page; 5238 } 5239 EXPORT_SYMBOL(__alloc_frozen_pages_noprof); 5240 5241 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 5242 int preferred_nid, nodemask_t *nodemask) 5243 { 5244 struct page *page; 5245 5246 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); 5247 if (page) 5248 set_page_refcounted(page); 5249 return page; 5250 } 5251 EXPORT_SYMBOL(__alloc_pages_noprof); 5252 5253 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 5254 nodemask_t *nodemask) 5255 { 5256 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 5257 preferred_nid, nodemask); 5258 return page_rmappable_folio(page); 5259 } 5260 EXPORT_SYMBOL(__folio_alloc_noprof); 5261 5262 /* 5263 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5264 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5265 * you need to access high mem. 5266 */ 5267 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 5268 { 5269 struct page *page; 5270 5271 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 5272 if (!page) 5273 return 0; 5274 return (unsigned long) page_address(page); 5275 } 5276 EXPORT_SYMBOL(get_free_pages_noprof); 5277 5278 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 5279 { 5280 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 5281 } 5282 EXPORT_SYMBOL(get_zeroed_page_noprof); 5283 5284 static void ___free_pages(struct page *page, unsigned int order, 5285 fpi_t fpi_flags) 5286 { 5287 /* get PageHead before we drop reference */ 5288 int head = PageHead(page); 5289 /* get alloc tag in case the page is released by others */ 5290 struct alloc_tag *tag = pgalloc_tag_get(page); 5291 5292 if (put_page_testzero(page)) 5293 __free_frozen_pages(page, order, fpi_flags); 5294 else if (!head) { 5295 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 5296 while (order-- > 0) { 5297 /* 5298 * The "tail" pages of this non-compound high-order 5299 * page will have no code tags, so to avoid warnings 5300 * mark them as empty. 5301 */ 5302 clear_page_tag_ref(page + (1 << order)); 5303 __free_frozen_pages(page + (1 << order), order, 5304 fpi_flags); 5305 } 5306 } 5307 } 5308 5309 /** 5310 * __free_pages - Free pages allocated with alloc_pages(). 5311 * @page: The page pointer returned from alloc_pages(). 5312 * @order: The order of the allocation. 5313 * 5314 * This function can free multi-page allocations that are not compound 5315 * pages. It does not check that the @order passed in matches that of 5316 * the allocation, so it is easy to leak memory. Freeing more memory 5317 * than was allocated will probably emit a warning. 5318 * 5319 * If the last reference to this page is speculative, it will be released 5320 * by put_page() which only frees the first page of a non-compound 5321 * allocation. To prevent the remaining pages from being leaked, we free 5322 * the subsequent pages here. 
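 *
 * Typical pairing, for illustration (the order passed to
 * __free_pages() must match the one used at allocation time):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	...
 *	__free_pages(page, 2);
 *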
If you want to use the page's reference 5323 * count to decide when to free the allocation, you should allocate a 5324 * compound page, and use put_page() instead of __free_pages(). 5325 * 5326 * Context: May be called in interrupt context or while holding a normal 5327 * spinlock, but not in NMI context or while holding a raw spinlock. 5328 */ 5329 void __free_pages(struct page *page, unsigned int order) 5330 { 5331 ___free_pages(page, order, FPI_NONE); 5332 } 5333 EXPORT_SYMBOL(__free_pages); 5334 5335 /* 5336 * Can be called while holding raw_spin_lock or from IRQ and NMI for any 5337 * page type (not only those that came from alloc_pages_nolock) 5338 */ 5339 void free_pages_nolock(struct page *page, unsigned int order) 5340 { 5341 ___free_pages(page, order, FPI_TRYLOCK); 5342 } 5343 5344 /** 5345 * free_pages - Free pages allocated with __get_free_pages(). 5346 * @addr: The virtual address tied to a page returned from __get_free_pages(). 5347 * @order: The order of the allocation. 5348 * 5349 * This function behaves the same as __free_pages(). Use this function 5350 * to free pages when you only have a valid virtual address. If you have 5351 * the page, call __free_pages() instead. 5352 */ 5353 void free_pages(unsigned long addr, unsigned int order) 5354 { 5355 if (addr != 0) { 5356 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5357 __free_pages(virt_to_page((void *)addr), order); 5358 } 5359 } 5360 5361 EXPORT_SYMBOL(free_pages); 5362 5363 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5364 size_t size) 5365 { 5366 if (addr) { 5367 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 5368 struct page *page = virt_to_page((void *)addr); 5369 struct page *last = page + nr; 5370 5371 __split_page(page, order); 5372 while (page < --last) 5373 set_page_refcounted(last); 5374 5375 last = page + (1UL << order); 5376 for (page += nr; page < last; page++) 5377 __free_pages_ok(page, 0, FPI_TO_TAIL); 5378 } 5379 return (void *)addr; 5380 } 5381 5382 /** 5383 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5384 * @size: the number of bytes to allocate 5385 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5386 * 5387 * This function is similar to alloc_pages(), except that it allocates the 5388 * minimum number of pages to satisfy the request. alloc_pages() can only 5389 * allocate memory in power-of-two pages. 5390 * 5391 * This function is also limited by MAX_PAGE_ORDER. 5392 * 5393 * Memory allocated by this function must be released by free_pages_exact(). 5394 * 5395 * Return: pointer to the allocated area or %NULL in case of error. 5396 */ 5397 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 5398 { 5399 unsigned int order = get_order(size); 5400 unsigned long addr; 5401 5402 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5403 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5404 5405 addr = get_free_pages_noprof(gfp_mask, order); 5406 return make_alloc_exact(addr, order, size); 5407 } 5408 EXPORT_SYMBOL(alloc_pages_exact_noprof); 5409 5410 /** 5411 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5412 * pages on a node. 5413 * @nid: the preferred node ID where memory should be allocated 5414 * @size: the number of bytes to allocate 5415 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5416 * 5417 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5418 * back. 
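 *
 * As with alloc_pages_exact(), only the pages actually needed are
 * retained. Illustrative use (assuming 4KiB pages):
 *
 *	buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 *
 * Here 5 pages are returned and the remaining 3 pages of the
 * underlying order-3 block are freed, where alloc_pages() would have
 * tied up all 8.
 *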
5419 * 5420 * Return: pointer to the allocated area or %NULL in case of error. 5421 */ 5422 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5423 { 5424 unsigned int order = get_order(size); 5425 struct page *p; 5426 5427 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5428 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5429 5430 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5431 if (!p) 5432 return NULL; 5433 return make_alloc_exact((unsigned long)page_address(p), order, size); 5434 } 5435 5436 /** 5437 * free_pages_exact - release memory allocated via alloc_pages_exact() 5438 * @virt: the value returned by alloc_pages_exact. 5439 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5440 * 5441 * Release the memory allocated by a previous call to alloc_pages_exact. 5442 */ 5443 void free_pages_exact(void *virt, size_t size) 5444 { 5445 unsigned long addr = (unsigned long)virt; 5446 unsigned long end = addr + PAGE_ALIGN(size); 5447 5448 while (addr < end) { 5449 free_page(addr); 5450 addr += PAGE_SIZE; 5451 } 5452 } 5453 EXPORT_SYMBOL(free_pages_exact); 5454 5455 /** 5456 * nr_free_zone_pages - count number of pages beyond high watermark 5457 * @offset: The zone index of the highest zone 5458 * 5459 * nr_free_zone_pages() counts the number of pages which are beyond the 5460 * high watermark within all zones at or below a given zone index. For each 5461 * zone, the number of pages is calculated as: 5462 * 5463 * nr_free_zone_pages = managed_pages - high_pages 5464 * 5465 * Return: number of pages beyond high watermark. 5466 */ 5467 static unsigned long nr_free_zone_pages(int offset) 5468 { 5469 struct zoneref *z; 5470 struct zone *zone; 5471 5472 /* Just pick one node, since fallback list is circular */ 5473 unsigned long sum = 0; 5474 5475 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5476 5477 for_each_zone_zonelist(zone, z, zonelist, offset) { 5478 unsigned long size = zone_managed_pages(zone); 5479 unsigned long high = high_wmark_pages(zone); 5480 if (size > high) 5481 sum += size - high; 5482 } 5483 5484 return sum; 5485 } 5486 5487 /** 5488 * nr_free_buffer_pages - count number of pages beyond high watermark 5489 * 5490 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5491 * watermark within ZONE_DMA and ZONE_NORMAL. 5492 * 5493 * Return: number of pages beyond high watermark within ZONE_DMA and 5494 * ZONE_NORMAL. 5495 */ 5496 unsigned long nr_free_buffer_pages(void) 5497 { 5498 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5499 } 5500 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5501 5502 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5503 { 5504 zoneref->zone = zone; 5505 zoneref->zone_idx = zone_idx(zone); 5506 } 5507 5508 /* 5509 * Builds allocation fallback zone lists. 5510 * 5511 * Add all populated zones of a node to the zonelist. 
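 *
 * Zones are added from the highest populated zone downwards, so a node
 * with e.g. ZONE_NORMAL and ZONE_DMA32 populated contributes zonerefs
 * in the order Normal, then DMA32.
 *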
5512 */ 5513 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5514 { 5515 struct zone *zone; 5516 enum zone_type zone_type = MAX_NR_ZONES; 5517 int nr_zones = 0; 5518 5519 do { 5520 zone_type--; 5521 zone = pgdat->node_zones + zone_type; 5522 if (populated_zone(zone)) { 5523 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5524 check_highest_zone(zone_type); 5525 } 5526 } while (zone_type); 5527 5528 return nr_zones; 5529 } 5530 5531 #ifdef CONFIG_NUMA 5532 5533 static int __parse_numa_zonelist_order(char *s) 5534 { 5535 /* 5536 * We used to support different zonelists modes but they turned 5537 * out to be just not useful. Let's keep the warning in place 5538 * if somebody still use the cmd line parameter so that we do 5539 * not fail it silently 5540 */ 5541 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5542 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5543 return -EINVAL; 5544 } 5545 return 0; 5546 } 5547 5548 static char numa_zonelist_order[] = "Node"; 5549 #define NUMA_ZONELIST_ORDER_LEN 16 5550 /* 5551 * sysctl handler for numa_zonelist_order 5552 */ 5553 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5554 void *buffer, size_t *length, loff_t *ppos) 5555 { 5556 if (write) 5557 return __parse_numa_zonelist_order(buffer); 5558 return proc_dostring(table, write, buffer, length, ppos); 5559 } 5560 5561 static int node_load[MAX_NUMNODES]; 5562 5563 /** 5564 * find_next_best_node - find the next node that should appear in a given node's fallback list 5565 * @node: node whose fallback list we're appending 5566 * @used_node_mask: nodemask_t of already used nodes 5567 * 5568 * We use a number of factors to determine which is the next node that should 5569 * appear on a given node's fallback list. The node should not have appeared 5570 * already in @node's fallback list, and it should be the next closest node 5571 * according to the distance array (which contains arbitrary distance values 5572 * from each node to each node in the system), and should also prefer nodes 5573 * with no CPUs, since presumably they'll have very little allocation pressure 5574 * on them otherwise. 5575 * 5576 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5577 */ 5578 int find_next_best_node(int node, nodemask_t *used_node_mask) 5579 { 5580 int n, val; 5581 int min_val = INT_MAX; 5582 int best_node = NUMA_NO_NODE; 5583 5584 /* 5585 * Use the local node if we haven't already, but for memoryless local 5586 * node, we should skip it and fall back to other nodes. 
5587 */ 5588 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5589 node_set(node, *used_node_mask); 5590 return node; 5591 } 5592 5593 for_each_node_state(n, N_MEMORY) { 5594 5595 /* Don't want a node to appear more than once */ 5596 if (node_isset(n, *used_node_mask)) 5597 continue; 5598 5599 /* Use the distance array to find the distance */ 5600 val = node_distance(node, n); 5601 5602 /* Penalize nodes under us ("prefer the next node") */ 5603 val += (n < node); 5604 5605 /* Give preference to headless and unused nodes */ 5606 if (!cpumask_empty(cpumask_of_node(n))) 5607 val += PENALTY_FOR_NODE_WITH_CPUS; 5608 5609 /* Slight preference for less loaded node */ 5610 val *= MAX_NUMNODES; 5611 val += node_load[n]; 5612 5613 if (val < min_val) { 5614 min_val = val; 5615 best_node = n; 5616 } 5617 } 5618 5619 if (best_node >= 0) 5620 node_set(best_node, *used_node_mask); 5621 5622 return best_node; 5623 } 5624 5625 5626 /* 5627 * Build zonelists ordered by node and zones within node. 5628 * This results in maximum locality--normal zone overflows into local 5629 * DMA zone, if any--but risks exhausting DMA zone. 5630 */ 5631 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5632 unsigned nr_nodes) 5633 { 5634 struct zoneref *zonerefs; 5635 int i; 5636 5637 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5638 5639 for (i = 0; i < nr_nodes; i++) { 5640 int nr_zones; 5641 5642 pg_data_t *node = NODE_DATA(node_order[i]); 5643 5644 nr_zones = build_zonerefs_node(node, zonerefs); 5645 zonerefs += nr_zones; 5646 } 5647 zonerefs->zone = NULL; 5648 zonerefs->zone_idx = 0; 5649 } 5650 5651 /* 5652 * Build __GFP_THISNODE zonelists 5653 */ 5654 static void build_thisnode_zonelists(pg_data_t *pgdat) 5655 { 5656 struct zoneref *zonerefs; 5657 int nr_zones; 5658 5659 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5660 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5661 zonerefs += nr_zones; 5662 zonerefs->zone = NULL; 5663 zonerefs->zone_idx = 0; 5664 } 5665 5666 static void build_zonelists(pg_data_t *pgdat) 5667 { 5668 static int node_order[MAX_NUMNODES]; 5669 int node, nr_nodes = 0; 5670 nodemask_t used_mask = NODE_MASK_NONE; 5671 int local_node, prev_node; 5672 5673 /* NUMA-aware ordering of nodes */ 5674 local_node = pgdat->node_id; 5675 prev_node = local_node; 5676 5677 memset(node_order, 0, sizeof(node_order)); 5678 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5679 /* 5680 * We don't want to pressure a particular node. 5681 * So adding penalty to the first node in same 5682 * distance group to make it round-robin. 5683 */ 5684 if (node_distance(local_node, node) != 5685 node_distance(local_node, prev_node)) 5686 node_load[node] += 1; 5687 5688 node_order[nr_nodes++] = node; 5689 prev_node = node; 5690 } 5691 5692 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5693 build_thisnode_zonelists(pgdat); 5694 pr_info("Fallback order for Node %d: ", local_node); 5695 for (node = 0; node < nr_nodes; node++) 5696 pr_cont("%d ", node_order[node]); 5697 pr_cont("\n"); 5698 } 5699 5700 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5701 /* 5702 * Return node id of node used for "local" allocations. 5703 * I.e., first node id of first zone in arg node's generic zonelist. 5704 * Used for initializing percpu 'numa_mem', which is used primarily 5705 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 
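 *
 * For instance (illustrative, not taken from this file): if node 1 is a
 * memoryless CPU-only node whose closest memory is on node 0, the first zone
 * in node 1's GFP_KERNEL zonelist belongs to node 0, so local_memory_node(1)
 * returns 0 and the per-cpu numa_mem of node 1's CPUs ends up pointing at
 * node 0.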
5706 */ 5707 int local_memory_node(int node) 5708 { 5709 struct zoneref *z; 5710 5711 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5712 gfp_zone(GFP_KERNEL), 5713 NULL); 5714 return zonelist_node_idx(z); 5715 } 5716 #endif 5717 5718 static void setup_min_unmapped_ratio(void); 5719 static void setup_min_slab_ratio(void); 5720 #else /* CONFIG_NUMA */ 5721 5722 static void build_zonelists(pg_data_t *pgdat) 5723 { 5724 struct zoneref *zonerefs; 5725 int nr_zones; 5726 5727 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5728 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5729 zonerefs += nr_zones; 5730 5731 zonerefs->zone = NULL; 5732 zonerefs->zone_idx = 0; 5733 } 5734 5735 #endif /* CONFIG_NUMA */ 5736 5737 /* 5738 * Boot pageset table. One per cpu which is going to be used for all 5739 * zones and all nodes. The parameters will be set in such a way 5740 * that an item put on a list will immediately be handed over to 5741 * the buddy list. This is safe since pageset manipulation is done 5742 * with interrupts disabled. 5743 * 5744 * The boot_pagesets must be kept even after bootup is complete for 5745 * unused processors and/or zones. They do play a role for bootstrapping 5746 * hotplugged processors. 5747 * 5748 * zoneinfo_show() and maybe other functions do 5749 * not check if the processor is online before following the pageset pointer. 5750 * Other parts of the kernel may not check if the zone is available. 5751 */ 5752 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5753 /* These effectively disable the pcplists in the boot pageset completely */ 5754 #define BOOT_PAGESET_HIGH 0 5755 #define BOOT_PAGESET_BATCH 1 5756 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5757 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5758 5759 static void __build_all_zonelists(void *data) 5760 { 5761 int nid; 5762 int __maybe_unused cpu; 5763 pg_data_t *self = data; 5764 unsigned long flags; 5765 5766 /* 5767 * The zonelist_update_seq must be acquired with irqsave because the 5768 * reader can be invoked from IRQ with GFP_ATOMIC. 5769 */ 5770 write_seqlock_irqsave(&zonelist_update_seq, flags); 5771 /* 5772 * Also disable synchronous printk() to prevent any printk() from 5773 * trying to hold port->lock, for 5774 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5775 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5776 */ 5777 printk_deferred_enter(); 5778 5779 #ifdef CONFIG_NUMA 5780 memset(node_load, 0, sizeof(node_load)); 5781 #endif 5782 5783 /* 5784 * This node is hotadded and no memory is yet present. So just 5785 * building zonelists is fine - no need to touch other nodes. 5786 */ 5787 if (self && !node_online(self->node_id)) { 5788 build_zonelists(self); 5789 } else { 5790 /* 5791 * All possible nodes have pgdat preallocated 5792 * in free_area_init 5793 */ 5794 for_each_node(nid) { 5795 pg_data_t *pgdat = NODE_DATA(nid); 5796 5797 build_zonelists(pgdat); 5798 } 5799 5800 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5801 /* 5802 * We now know the "local memory node" for each node-- 5803 * i.e., the node of the first zone in the generic zonelist. 5804 * Set up numa_mem percpu variable for on-line cpus. During 5805 * boot, only the boot cpu should be on-line; we'll init the 5806 * secondary cpus' numa_mem as they come on-line. During 5807 * node/memory hotplug, we'll fixup all on-line cpus. 
5808 */ 5809 for_each_online_cpu(cpu) 5810 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5811 #endif 5812 } 5813 5814 printk_deferred_exit(); 5815 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5816 } 5817 5818 static noinline void __init 5819 build_all_zonelists_init(void) 5820 { 5821 int cpu; 5822 5823 __build_all_zonelists(NULL); 5824 5825 /* 5826 * Initialize the boot_pagesets that are going to be used 5827 * for bootstrapping processors. The real pagesets for 5828 * each zone will be allocated later when the per cpu 5829 * allocator is available. 5830 * 5831 * boot_pagesets are used also for bootstrapping offline 5832 * cpus if the system is already booted because the pagesets 5833 * are needed to initialize allocators on a specific cpu too. 5834 * F.e. the percpu allocator needs the page allocator which 5835 * needs the percpu allocator in order to allocate its pagesets 5836 * (a chicken-egg dilemma). 5837 */ 5838 for_each_possible_cpu(cpu) 5839 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5840 5841 mminit_verify_zonelist(); 5842 cpuset_init_current_mems_allowed(); 5843 } 5844 5845 /* 5846 * unless system_state == SYSTEM_BOOTING. 5847 * 5848 * __ref due to call of __init annotated helper build_all_zonelists_init 5849 * [protected by SYSTEM_BOOTING]. 5850 */ 5851 void __ref build_all_zonelists(pg_data_t *pgdat) 5852 { 5853 unsigned long vm_total_pages; 5854 5855 if (system_state == SYSTEM_BOOTING) { 5856 build_all_zonelists_init(); 5857 } else { 5858 __build_all_zonelists(pgdat); 5859 /* cpuset refresh routine should be here */ 5860 } 5861 /* Get the number of free pages beyond high watermark in all zones. */ 5862 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5863 /* 5864 * Disable grouping by mobility if the number of pages in the 5865 * system is too low to allow the mechanism to work. It would be 5866 * more accurate, but expensive to check per-zone. This check is 5867 * made on memory-hotadd so a system can start with mobility 5868 * disabled and enable it later 5869 */ 5870 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5871 page_group_by_mobility_disabled = 1; 5872 else 5873 page_group_by_mobility_disabled = 0; 5874 5875 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5876 nr_online_nodes, 5877 str_off_on(page_group_by_mobility_disabled), 5878 vm_total_pages); 5879 #ifdef CONFIG_NUMA 5880 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5881 #endif 5882 } 5883 5884 static int zone_batchsize(struct zone *zone) 5885 { 5886 #ifdef CONFIG_MMU 5887 int batch; 5888 5889 /* 5890 * The number of pages to batch allocate is either ~0.025% 5891 * of the zone or 256KB, whichever is smaller. The batch 5892 * size is striking a balance between allocation latency 5893 * and zone lock contention. 5894 */ 5895 batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE); 5896 if (batch <= 1) 5897 return 1; 5898 5899 /* 5900 * Clamp the batch to a 2^n - 1 value. Having a power 5901 * of 2 value was found to be more likely to have 5902 * suboptimal cache aliasing properties in some cases. 5903 * 5904 * For example if 2 tasks are alternately allocating 5905 * batches of pages, one task can end up with a lot 5906 * of pages of one half of the possible page colors 5907 * and the other with pages of the other colors. 
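 *
 * Worked example (illustrative, assuming 4 KiB pages): a zone with 1 GiB
 * managed (262144 pages) gives min(262144 >> 12, 256 KiB / 4 KiB) = 64,
 * and rounddown_pow_of_two(64 + 32) - 1 = 63, so pcp->batch lands just
 * below a power of two.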
5908 */ 5909 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5910 5911 return batch; 5912 5913 #else 5914 /* The deferral and batching of frees should be suppressed under NOMMU 5915 * conditions. 5916 * 5917 * The problem is that NOMMU needs to be able to allocate large chunks 5918 * of contiguous memory as there's no hardware page translation to 5919 * assemble apparent contiguous memory from discontiguous pages. 5920 * 5921 * Queueing large contiguous runs of pages for batching, however, 5922 * causes the pages to actually be freed in smaller chunks. As there 5923 * can be a significant delay between the individual batches being 5924 * recycled, this leads to the once large chunks of space being 5925 * fragmented and becoming unavailable for high-order allocations. 5926 */ 5927 return 1; 5928 #endif 5929 } 5930 5931 static int percpu_pagelist_high_fraction; 5932 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5933 int high_fraction) 5934 { 5935 #ifdef CONFIG_MMU 5936 int high; 5937 int nr_split_cpus; 5938 unsigned long total_pages; 5939 5940 if (!high_fraction) { 5941 /* 5942 * By default, the high value of the pcp is based on the zone 5943 * low watermark so that if they are full then background 5944 * reclaim will not be started prematurely. 5945 */ 5946 total_pages = low_wmark_pages(zone); 5947 } else { 5948 /* 5949 * If percpu_pagelist_high_fraction is configured, the high 5950 * value is based on a fraction of the managed pages in the 5951 * zone. 5952 */ 5953 total_pages = zone_managed_pages(zone) / high_fraction; 5954 } 5955 5956 /* 5957 * Split the high value across all online CPUs local to the zone. Note 5958 * that early in boot that CPUs may not be online yet and that during 5959 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5960 * onlined. For memory nodes that have no CPUs, split the high value 5961 * across all online CPUs to mitigate the risk that reclaim is triggered 5962 * prematurely due to pages stored on pcp lists. 5963 */ 5964 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5965 if (!nr_split_cpus) 5966 nr_split_cpus = num_online_cpus(); 5967 high = total_pages / nr_split_cpus; 5968 5969 /* 5970 * Ensure high is at least batch*4. The multiple is based on the 5971 * historical relationship between high and batch. 5972 */ 5973 high = max(high, batch << 2); 5974 5975 return high; 5976 #else 5977 return 0; 5978 #endif 5979 } 5980 5981 /* 5982 * pcp->high and pcp->batch values are related and generally batch is lower 5983 * than high. They are also related to pcp->count such that count is lower 5984 * than high, and as soon as it reaches high, the pcplist is flushed. 5985 * 5986 * However, guaranteeing these relations at all times would require e.g. write 5987 * barriers here but also careful usage of read barriers at the read side, and 5988 * thus be prone to error and bad for performance. Thus the update only prevents 5989 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5990 * should ensure they can cope with those fields changing asynchronously, and 5991 * fully trust only the pcp->count field on the local CPU with interrupts 5992 * disabled. 5993 * 5994 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5995 * outside of boot time (or some other assurance that no concurrent updaters 5996 * exist). 
5997 */ 5998 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5999 unsigned long high_max, unsigned long batch) 6000 { 6001 WRITE_ONCE(pcp->batch, batch); 6002 WRITE_ONCE(pcp->high_min, high_min); 6003 WRITE_ONCE(pcp->high_max, high_max); 6004 } 6005 6006 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 6007 { 6008 int pindex; 6009 6010 memset(pcp, 0, sizeof(*pcp)); 6011 memset(pzstats, 0, sizeof(*pzstats)); 6012 6013 spin_lock_init(&pcp->lock); 6014 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 6015 INIT_LIST_HEAD(&pcp->lists[pindex]); 6016 6017 /* 6018 * Set batch and high values safe for a boot pageset. A true percpu 6019 * pageset's initialization will update them subsequently. Here we don't 6020 * need to be as careful as pageset_update() as nobody can access the 6021 * pageset yet. 6022 */ 6023 pcp->high_min = BOOT_PAGESET_HIGH; 6024 pcp->high_max = BOOT_PAGESET_HIGH; 6025 pcp->batch = BOOT_PAGESET_BATCH; 6026 } 6027 6028 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 6029 unsigned long high_max, unsigned long batch) 6030 { 6031 struct per_cpu_pages *pcp; 6032 int cpu; 6033 6034 for_each_possible_cpu(cpu) { 6035 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6036 pageset_update(pcp, high_min, high_max, batch); 6037 } 6038 } 6039 6040 /* 6041 * Calculate and set new high and batch values for all per-cpu pagesets of a 6042 * zone based on the zone's size. 6043 */ 6044 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 6045 { 6046 int new_high_min, new_high_max, new_batch; 6047 6048 new_batch = zone_batchsize(zone); 6049 if (percpu_pagelist_high_fraction) { 6050 new_high_min = zone_highsize(zone, new_batch, cpu_online, 6051 percpu_pagelist_high_fraction); 6052 /* 6053 * PCP high is tuned manually, disable auto-tuning via 6054 * setting high_min and high_max to the manual value. 6055 */ 6056 new_high_max = new_high_min; 6057 } else { 6058 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 6059 new_high_max = zone_highsize(zone, new_batch, cpu_online, 6060 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 6061 } 6062 6063 if (zone->pageset_high_min == new_high_min && 6064 zone->pageset_high_max == new_high_max && 6065 zone->pageset_batch == new_batch) 6066 return; 6067 6068 zone->pageset_high_min = new_high_min; 6069 zone->pageset_high_max = new_high_max; 6070 zone->pageset_batch = new_batch; 6071 6072 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 6073 new_batch); 6074 } 6075 6076 void __meminit setup_zone_pageset(struct zone *zone) 6077 { 6078 int cpu; 6079 6080 /* Size may be 0 on !SMP && !NUMA */ 6081 if (sizeof(struct per_cpu_zonestat) > 0) 6082 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 6083 6084 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 6085 for_each_possible_cpu(cpu) { 6086 struct per_cpu_pages *pcp; 6087 struct per_cpu_zonestat *pzstats; 6088 6089 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6090 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6091 per_cpu_pages_init(pcp, pzstats); 6092 } 6093 6094 zone_set_pageset_high_and_batch(zone, 0); 6095 } 6096 6097 /* 6098 * The zone indicated has a new number of managed_pages; batch sizes and percpu 6099 * page high values need to be recalculated. 
6100 */ 6101 static void zone_pcp_update(struct zone *zone, int cpu_online) 6102 { 6103 mutex_lock(&pcp_batch_high_lock); 6104 zone_set_pageset_high_and_batch(zone, cpu_online); 6105 mutex_unlock(&pcp_batch_high_lock); 6106 } 6107 6108 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 6109 { 6110 struct per_cpu_pages *pcp; 6111 struct cpu_cacheinfo *cci; 6112 6113 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6114 cci = get_cpu_cacheinfo(cpu); 6115 /* 6116 * If data cache slice of CPU is large enough, "pcp->batch" 6117 * pages can be preserved in PCP before draining PCP for 6118 * consecutive high-order pages freeing without allocation. 6119 * This can reduce zone lock contention without hurting 6120 * cache-hot pages sharing. 6121 */ 6122 pcp_spin_lock_nopin(pcp); 6123 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 6124 pcp->flags |= PCPF_FREE_HIGH_BATCH; 6125 else 6126 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 6127 pcp_spin_unlock_nopin(pcp); 6128 } 6129 6130 void setup_pcp_cacheinfo(unsigned int cpu) 6131 { 6132 struct zone *zone; 6133 6134 for_each_populated_zone(zone) 6135 zone_pcp_update_cacheinfo(zone, cpu); 6136 } 6137 6138 /* 6139 * Allocate per cpu pagesets and initialize them. 6140 * Before this call only boot pagesets were available. 6141 */ 6142 void __init setup_per_cpu_pageset(void) 6143 { 6144 struct pglist_data *pgdat; 6145 struct zone *zone; 6146 int __maybe_unused cpu; 6147 6148 for_each_populated_zone(zone) 6149 setup_zone_pageset(zone); 6150 6151 #ifdef CONFIG_NUMA 6152 /* 6153 * Unpopulated zones continue using the boot pagesets. 6154 * The numa stats for these pagesets need to be reset. 6155 * Otherwise, they will end up skewing the stats of 6156 * the nodes these zones are associated with. 6157 */ 6158 for_each_possible_cpu(cpu) { 6159 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 6160 memset(pzstats->vm_numa_event, 0, 6161 sizeof(pzstats->vm_numa_event)); 6162 } 6163 #endif 6164 6165 for_each_online_pgdat(pgdat) 6166 pgdat->per_cpu_nodestats = 6167 alloc_percpu(struct per_cpu_nodestat); 6168 } 6169 6170 __meminit void zone_pcp_init(struct zone *zone) 6171 { 6172 /* 6173 * per cpu subsystem is not up at this point. The following code 6174 * relies on the ability of the linker to provide the 6175 * offset of a (static) per cpu variable into the per cpu area. 
6176 */ 6177 zone->per_cpu_pageset = &boot_pageset; 6178 zone->per_cpu_zonestats = &boot_zonestats; 6179 zone->pageset_high_min = BOOT_PAGESET_HIGH; 6180 zone->pageset_high_max = BOOT_PAGESET_HIGH; 6181 zone->pageset_batch = BOOT_PAGESET_BATCH; 6182 6183 if (populated_zone(zone)) 6184 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 6185 zone->present_pages, zone_batchsize(zone)); 6186 } 6187 6188 static void setup_per_zone_lowmem_reserve(void); 6189 6190 void adjust_managed_page_count(struct page *page, long count) 6191 { 6192 atomic_long_add(count, &page_zone(page)->managed_pages); 6193 totalram_pages_add(count); 6194 setup_per_zone_lowmem_reserve(); 6195 } 6196 EXPORT_SYMBOL(adjust_managed_page_count); 6197 6198 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 6199 { 6200 void *pos; 6201 unsigned long pages = 0; 6202 6203 start = (void *)PAGE_ALIGN((unsigned long)start); 6204 end = (void *)((unsigned long)end & PAGE_MASK); 6205 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6206 struct page *page = virt_to_page(pos); 6207 void *direct_map_addr; 6208 6209 /* 6210 * 'direct_map_addr' might be different from 'pos' 6211 * because some architectures' virt_to_page() 6212 * work with aliases. Getting the direct map 6213 * address ensures that we get a _writeable_ 6214 * alias for the memset(). 6215 */ 6216 direct_map_addr = page_address(page); 6217 /* 6218 * Perform a kasan-unchecked memset() since this memory 6219 * has not been initialized. 6220 */ 6221 direct_map_addr = kasan_reset_tag(direct_map_addr); 6222 if ((unsigned int)poison <= 0xFF) 6223 memset(direct_map_addr, poison, PAGE_SIZE); 6224 6225 free_reserved_page(page); 6226 } 6227 6228 if (pages && s) 6229 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 6230 6231 return pages; 6232 } 6233 6234 void free_reserved_page(struct page *page) 6235 { 6236 clear_page_tag_ref(page); 6237 ClearPageReserved(page); 6238 init_page_count(page); 6239 __free_page(page); 6240 adjust_managed_page_count(page, 1); 6241 } 6242 EXPORT_SYMBOL(free_reserved_page); 6243 6244 static int page_alloc_cpu_dead(unsigned int cpu) 6245 { 6246 struct zone *zone; 6247 6248 lru_add_drain_cpu(cpu); 6249 mlock_drain_remote(cpu); 6250 drain_pages(cpu); 6251 6252 /* 6253 * Spill the event counters of the dead processor 6254 * into the current processors event counters. 6255 * This artificially elevates the count of the current 6256 * processor. 6257 */ 6258 vm_events_fold_cpu(cpu); 6259 6260 /* 6261 * Zero the differential counters of the dead processor 6262 * so that the vm statistics are consistent. 6263 * 6264 * This is only okay since the processor is dead and cannot 6265 * race with what we are doing. 6266 */ 6267 cpu_vm_stats_fold(cpu); 6268 6269 for_each_populated_zone(zone) 6270 zone_pcp_update(zone, 0); 6271 6272 return 0; 6273 } 6274 6275 static int page_alloc_cpu_online(unsigned int cpu) 6276 { 6277 struct zone *zone; 6278 6279 for_each_populated_zone(zone) 6280 zone_pcp_update(zone, 1); 6281 return 0; 6282 } 6283 6284 void __init page_alloc_init_cpuhp(void) 6285 { 6286 int ret; 6287 6288 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 6289 "mm/page_alloc:pcp", 6290 page_alloc_cpu_online, 6291 page_alloc_cpu_dead); 6292 WARN_ON(ret < 0); 6293 } 6294 6295 /* 6296 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6297 * or min_free_kbytes changes. 
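 *
 * Each zone contributes its largest lowmem_reserve[] entry plus its high
 * watermark, capped at the zone's managed pages. Illustrative numbers only:
 * a zone with lowmem_reserve[] = { 0, 0, 1024 } and a high watermark of
 * 2048 pages adds 3072 pages to totalreserve_pages, provided it manages at
 * least that many pages.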
6298 */ 6299 static void calculate_totalreserve_pages(void) 6300 { 6301 struct pglist_data *pgdat; 6302 unsigned long reserve_pages = 0; 6303 enum zone_type i, j; 6304 6305 for_each_online_pgdat(pgdat) { 6306 6307 pgdat->totalreserve_pages = 0; 6308 6309 for (i = 0; i < MAX_NR_ZONES; i++) { 6310 struct zone *zone = pgdat->node_zones + i; 6311 long max = 0; 6312 unsigned long managed_pages = zone_managed_pages(zone); 6313 6314 /* 6315 * lowmem_reserve[j] is monotonically non-decreasing 6316 * in j for a given zone (see 6317 * setup_per_zone_lowmem_reserve()). The maximum 6318 * valid reserve lives at the highest index with a 6319 * non-zero value, so scan backwards and stop at the 6320 * first hit. 6321 */ 6322 for (j = MAX_NR_ZONES - 1; j > i; j--) { 6323 if (!zone->lowmem_reserve[j]) 6324 continue; 6325 6326 max = zone->lowmem_reserve[j]; 6327 break; 6328 } 6329 /* we treat the high watermark as reserved pages. */ 6330 max += high_wmark_pages(zone); 6331 6332 max = min_t(unsigned long, max, managed_pages); 6333 6334 pgdat->totalreserve_pages += max; 6335 6336 reserve_pages += max; 6337 } 6338 } 6339 totalreserve_pages = reserve_pages; 6340 trace_mm_calculate_totalreserve_pages(totalreserve_pages); 6341 } 6342 6343 /* 6344 * setup_per_zone_lowmem_reserve - called whenever 6345 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6346 * has a correct pages reserved value, so an adequate number of 6347 * pages are left in the zone after a successful __alloc_pages(). 6348 */ 6349 static void setup_per_zone_lowmem_reserve(void) 6350 { 6351 struct pglist_data *pgdat; 6352 enum zone_type i, j; 6353 /* 6354 * For a given zone node_zones[i], lowmem_reserve[j] (j > i) 6355 * represents how many pages in zone i must effectively be kept 6356 * in reserve when deciding whether an allocation class that is 6357 * allowed to allocate from zones up to j may fall back into 6358 * zone i. 6359 * 6360 * As j increases, the allocation class can use a strictly larger 6361 * set of fallback zones and therefore must not be allowed to 6362 * deplete low zones more aggressively than a less flexible one. 6363 * As a result, lowmem_reserve[j] is required to be monotonically 6364 * non-decreasing in j for each zone i. Callers such as 6365 * calculate_totalreserve_pages() rely on this monotonicity when 6366 * selecting the maximum reserve entry. 
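 *
 * Worked example (illustrative; the ratio and sizes are made up): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256 and 4 GiB of managed memory
 * (1048576 pages with 4 KiB pages) in the zones above it, ZONE_DMA32 gets a
 * largest lowmem_reserve[] entry of 1048576 / 256 = 4096 pages, i.e.
 * allocations that could have been satisfied from the higher zones must
 * leave roughly that many DMA32 pages untouched.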
6367 */ 6368 for_each_online_pgdat(pgdat) { 6369 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 6370 struct zone *zone = &pgdat->node_zones[i]; 6371 int ratio = sysctl_lowmem_reserve_ratio[i]; 6372 bool clear = !ratio || !zone_managed_pages(zone); 6373 unsigned long managed_pages = 0; 6374 6375 for (j = i + 1; j < MAX_NR_ZONES; j++) { 6376 struct zone *upper_zone = &pgdat->node_zones[j]; 6377 6378 managed_pages += zone_managed_pages(upper_zone); 6379 6380 if (clear) 6381 zone->lowmem_reserve[j] = 0; 6382 else 6383 zone->lowmem_reserve[j] = managed_pages / ratio; 6384 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, 6385 zone->lowmem_reserve[j]); 6386 } 6387 } 6388 } 6389 6390 /* update totalreserve_pages */ 6391 calculate_totalreserve_pages(); 6392 } 6393 6394 static void __setup_per_zone_wmarks(void) 6395 { 6396 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6397 unsigned long lowmem_pages = 0; 6398 struct zone *zone; 6399 unsigned long flags; 6400 6401 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 6402 for_each_zone(zone) { 6403 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 6404 lowmem_pages += zone_managed_pages(zone); 6405 } 6406 6407 for_each_zone(zone) { 6408 u64 tmp; 6409 6410 spin_lock_irqsave(&zone->lock, flags); 6411 tmp = (u64)pages_min * zone_managed_pages(zone); 6412 tmp = div64_ul(tmp, lowmem_pages); 6413 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 6414 /* 6415 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6416 * need highmem and movable zones pages, so cap pages_min 6417 * to a small value here. 6418 * 6419 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6420 * deltas control async page reclaim, and so should 6421 * not be capped for highmem and movable zones. 6422 */ 6423 unsigned long min_pages; 6424 6425 min_pages = zone_managed_pages(zone) / 1024; 6426 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6427 zone->_watermark[WMARK_MIN] = min_pages; 6428 } else { 6429 /* 6430 * If it's a lowmem zone, reserve a number of pages 6431 * proportionate to the zone's size. 6432 */ 6433 zone->_watermark[WMARK_MIN] = tmp; 6434 } 6435 6436 /* 6437 * Set the kswapd watermarks distance according to the 6438 * scale factor in proportion to available memory, but 6439 * ensure a minimum size on small systems. 6440 */ 6441 tmp = max_t(u64, tmp >> 2, 6442 mult_frac(zone_managed_pages(zone), 6443 watermark_scale_factor, 10000)); 6444 6445 zone->watermark_boost = 0; 6446 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6447 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 6448 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 6449 trace_mm_setup_per_zone_wmarks(zone); 6450 6451 spin_unlock_irqrestore(&zone->lock, flags); 6452 } 6453 6454 /* update totalreserve_pages */ 6455 calculate_totalreserve_pages(); 6456 } 6457 6458 /** 6459 * setup_per_zone_wmarks - called when min_free_kbytes changes 6460 * or when memory is hot-{added|removed} 6461 * 6462 * Ensures that the watermark[min,low,high] values for each zone are set 6463 * correctly with respect to min_free_kbytes. 6464 */ 6465 void setup_per_zone_wmarks(void) 6466 { 6467 struct zone *zone; 6468 static DEFINE_SPINLOCK(lock); 6469 6470 spin_lock(&lock); 6471 __setup_per_zone_wmarks(); 6472 spin_unlock(&lock); 6473 6474 /* 6475 * The watermark size have changed so update the pcpu batch 6476 * and high limits or the limits may be inappropriate. 
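 *
 * Rough illustration of the coupling (made-up numbers, 4 KiB pages,
 * watermark_scale_factor = 10): a single 4 GiB lowmem zone with
 * min_free_kbytes = 8192 gets WMARK_MIN = 2048 pages and a kswapd delta of
 * max(2048 / 4, 1048576 * 10 / 10000) = 1048 pages, hence WMARK_LOW = 3096
 * and WMARK_HIGH = 4144. zone_highsize() derives the default pcp high limit
 * from that low watermark, which is why the pcplists are refreshed below.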
6477 */ 6478 for_each_zone(zone) 6479 zone_pcp_update(zone, 0); 6480 } 6481 6482 /* 6483 * Initialise min_free_kbytes. 6484 * 6485 * For small machines we want it small (128k min). For large machines 6486 * we want it large (256MB max). But it is not linear, because network 6487 * bandwidth does not increase linearly with machine size. We use 6488 * 6489 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6490 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6491 * 6492 * which yields 6493 * 6494 * 16MB: 512k 6495 * 32MB: 724k 6496 * 64MB: 1024k 6497 * 128MB: 1448k 6498 * 256MB: 2048k 6499 * 512MB: 2896k 6500 * 1024MB: 4096k 6501 * 2048MB: 5792k 6502 * 4096MB: 8192k 6503 * 8192MB: 11584k 6504 * 16384MB: 16384k 6505 */ 6506 void calculate_min_free_kbytes(void) 6507 { 6508 unsigned long lowmem_kbytes; 6509 int new_min_free_kbytes; 6510 6511 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6512 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6513 6514 if (new_min_free_kbytes > user_min_free_kbytes) 6515 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6516 else 6517 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6518 new_min_free_kbytes, user_min_free_kbytes); 6519 6520 } 6521 6522 int __meminit init_per_zone_wmark_min(void) 6523 { 6524 calculate_min_free_kbytes(); 6525 setup_per_zone_wmarks(); 6526 refresh_zone_stat_thresholds(); 6527 setup_per_zone_lowmem_reserve(); 6528 6529 #ifdef CONFIG_NUMA 6530 setup_min_unmapped_ratio(); 6531 setup_min_slab_ratio(); 6532 #endif 6533 6534 khugepaged_min_free_kbytes_update(); 6535 6536 return 0; 6537 } 6538 postcore_initcall(init_per_zone_wmark_min) 6539 6540 /* 6541 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6542 * that we can call two helper functions whenever min_free_kbytes 6543 * changes. 
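 *
 * For example (illustrative): on a machine where calculate_min_free_kbytes()
 * would pick int_sqrt(4194304 * 16) = 8192 kB for 4 GiB of lowmem, an admin
 * writing 16384 to this sysctl records user_min_free_kbytes = 16384, and
 * later automatic recalculations (e.g. after memory hotplug) will not lower
 * min_free_kbytes below that value.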
6544 */ 6545 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6546 void *buffer, size_t *length, loff_t *ppos) 6547 { 6548 int rc; 6549 6550 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6551 if (rc) 6552 return rc; 6553 6554 if (write) { 6555 user_min_free_kbytes = min_free_kbytes; 6556 setup_per_zone_wmarks(); 6557 } 6558 return 0; 6559 } 6560 6561 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6562 void *buffer, size_t *length, loff_t *ppos) 6563 { 6564 int rc; 6565 6566 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6567 if (rc) 6568 return rc; 6569 6570 if (write) 6571 setup_per_zone_wmarks(); 6572 6573 return 0; 6574 } 6575 6576 #ifdef CONFIG_NUMA 6577 static void setup_min_unmapped_ratio(void) 6578 { 6579 pg_data_t *pgdat; 6580 struct zone *zone; 6581 6582 for_each_online_pgdat(pgdat) 6583 pgdat->min_unmapped_pages = 0; 6584 6585 for_each_zone(zone) 6586 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6587 sysctl_min_unmapped_ratio) / 100; 6588 } 6589 6590 6591 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6592 void *buffer, size_t *length, loff_t *ppos) 6593 { 6594 int rc; 6595 6596 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6597 if (rc) 6598 return rc; 6599 6600 setup_min_unmapped_ratio(); 6601 6602 return 0; 6603 } 6604 6605 static void setup_min_slab_ratio(void) 6606 { 6607 pg_data_t *pgdat; 6608 struct zone *zone; 6609 6610 for_each_online_pgdat(pgdat) 6611 pgdat->min_slab_pages = 0; 6612 6613 for_each_zone(zone) 6614 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6615 sysctl_min_slab_ratio) / 100; 6616 } 6617 6618 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6619 void *buffer, size_t *length, loff_t *ppos) 6620 { 6621 int rc; 6622 6623 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6624 if (rc) 6625 return rc; 6626 6627 setup_min_slab_ratio(); 6628 6629 return 0; 6630 } 6631 #endif 6632 6633 /* 6634 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6635 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 6636 * whenever sysctl_lowmem_reserve_ratio changes. 6637 * 6638 * The reserve ratio obviously has absolutely no relation with the 6639 * minimum watermarks. The lowmem reserve ratio can only make sense 6640 * if in function of the boot time zone sizes. 6641 */ 6642 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6643 int write, void *buffer, size_t *length, loff_t *ppos) 6644 { 6645 int i; 6646 6647 proc_dointvec_minmax(table, write, buffer, length, ppos); 6648 6649 for (i = 0; i < MAX_NR_ZONES; i++) { 6650 if (sysctl_lowmem_reserve_ratio[i] < 1) 6651 sysctl_lowmem_reserve_ratio[i] = 0; 6652 } 6653 6654 setup_per_zone_lowmem_reserve(); 6655 return 0; 6656 } 6657 6658 /* 6659 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6660 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6661 * pagelist can have before it gets flushed back to buddy allocator. 
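 *
 * Worked example (illustrative, 4 KiB pages): writing 8 (the smallest
 * accepted non-zero value, MIN_PERCPU_PAGELIST_HIGH_FRACTION) for a 4 GiB
 * zone (1048576 pages) on a node with 16 CPUs gives each CPU's pcp a high
 * limit of roughly 1048576 / 8 / 16 = 8192 pages, subject to the batch * 4
 * floor applied in zone_highsize().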
6662 */ 6663 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6664 int write, void *buffer, size_t *length, loff_t *ppos) 6665 { 6666 struct zone *zone; 6667 int old_percpu_pagelist_high_fraction; 6668 int ret; 6669 6670 /* 6671 * Avoid using pcp_batch_high_lock for reads as the value is read 6672 * atomically and a race with offlining is harmless. 6673 */ 6674 6675 if (!write) 6676 return proc_dointvec_minmax(table, write, buffer, length, ppos); 6677 6678 mutex_lock(&pcp_batch_high_lock); 6679 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6680 6681 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6682 if (ret < 0) 6683 goto out; 6684 6685 /* Sanity checking to avoid pcp imbalance */ 6686 if (percpu_pagelist_high_fraction && 6687 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6688 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6689 ret = -EINVAL; 6690 goto out; 6691 } 6692 6693 /* No change? */ 6694 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6695 goto out; 6696 6697 for_each_populated_zone(zone) 6698 zone_set_pageset_high_and_batch(zone, 0); 6699 out: 6700 mutex_unlock(&pcp_batch_high_lock); 6701 return ret; 6702 } 6703 6704 static const struct ctl_table page_alloc_sysctl_table[] = { 6705 { 6706 .procname = "min_free_kbytes", 6707 .data = &min_free_kbytes, 6708 .maxlen = sizeof(min_free_kbytes), 6709 .mode = 0644, 6710 .proc_handler = min_free_kbytes_sysctl_handler, 6711 .extra1 = SYSCTL_ZERO, 6712 }, 6713 { 6714 .procname = "watermark_boost_factor", 6715 .data = &watermark_boost_factor, 6716 .maxlen = sizeof(watermark_boost_factor), 6717 .mode = 0644, 6718 .proc_handler = proc_dointvec_minmax, 6719 .extra1 = SYSCTL_ZERO, 6720 }, 6721 { 6722 .procname = "watermark_scale_factor", 6723 .data = &watermark_scale_factor, 6724 .maxlen = sizeof(watermark_scale_factor), 6725 .mode = 0644, 6726 .proc_handler = watermark_scale_factor_sysctl_handler, 6727 .extra1 = SYSCTL_ONE, 6728 .extra2 = SYSCTL_THREE_THOUSAND, 6729 }, 6730 { 6731 .procname = "defrag_mode", 6732 .data = &defrag_mode, 6733 .maxlen = sizeof(defrag_mode), 6734 .mode = 0644, 6735 .proc_handler = proc_dointvec_minmax, 6736 .extra1 = SYSCTL_ZERO, 6737 .extra2 = SYSCTL_ONE, 6738 }, 6739 { 6740 .procname = "percpu_pagelist_high_fraction", 6741 .data = &percpu_pagelist_high_fraction, 6742 .maxlen = sizeof(percpu_pagelist_high_fraction), 6743 .mode = 0644, 6744 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6745 .extra1 = SYSCTL_ZERO, 6746 }, 6747 { 6748 .procname = "lowmem_reserve_ratio", 6749 .data = &sysctl_lowmem_reserve_ratio, 6750 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6751 .mode = 0644, 6752 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6753 }, 6754 #ifdef CONFIG_NUMA 6755 { 6756 .procname = "numa_zonelist_order", 6757 .data = &numa_zonelist_order, 6758 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6759 .mode = 0644, 6760 .proc_handler = numa_zonelist_order_handler, 6761 }, 6762 { 6763 .procname = "min_unmapped_ratio", 6764 .data = &sysctl_min_unmapped_ratio, 6765 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6766 .mode = 0644, 6767 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6768 .extra1 = SYSCTL_ZERO, 6769 .extra2 = SYSCTL_ONE_HUNDRED, 6770 }, 6771 { 6772 .procname = "min_slab_ratio", 6773 .data = &sysctl_min_slab_ratio, 6774 .maxlen = sizeof(sysctl_min_slab_ratio), 6775 .mode = 0644, 6776 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6777 .extra1 = 
SYSCTL_ZERO, 6778 .extra2 = SYSCTL_ONE_HUNDRED, 6779 }, 6780 #endif 6781 }; 6782 6783 void __init page_alloc_sysctl_init(void) 6784 { 6785 register_sysctl_init("vm", page_alloc_sysctl_table); 6786 } 6787 6788 #ifdef CONFIG_CONTIG_ALLOC 6789 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6790 static void alloc_contig_dump_pages(struct list_head *page_list) 6791 { 6792 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6793 6794 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 6795 struct page *page; 6796 6797 dump_stack(); 6798 list_for_each_entry(page, page_list, lru) 6799 dump_page(page, "migration failure"); 6800 } 6801 } 6802 6803 /* [start, end) must belong to a single zone. */ 6804 static int __alloc_contig_migrate_range(struct compact_control *cc, 6805 unsigned long start, unsigned long end) 6806 { 6807 /* This function is based on compact_zone() from compaction.c. */ 6808 unsigned int nr_reclaimed; 6809 unsigned long pfn = start; 6810 unsigned int tries = 0; 6811 int ret = 0; 6812 struct migration_target_control mtc = { 6813 .nid = zone_to_nid(cc->zone), 6814 .gfp_mask = cc->gfp_mask, 6815 .reason = MR_CONTIG_RANGE, 6816 }; 6817 6818 lru_cache_disable(); 6819 6820 while (pfn < end || !list_empty(&cc->migratepages)) { 6821 if (fatal_signal_pending(current)) { 6822 ret = -EINTR; 6823 break; 6824 } 6825 6826 if (list_empty(&cc->migratepages)) { 6827 cc->nr_migratepages = 0; 6828 ret = isolate_migratepages_range(cc, pfn, end); 6829 if (ret && ret != -EAGAIN) 6830 break; 6831 pfn = cc->migrate_pfn; 6832 tries = 0; 6833 } else if (++tries == 5) { 6834 ret = -EBUSY; 6835 break; 6836 } 6837 6838 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6839 &cc->migratepages); 6840 cc->nr_migratepages -= nr_reclaimed; 6841 6842 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6843 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6844 6845 /* 6846 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6847 * to retry again over this error, so do the same here. 6848 */ 6849 if (ret == -ENOMEM) 6850 break; 6851 } 6852 6853 lru_cache_enable(); 6854 if (ret < 0) { 6855 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6856 alloc_contig_dump_pages(&cc->migratepages); 6857 putback_movable_pages(&cc->migratepages); 6858 } 6859 6860 return (ret < 0) ? ret : 0; 6861 } 6862 6863 static void split_free_frozen_pages(struct list_head *list, gfp_t gfp_mask) 6864 { 6865 int order; 6866 6867 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6868 struct page *page, *next; 6869 int nr_pages = 1 << order; 6870 6871 list_for_each_entry_safe(page, next, &list[order], lru) { 6872 int i; 6873 6874 post_alloc_hook(page, order, gfp_mask); 6875 if (!order) 6876 continue; 6877 6878 __split_page(page, order); 6879 6880 /* Add all subpages to the order-0 head, in sequence. */ 6881 list_del(&page->lru); 6882 for (i = 0; i < nr_pages; i++) 6883 list_add_tail(&page[i].lru, &list[0]); 6884 } 6885 } 6886 } 6887 6888 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) 6889 { 6890 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 6891 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | 6892 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO | 6893 __GFP_SKIP_KASAN; 6894 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 6895 6896 /* 6897 * We are given the range to allocate; node, mobility and placement 6898 * hints are irrelevant at this point. We'll simply ignore them. 
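 *
 * For instance (illustrative): GFP_KERNEL or GFP_KERNEL | __GFP_NOWARN pass
 * the check below unchanged, whereas GFP_KERNEL | __GFP_NOFAIL is rejected
 * with -EINVAL because __GFP_NOFAIL is in neither the reclaim mask nor the
 * action mask.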
*/ 6900 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | 6901 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE); 6902 6903 /* 6904 * We only support most reclaim flags (but not NOFAIL/NORETRY), and 6905 * selected action flags. 6906 */ 6907 if (gfp_mask & ~(reclaim_mask | action_mask)) 6908 return -EINVAL; 6909 6910 /* 6911 * Flags to control page compaction/migration/reclaim, to free up our 6912 * page range. Migratable pages are movable, __GFP_MOVABLE is implied 6913 * for them. 6914 * 6915 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that 6916 * to not degrade callers. 6917 */ 6918 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | 6919 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; 6920 return 0; 6921 } 6922 6923 static void __free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages) 6924 { 6925 for (; nr_pages--; pfn++) 6926 free_frozen_pages(pfn_to_page(pfn), 0); 6927 } 6928 6929 /** 6930 * alloc_contig_frozen_range() -- tries to allocate given range of frozen pages 6931 * @start: start PFN to allocate 6932 * @end: one-past-the-last PFN to allocate 6933 * @alloc_flags: allocation information 6934 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some 6935 * action and reclaim modifiers are supported. Reclaim modifiers 6936 * control allocation behavior during compaction/migration/reclaim. 6937 * 6938 * The PFN range does not have to be pageblock aligned. The PFN range must 6939 * belong to a single zone. 6940 * 6941 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6942 * pageblocks in the range. Once isolated, the pageblocks should not 6943 * be modified by others. 6944 * 6945 * All frozen pages whose PFN is in [start, end) are allocated for the 6946 * caller and can be freed with free_contig_frozen_range(); 6947 * free_frozen_pages() can also be used to free compound frozen pages 6948 * directly. 6949 * 6950 * Return: zero on success or negative error code. 6951 */ 6952 int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end, 6953 acr_flags_t alloc_flags, gfp_t gfp_mask) 6954 { 6955 const unsigned int order = ilog2(end - start); 6956 unsigned long outer_start, outer_end; 6957 int ret = 0; 6958 6959 struct compact_control cc = { 6960 .nr_migratepages = 0, 6961 .order = -1, 6962 .zone = page_zone(pfn_to_page(start)), 6963 .mode = MIGRATE_SYNC, 6964 .ignore_skip_hint = true, 6965 .no_set_skip_hint = true, 6966 .alloc_contig = true, 6967 }; 6968 INIT_LIST_HEAD(&cc.migratepages); 6969 enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ? 6970 PB_ISOLATE_MODE_CMA_ALLOC : 6971 PB_ISOLATE_MODE_OTHER; 6972 6973 /* 6974 * In contrast to the buddy, we allow for orders here that exceed 6975 * MAX_PAGE_ORDER, so we must manually make sure that we are not 6976 * exceeding the maximum folio order. 6977 */ 6978 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER)) 6979 return -EINVAL; 6980 6981 gfp_mask = current_gfp_context(gfp_mask); 6982 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) 6983 return -EINVAL; 6984 6985 /* 6986 * What we do here is we mark all pageblocks in range as 6987 * MIGRATE_ISOLATE. Because pageblock and max order pages may 6988 * have different sizes, and due to the way the page allocator 6989 * works, start_isolate_page_range() has special handling for this. 6990 * 6991 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6992 * migrate the pages from an unaligned range (i.e. pages that 6993 * we are interested in).
This will put all the pages in 6994 * range back to page allocator as MIGRATE_ISOLATE. 6995 * 6996 * When this is done, we take the pages in range from page 6997 * allocator removing them from the buddy system. This way 6998 * page allocator will never consider using them. 6999 * 7000 * This lets us mark the pageblocks back as 7001 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 7002 * aligned range but not in the unaligned, original range are 7003 * put back to page allocator so that buddy can use them. 7004 */ 7005 7006 ret = start_isolate_page_range(start, end, mode); 7007 if (ret) 7008 goto done; 7009 7010 drain_all_pages(cc.zone); 7011 7012 /* 7013 * In case of -EBUSY, we'd like to know which page causes problem. 7014 * So, just fall through. test_pages_isolated() has a tracepoint 7015 * which will report the busy page. 7016 * 7017 * It is possible that busy pages could become available before 7018 * the call to test_pages_isolated, and the range will actually be 7019 * allocated. So, if we fall through be sure to clear ret so that 7020 * -EBUSY is not accidentally used or returned to caller. 7021 */ 7022 ret = __alloc_contig_migrate_range(&cc, start, end); 7023 if (ret && ret != -EBUSY) 7024 goto done; 7025 7026 /* 7027 * When in-use hugetlb pages are migrated, they may simply be released 7028 * back into the free hugepage pool instead of being returned to the 7029 * buddy system. After the migration of in-use huge pages is completed, 7030 * we will invoke replace_free_hugepage_folios() to ensure that these 7031 * hugepages are properly released to the buddy system. 7032 */ 7033 ret = replace_free_hugepage_folios(start, end); 7034 if (ret) 7035 goto done; 7036 7037 /* 7038 * Pages from [start, end) are within a pageblock_nr_pages 7039 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 7040 * more, all pages in [start, end) are free in page allocator. 7041 * What we are going to do is to allocate all pages from 7042 * [start, end) (that is remove them from page allocator). 7043 * 7044 * The only problem is that pages at the beginning and at the 7045 * end of interesting range may be not aligned with pages that 7046 * page allocator holds, ie. they can be part of higher order 7047 * pages. Because of this, we reserve the bigger range and 7048 * once this is done free the pages we are not interested in. 7049 * 7050 * We don't have to hold zone->lock here because the pages are 7051 * isolated thus they won't get removed from buddy. 7052 */ 7053 outer_start = find_large_buddy(start); 7054 7055 /* Make sure the range is really isolated. */ 7056 if (test_pages_isolated(outer_start, end, mode)) { 7057 ret = -EBUSY; 7058 goto done; 7059 } 7060 7061 /* Grab isolated pages from freelists. 
*/ 7062 outer_end = isolate_freepages_range(&cc, outer_start, end); 7063 if (!outer_end) { 7064 ret = -EBUSY; 7065 goto done; 7066 } 7067 7068 if (!(gfp_mask & __GFP_COMP)) { 7069 split_free_frozen_pages(cc.freepages, gfp_mask); 7070 7071 /* Free head and tail (if any) */ 7072 if (start != outer_start) 7073 __free_contig_frozen_range(outer_start, start - outer_start); 7074 if (end != outer_end) 7075 __free_contig_frozen_range(end, outer_end - end); 7076 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 7077 struct page *head = pfn_to_page(start); 7078 7079 check_new_pages(head, order); 7080 prep_new_page(head, order, gfp_mask, 0); 7081 } else { 7082 ret = -EINVAL; 7083 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 7084 start, end, outer_start, outer_end); 7085 } 7086 done: 7087 undo_isolate_page_range(start, end); 7088 return ret; 7089 } 7090 EXPORT_SYMBOL(alloc_contig_frozen_range_noprof); 7091 7092 /** 7093 * alloc_contig_range() -- tries to allocate given range of pages 7094 * @start: start PFN to allocate 7095 * @end: one-past-the-last PFN to allocate 7096 * @alloc_flags: allocation information 7097 * @gfp_mask: GFP mask. 7098 * 7099 * This routine is a wrapper around alloc_contig_frozen_range(); it can't 7100 * be used to allocate compound pages, and the refcount of each allocated page 7101 * will be set to one. 7102 * 7103 * All pages whose PFN is in [start, end) are allocated for the caller, 7104 * and should be freed with free_contig_range() or by manually calling 7105 * __free_page() on each allocated page. 7106 * 7107 * Return: zero on success or negative error code. 7108 */ 7109 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 7110 acr_flags_t alloc_flags, gfp_t gfp_mask) 7111 { 7112 int ret; 7113 7114 if (WARN_ON(gfp_mask & __GFP_COMP)) 7115 return -EINVAL; 7116 7117 ret = alloc_contig_frozen_range_noprof(start, end, alloc_flags, gfp_mask); 7118 if (!ret) 7119 set_pages_refcounted(pfn_to_page(start), end - start); 7120 7121 return ret; 7122 } 7123 EXPORT_SYMBOL(alloc_contig_range_noprof); 7124 7125 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 7126 unsigned long nr_pages, bool skip_hugetlb, 7127 bool *skipped_hugetlb) 7128 { 7129 unsigned long end_pfn = start_pfn + nr_pages; 7130 struct page *page; 7131 7132 while (start_pfn < end_pfn) { 7133 unsigned long step = 1; 7134 7135 page = pfn_to_online_page(start_pfn); 7136 if (!page) 7137 return false; 7138 7139 if (page_zone(page) != z) 7140 return false; 7141 7142 if (page_is_unmovable(z, page, PB_ISOLATE_MODE_OTHER, &step)) 7143 return false; 7144 7145 /* 7146 * Only consider ranges containing hugepages if those pages are 7147 * smaller than the requested contiguous region. e.g.: 7148 * Move 2MB pages to free up a 1GB range. 7149 * Don't move 1GB pages to free up a 2MB range. 7150 * 7151 * This makes contiguous allocation more reliable if multiple 7152 * hugepage sizes are used without causing needless movement.
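 *
 * Illustrative numbers (4 KiB base pages): a request for 262144 pages (1 GiB)
 * may migrate 2 MiB HugeTLB pages (order 9, 512 pages) out of the way, but a
 * range containing a 1 GiB hugepage (order 18, 262144 pages) is rejected,
 * since nr_pages <= (1 << order) holds for it.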
*/ 7154 if (PageHuge(page)) { 7155 unsigned int order; 7156 7157 if (skip_hugetlb) { 7158 *skipped_hugetlb = true; 7159 return false; 7160 } 7161 7162 page = compound_head(page); 7163 order = compound_order(page); 7164 if ((order >= MAX_FOLIO_ORDER) || 7165 (nr_pages <= (1 << order))) 7166 return false; 7167 } 7168 7169 start_pfn += step; 7170 } 7171 return true; 7172 } 7173 7174 static bool zone_spans_last_pfn(const struct zone *zone, 7175 unsigned long start_pfn, unsigned long nr_pages) 7176 { 7177 unsigned long last_pfn = start_pfn + nr_pages - 1; 7178 7179 return zone_spans_pfn(zone, last_pfn); 7180 } 7181 7182 /** 7183 * alloc_contig_frozen_pages() -- tries to find and allocate contiguous range of frozen pages 7184 * @nr_pages: Number of contiguous pages to allocate 7185 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some 7186 * action and reclaim modifiers are supported. Reclaim modifiers 7187 * control allocation behavior during compaction/migration/reclaim. 7188 * @nid: Target node 7189 * @nodemask: Mask for other possible nodes 7190 * 7191 * This routine is a wrapper around alloc_contig_frozen_range(). It scans over 7192 * zones on an applicable zonelist to find a contiguous pfn range which can then 7193 * be tried for allocation with alloc_contig_frozen_range(). This routine is 7194 * intended for allocation requests which cannot be fulfilled with the buddy 7195 * allocator. 7196 * 7197 * The allocated memory is always aligned to a page boundary. If nr_pages is a 7198 * power of two, then the allocated range is also guaranteed to be aligned to the same 7199 * nr_pages (e.g. a 1GB request would be aligned to 1GB). 7200 * 7201 * Allocated frozen pages need to be freed with free_contig_frozen_range(), 7202 * or by manually calling free_frozen_pages() on each allocated frozen 7203 * non-compound page; compound frozen pages can be freed with 7204 * free_frozen_pages() directly. 7205 * 7206 * Return: pointer to contiguous frozen pages on success, or NULL if not successful. 7207 */ 7208 struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages, 7209 gfp_t gfp_mask, int nid, nodemask_t *nodemask) 7210 { 7211 unsigned long ret, pfn, flags; 7212 struct zonelist *zonelist; 7213 struct zone *zone; 7214 struct zoneref *z; 7215 bool skip_hugetlb = true; 7216 bool skipped_hugetlb = false; 7217 7218 retry: 7219 zonelist = node_zonelist(nid, gfp_mask); 7220 for_each_zone_zonelist_nodemask(zone, z, zonelist, 7221 gfp_zone(gfp_mask), nodemask) { 7222 spin_lock_irqsave(&zone->lock, flags); 7223 7224 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 7225 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 7226 if (pfn_range_valid_contig(zone, pfn, nr_pages, 7227 skip_hugetlb, 7228 &skipped_hugetlb)) { 7229 /* 7230 * We release the zone lock here because 7231 * alloc_contig_frozen_range() will also lock 7232 * the zone at some point. If there's an 7233 * allocation spinning on this lock, it may 7234 * win the race and cause allocation to fail. 7235 */ 7236 spin_unlock_irqrestore(&zone->lock, flags); 7237 ret = alloc_contig_frozen_range_noprof(pfn, 7238 pfn + nr_pages, 7239 ACR_FLAGS_NONE, 7240 gfp_mask); 7241 if (!ret) 7242 return pfn_to_page(pfn); 7243 spin_lock_irqsave(&zone->lock, flags); 7244 } 7245 pfn += nr_pages; 7246 } 7247 spin_unlock_irqrestore(&zone->lock, flags); 7248 } 7249 /* 7250 * If we failed, retry the search, but treat regions with HugeTLB pages 7251 * as valid targets.
This keeps the first pass fast, 7252 * since it does not try to migrate HugeTLB pages (which may fail). On the 7253 * second pass, we will try moving HugeTLB pages when those pages are 7254 * smaller than the requested contiguous region size. 7255 */ 7256 if (skip_hugetlb && skipped_hugetlb) { 7257 skip_hugetlb = false; 7258 goto retry; 7259 } 7260 return NULL; 7261 } 7262 EXPORT_SYMBOL(alloc_contig_frozen_pages_noprof); 7263 7264 /** 7265 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 7266 * @nr_pages: Number of contiguous pages to allocate 7267 * @gfp_mask: GFP mask. 7268 * @nid: Target node 7269 * @nodemask: Mask for other possible nodes 7270 * 7271 * This routine is a wrapper around alloc_contig_frozen_pages(); it can't 7272 * be used to allocate compound pages, and the refcount of each allocated page 7273 * will be set to one. 7274 * 7275 * Allocated pages can be freed with free_contig_range() or by manually 7276 * calling __free_page() on each allocated page. 7277 * 7278 * Return: pointer to contiguous pages on success, or NULL if not successful. 7279 */ 7280 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 7281 int nid, nodemask_t *nodemask) 7282 { 7283 struct page *page; 7284 7285 if (WARN_ON(gfp_mask & __GFP_COMP)) 7286 return NULL; 7287 7288 page = alloc_contig_frozen_pages_noprof(nr_pages, gfp_mask, nid, 7289 nodemask); 7290 if (page) 7291 set_pages_refcounted(page, nr_pages); 7292 7293 return page; 7294 } 7295 EXPORT_SYMBOL(alloc_contig_pages_noprof); 7296 7297 /** 7298 * free_contig_frozen_range() -- free the contiguous range of frozen pages 7299 * @pfn: start PFN to free 7300 * @nr_pages: Number of contiguous frozen pages to free 7301 * 7302 * This can be used to free allocated compound or non-compound frozen pages. 7303 */ 7304 void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages) 7305 { 7306 struct page *first_page = pfn_to_page(pfn); 7307 const unsigned int order = ilog2(nr_pages); 7308 7309 if (WARN_ON_ONCE(first_page != compound_head(first_page))) 7310 return; 7311 7312 if (PageHead(first_page)) { 7313 WARN_ON_ONCE(order != compound_order(first_page)); 7314 free_frozen_pages(first_page, order); 7315 return; 7316 } 7317 7318 __free_contig_frozen_range(pfn, nr_pages); 7319 } 7320 EXPORT_SYMBOL(free_contig_frozen_range); 7321 7322 /** 7323 * free_contig_range() -- free the contiguous range of pages 7324 * @pfn: start PFN to free 7325 * @nr_pages: Number of contiguous pages to free 7326 * 7327 * This can only be used to free allocated non-compound pages. 7328 */ 7329 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 7330 { 7331 if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn)))) 7332 return; 7333 7334 for (; nr_pages--; pfn++) 7335 __free_page(pfn_to_page(pfn)); 7336 } 7337 EXPORT_SYMBOL(free_contig_range); 7338 #endif /* CONFIG_CONTIG_ALLOC */ 7339 7340 /* 7341 * Effectively disable pcplists for the zone by setting the high limit to 0 7342 * and draining all cpus. A concurrent page freeing on another CPU that's about 7343 * to put the page on pcplist will either finish before the drain and the page 7344 * will be drained, or observe the new high limit and skip the pcplist. 7345 * 7346 * Must be paired with a call to zone_pcp_enable().
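 *
 * Typical usage sketch (illustrative; the surrounding code is the caller's):
 *
 *	zone_pcp_disable(zone);
 *	... operate on pages that must not sit on pcplists ...
 *	zone_pcp_enable(zone);
 *
 * zone_pcp_disable() leaves pcp_batch_high_lock held, so the two calls must
 * be strictly paired on the same path.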
7347 */ 7348 void zone_pcp_disable(struct zone *zone) 7349 { 7350 mutex_lock(&pcp_batch_high_lock); 7351 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 7352 __drain_all_pages(zone, true); 7353 } 7354 7355 void zone_pcp_enable(struct zone *zone) 7356 { 7357 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 7358 zone->pageset_high_max, zone->pageset_batch); 7359 mutex_unlock(&pcp_batch_high_lock); 7360 } 7361 7362 void zone_pcp_reset(struct zone *zone) 7363 { 7364 int cpu; 7365 struct per_cpu_zonestat *pzstats; 7366 7367 if (zone->per_cpu_pageset != &boot_pageset) { 7368 for_each_online_cpu(cpu) { 7369 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7370 drain_zonestat(zone, pzstats); 7371 } 7372 free_percpu(zone->per_cpu_pageset); 7373 zone->per_cpu_pageset = &boot_pageset; 7374 if (zone->per_cpu_zonestats != &boot_zonestats) { 7375 free_percpu(zone->per_cpu_zonestats); 7376 zone->per_cpu_zonestats = &boot_zonestats; 7377 } 7378 } 7379 } 7380 7381 #ifdef CONFIG_MEMORY_HOTREMOVE 7382 /* 7383 * All pages in the range must be in a single zone, must not contain holes, 7384 * must span full sections, and must be isolated before calling this function. 7385 * 7386 * Returns the number of managed (non-PageOffline()) pages in the range: the 7387 * number of pages for which memory offlining code must adjust managed page 7388 * counters using adjust_managed_page_count(). 7389 */ 7390 unsigned long __offline_isolated_pages(unsigned long start_pfn, 7391 unsigned long end_pfn) 7392 { 7393 unsigned long already_offline = 0, flags; 7394 unsigned long pfn = start_pfn; 7395 struct page *page; 7396 struct zone *zone; 7397 unsigned int order; 7398 7399 offline_mem_sections(pfn, end_pfn); 7400 zone = page_zone(pfn_to_page(pfn)); 7401 spin_lock_irqsave(&zone->lock, flags); 7402 while (pfn < end_pfn) { 7403 page = pfn_to_page(pfn); 7404 /* 7405 * The HWPoisoned page may be not in buddy system, and 7406 * page_count() is not 0. 7407 */ 7408 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 7409 pfn++; 7410 continue; 7411 } 7412 /* 7413 * At this point all remaining PageOffline() pages have a 7414 * reference count of 0 and can simply be skipped. 7415 */ 7416 if (PageOffline(page)) { 7417 BUG_ON(page_count(page)); 7418 BUG_ON(PageBuddy(page)); 7419 already_offline++; 7420 pfn++; 7421 continue; 7422 } 7423 7424 BUG_ON(page_count(page)); 7425 BUG_ON(!PageBuddy(page)); 7426 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 7427 order = buddy_order(page); 7428 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 7429 pfn += (1 << order); 7430 } 7431 spin_unlock_irqrestore(&zone->lock, flags); 7432 7433 return end_pfn - start_pfn - already_offline; 7434 } 7435 #endif 7436 7437 /* 7438 * This function returns a stable result only if called under zone lock. 
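 *
 * A sketch of a stable check (illustrative only, hypothetical caller):
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	free = is_free_buddy_page(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 *
 * Without zone->lock the result is only a point-in-time hint: the page may
 * be merged into a larger buddy or allocated concurrently.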
7439 */ 7440 bool is_free_buddy_page(const struct page *page) 7441 { 7442 unsigned long pfn = page_to_pfn(page); 7443 unsigned int order; 7444 7445 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7446 const struct page *head = page - (pfn & ((1 << order) - 1)); 7447 7448 if (PageBuddy(head) && 7449 buddy_order_unsafe(head) >= order) 7450 break; 7451 } 7452 7453 return order <= MAX_PAGE_ORDER; 7454 } 7455 EXPORT_SYMBOL(is_free_buddy_page); 7456 7457 #ifdef CONFIG_MEMORY_FAILURE 7458 static inline void add_to_free_list(struct page *page, struct zone *zone, 7459 unsigned int order, int migratetype, 7460 bool tail) 7461 { 7462 __add_to_free_list(page, zone, order, migratetype, tail); 7463 account_freepages(zone, 1 << order, migratetype); 7464 } 7465 7466 /* 7467 * Break down a higher-order page in sub-pages, and keep our target out of 7468 * buddy allocator. 7469 */ 7470 static void break_down_buddy_pages(struct zone *zone, struct page *page, 7471 struct page *target, int low, int high, 7472 int migratetype) 7473 { 7474 unsigned long size = 1 << high; 7475 struct page *current_buddy; 7476 7477 while (high > low) { 7478 high--; 7479 size >>= 1; 7480 7481 if (target >= &page[size]) { 7482 current_buddy = page; 7483 page = page + size; 7484 } else { 7485 current_buddy = page + size; 7486 } 7487 7488 if (set_page_guard(zone, current_buddy, high)) 7489 continue; 7490 7491 add_to_free_list(current_buddy, zone, high, migratetype, false); 7492 set_buddy_order(current_buddy, high); 7493 } 7494 } 7495 7496 /* 7497 * Take a page that will be marked as poisoned off the buddy allocator. 7498 */ 7499 bool take_page_off_buddy(struct page *page) 7500 { 7501 struct zone *zone = page_zone(page); 7502 unsigned long pfn = page_to_pfn(page); 7503 unsigned long flags; 7504 unsigned int order; 7505 bool ret = false; 7506 7507 spin_lock_irqsave(&zone->lock, flags); 7508 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7509 struct page *page_head = page - (pfn & ((1 << order) - 1)); 7510 int page_order = buddy_order(page_head); 7511 7512 if (PageBuddy(page_head) && page_order >= order) { 7513 unsigned long pfn_head = page_to_pfn(page_head); 7514 int migratetype = get_pfnblock_migratetype(page_head, 7515 pfn_head); 7516 7517 del_page_from_free_list(page_head, zone, page_order, 7518 migratetype); 7519 break_down_buddy_pages(zone, page_head, page, 0, 7520 page_order, migratetype); 7521 SetPageHWPoisonTakenOff(page); 7522 ret = true; 7523 break; 7524 } 7525 if (page_count(page_head) > 0) 7526 break; 7527 } 7528 spin_unlock_irqrestore(&zone->lock, flags); 7529 return ret; 7530 } 7531 7532 /* 7533 * Cancel takeoff done by take_page_off_buddy(). 
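 *
 * Illustrative pairing (hypothetical caller; these helpers are only built
 * with CONFIG_MEMORY_FAILURE):
 *
 *	if (take_page_off_buddy(page)) {
 *		... page is off the free lists and marked HWPoisonTakenOff ...
 *		if (need_to_undo)
 *			put_page_back_buddy(page);
 *	}
 *
 * need_to_undo stands for whatever condition makes the poisoning unnecessary
 * (e.g. unpoisoning).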
7534 */ 7535 bool put_page_back_buddy(struct page *page) 7536 { 7537 struct zone *zone = page_zone(page); 7538 unsigned long flags; 7539 bool ret = false; 7540 7541 spin_lock_irqsave(&zone->lock, flags); 7542 if (put_page_testzero(page)) { 7543 unsigned long pfn = page_to_pfn(page); 7544 int migratetype = get_pfnblock_migratetype(page, pfn); 7545 7546 ClearPageHWPoisonTakenOff(page); 7547 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 7548 if (TestClearPageHWPoison(page)) { 7549 ret = true; 7550 } 7551 } 7552 spin_unlock_irqrestore(&zone->lock, flags); 7553 7554 return ret; 7555 } 7556 #endif 7557 7558 bool has_managed_zone(enum zone_type zone) 7559 { 7560 struct pglist_data *pgdat; 7561 7562 for_each_online_pgdat(pgdat) { 7563 if (managed_zone(&pgdat->node_zones[zone])) 7564 return true; 7565 } 7566 return false; 7567 } 7568 7569 #ifdef CONFIG_UNACCEPTED_MEMORY 7570 7571 static bool lazy_accept = true; 7572 7573 static int __init accept_memory_parse(char *p) 7574 { 7575 if (!strcmp(p, "lazy")) { 7576 lazy_accept = true; 7577 return 0; 7578 } else if (!strcmp(p, "eager")) { 7579 lazy_accept = false; 7580 return 0; 7581 } else { 7582 return -EINVAL; 7583 } 7584 } 7585 early_param("accept_memory", accept_memory_parse); 7586 7587 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7588 { 7589 phys_addr_t start = page_to_phys(page); 7590 7591 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 7592 } 7593 7594 static void __accept_page(struct zone *zone, unsigned long *flags, 7595 struct page *page) 7596 { 7597 list_del(&page->lru); 7598 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7599 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7600 __ClearPageUnaccepted(page); 7601 spin_unlock_irqrestore(&zone->lock, *flags); 7602 7603 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 7604 7605 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 7606 } 7607 7608 void accept_page(struct page *page) 7609 { 7610 struct zone *zone = page_zone(page); 7611 unsigned long flags; 7612 7613 spin_lock_irqsave(&zone->lock, flags); 7614 if (!PageUnaccepted(page)) { 7615 spin_unlock_irqrestore(&zone->lock, flags); 7616 return; 7617 } 7618 7619 /* Unlocks zone->lock */ 7620 __accept_page(zone, &flags, page); 7621 } 7622 7623 static bool try_to_accept_memory_one(struct zone *zone) 7624 { 7625 unsigned long flags; 7626 struct page *page; 7627 7628 spin_lock_irqsave(&zone->lock, flags); 7629 page = list_first_entry_or_null(&zone->unaccepted_pages, 7630 struct page, lru); 7631 if (!page) { 7632 spin_unlock_irqrestore(&zone->lock, flags); 7633 return false; 7634 } 7635 7636 /* Unlocks zone->lock */ 7637 __accept_page(zone, &flags, page); 7638 7639 return true; 7640 } 7641 7642 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7643 int alloc_flags) 7644 { 7645 long to_accept, wmark; 7646 bool ret = false; 7647 7648 if (list_empty(&zone->unaccepted_pages)) 7649 return false; 7650 7651 /* Bailout, since try_to_accept_memory_one() needs to take a lock */ 7652 if (alloc_flags & ALLOC_TRYLOCK) 7653 return false; 7654 7655 wmark = promo_wmark_pages(zone); 7656 7657 /* 7658 * Watermarks have not been initialized yet. 7659 * 7660 * Accepting one MAX_ORDER page to ensure progress. 7661 */ 7662 if (!wmark) 7663 return try_to_accept_memory_one(zone); 7664 7665 /* How much to accept to get to promo watermark? 
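	 *
	 * Unaccepted pages are counted in NR_FREE_PAGES but cannot satisfy
	 * allocations yet, so the expression below is equivalent to:
	 *
	 *	usable    = NR_FREE_PAGES - unusable_free - NR_UNACCEPTED
	 *	to_accept = wmark - usable
	 *
	 * and MAX_ORDER chunks are then accepted until the deficit is covered.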
	 */
7666 	to_accept = wmark -
7667 		    (zone_page_state(zone, NR_FREE_PAGES) -
7668 		     __zone_watermark_unusable_free(zone, order, 0) -
7669 		     zone_page_state(zone, NR_UNACCEPTED));
7670 
7671 	while (to_accept > 0) {
7672 		if (!try_to_accept_memory_one(zone))
7673 			break;
7674 		ret = true;
7675 		to_accept -= MAX_ORDER_NR_PAGES;
7676 	}
7677 
7678 	return ret;
7679 }
7680 
7681 static bool __free_unaccepted(struct page *page)
7682 {
7683 	struct zone *zone = page_zone(page);
7684 	unsigned long flags;
7685 
7686 	if (!lazy_accept)
7687 		return false;
7688 
7689 	spin_lock_irqsave(&zone->lock, flags);
7690 	list_add_tail(&page->lru, &zone->unaccepted_pages);
7691 	account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7692 	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
7693 	__SetPageUnaccepted(page);
7694 	spin_unlock_irqrestore(&zone->lock, flags);
7695 
7696 	return true;
7697 }
7698 
7699 #else
7700 
7701 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7702 {
7703 	return false;
7704 }
7705 
7706 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7707 			       int alloc_flags)
7708 {
7709 	return false;
7710 }
7711 
7712 static bool __free_unaccepted(struct page *page)
7713 {
7714 	BUILD_BUG();
7715 	return false;
7716 }
7717 
7718 #endif /* CONFIG_UNACCEPTED_MEMORY */
7719 
7720 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7721 {
7722 	/*
7723 	 * Do not specify __GFP_DIRECT_RECLAIM, since direct reclaim is not allowed.
7724 	 * Do not specify __GFP_KSWAPD_RECLAIM either, since waking up kswapd
7725 	 * is not safe in arbitrary context.
7726 	 *
7727 	 * These two are the conditions for gfpflags_allow_spinning() being true.
7728 	 *
7729 	 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason
7730 	 * to warn. Warning would also trigger printk(), which is unsafe from
7731 	 * various contexts. We cannot use printk_deferred_enter() to mitigate,
7732 	 * since the running context is unknown.
7733 	 *
7734 	 * Specify __GFP_ZERO to make sure that the call to kmsan_alloc_page()
7735 	 * below is safe in any context. Also, zeroing the page is mandatory for
7736 	 * BPF use cases.
7737 	 *
7738 	 * Though __GFP_NOMEMALLOC is not checked in the code path below,
7739 	 * specify it here to highlight that alloc_pages_nolock()
7740 	 * doesn't want to deplete reserves.
7741 	 */
7742 	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP
7743 			  | gfp_flags;
7744 	unsigned int alloc_flags = ALLOC_TRYLOCK;
7745 	struct alloc_context ac = { };
7746 	struct page *page;
7747 
7748 	VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT);
7749 	/*
7750 	 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
7751 	 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current
7752 	 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
7753 	 * mark the task as the owner of another rt_spin_lock which will
7754 	 * confuse PI logic, so return immediately if called from hard IRQ or
7755 	 * NMI.
7756 	 *
7757 	 * Note, the irqs_disabled() case is ok. This function can be called
7758 	 * from a raw_spin_lock_irqsave region.
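	 *
	 * Summary of the always-fail conditions checked below:
	 *  - PREEMPT_RT kernel and the caller is in NMI or hard IRQ context;
	 *  - the requested order cannot be served from the pcp lists;
	 *  - deferred struct page initialization has not finished yet.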
7759 	 */
7760 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
7761 		return NULL;
7762 	if (!pcp_allowed_order(order))
7763 		return NULL;
7764 
7765 	/* Bailout, since _deferred_grow_zone() needs to take a lock */
7766 	if (deferred_pages_enabled())
7767 		return NULL;
7768 
7769 	if (nid == NUMA_NO_NODE)
7770 		nid = numa_node_id();
7771 
7772 	prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
7773 			    &alloc_gfp, &alloc_flags);
7774 
7775 	/*
7776 	 * Best-effort allocation from the percpu free list.
7777 	 * If it's empty, attempt to spin_trylock zone->lock.
7778 	 */
7779 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
7780 
7781 	/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
7782 
7783 	if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) &&
7784 	    unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
7785 		__free_frozen_pages(page, order, FPI_TRYLOCK);
7786 		page = NULL;
7787 	}
7788 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
7789 	kmsan_alloc_page(page, order, alloc_gfp);
7790 	return page;
7791 }
7792 /**
7793  * alloc_pages_nolock - opportunistic reentrant allocation from any context
7794  * @gfp_flags: GFP flags. Only __GFP_ACCOUNT is allowed.
7795  * @nid: node to allocate from
7796  * @order: allocation order
7797  *
7798  * Allocates pages of the given order from the given node. This is safe to
7799  * call from any context (from atomic, NMI, and also reentrant
7800  * allocator -> tracepoint -> alloc_pages_nolock_noprof).
7801  * Allocation is best effort and expected to fail easily, so nobody should
7802  * rely on it succeeding. Failures are not reported via warn_alloc().
7803  * See the always-fail conditions below.
7804  *
7805  * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
7806  * It means ENOMEM. There is no reason to call it again and expect !NULL.
7807  */
7808 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7809 {
7810 	struct page *page;
7811 
7812 	page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order);
7813 	if (page)
7814 		set_page_refcounted(page);
7815 	return page;
7816 }
7817 EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);
7818 
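/*
 * Example (illustrative only, not a caller in this file): a best-effort
 * order-0 allocation that is safe from any context. Callers must tolerate
 * NULL, and NULL means ENOMEM rather than "try again later".
 *
 *	struct page *page = alloc_pages_nolock(0, NUMA_NO_NODE, 0);
 *
 *	if (page) {
 *		... use page_address(page); the memory is already zeroed ...
 *		__free_page(page);
 *	}
 *
 * The gfp_flags argument may only add __GFP_ACCOUNT; all other flags are
 * chosen by alloc_frozen_pages_nolock_noprof() itself.
 */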