// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/pagevec.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <linux/cacheinfo.h>
#include <linux/pgalloc_tag.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* Free the page without taking locks. Rely on trylock only.
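 *
 * For example, free_one_page() later in this file honours the flag by taking
 * zone->lock with spin_trylock_irqsave(): if the lock is contended, the page
 * is parked on zone->trylock_free_pages via add_page_to_zone_llist() and is
 * freed by the next regular (non-trylock) free that does acquire zone->lock.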
*/ 92 #define FPI_TRYLOCK ((__force fpi_t)BIT(2)) 93 94 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 95 static DEFINE_MUTEX(pcp_batch_high_lock); 96 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 97 98 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 99 /* 100 * On SMP, spin_trylock is sufficient protection. 101 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 102 */ 103 #define pcp_trylock_prepare(flags) do { } while (0) 104 #define pcp_trylock_finish(flag) do { } while (0) 105 #else 106 107 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ 108 #define pcp_trylock_prepare(flags) local_irq_save(flags) 109 #define pcp_trylock_finish(flags) local_irq_restore(flags) 110 #endif 111 112 /* 113 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid 114 * a migration causing the wrong PCP to be locked and remote memory being 115 * potentially allocated, pin the task to the CPU for the lookup+lock. 116 * preempt_disable is used on !RT because it is faster than migrate_disable. 117 * migrate_disable is used on RT because otherwise RT spinlock usage is 118 * interfered with and a high priority task cannot preempt the allocator. 119 */ 120 #ifndef CONFIG_PREEMPT_RT 121 #define pcpu_task_pin() preempt_disable() 122 #define pcpu_task_unpin() preempt_enable() 123 #else 124 #define pcpu_task_pin() migrate_disable() 125 #define pcpu_task_unpin() migrate_enable() 126 #endif 127 128 /* 129 * Generic helper to lookup and a per-cpu variable with an embedded spinlock. 130 * Return value should be used with equivalent unlock helper. 131 */ 132 #define pcpu_spin_lock(type, member, ptr) \ 133 ({ \ 134 type *_ret; \ 135 pcpu_task_pin(); \ 136 _ret = this_cpu_ptr(ptr); \ 137 spin_lock(&_ret->member); \ 138 _ret; \ 139 }) 140 141 #define pcpu_spin_trylock(type, member, ptr) \ 142 ({ \ 143 type *_ret; \ 144 pcpu_task_pin(); \ 145 _ret = this_cpu_ptr(ptr); \ 146 if (!spin_trylock(&_ret->member)) { \ 147 pcpu_task_unpin(); \ 148 _ret = NULL; \ 149 } \ 150 _ret; \ 151 }) 152 153 #define pcpu_spin_unlock(member, ptr) \ 154 ({ \ 155 spin_unlock(&ptr->member); \ 156 pcpu_task_unpin(); \ 157 }) 158 159 /* struct per_cpu_pages specific helpers. */ 160 #define pcp_spin_lock(ptr) \ 161 pcpu_spin_lock(struct per_cpu_pages, lock, ptr) 162 163 #define pcp_spin_trylock(ptr) \ 164 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr) 165 166 #define pcp_spin_unlock(ptr) \ 167 pcpu_spin_unlock(lock, ptr) 168 169 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 170 DEFINE_PER_CPU(int, numa_node); 171 EXPORT_PER_CPU_SYMBOL(numa_node); 172 #endif 173 174 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 175 176 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 177 /* 178 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 179 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 180 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 181 * defined in <linux/topology.h>. 182 */ 183 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 184 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 185 #endif 186 187 static DEFINE_MUTEX(pcpu_drain_mutex); 188 189 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 190 volatile unsigned long latent_entropy __latent_entropy; 191 EXPORT_SYMBOL(latent_entropy); 192 #endif 193 194 /* 195 * Array of node states. 
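/*
 * A rough usage sketch of the pcp_spin_trylock()/pcp_spin_unlock() helpers
 * above, paired with pcp_trylock_prepare()/pcp_trylock_finish() (which only
 * do work on !SMP, !PREEMPT_RT builds). The zone->per_cpu_pageset pointer,
 * the "..." body and the bare failure return are illustrative here rather
 * than copied from a real caller:
 *
 *	struct per_cpu_pages *pcp;
 *	unsigned long __maybe_unused UP_flags;
 *
 *	pcp_trylock_prepare(UP_flags);
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (!pcp) {
 *		pcp_trylock_finish(UP_flags);
 *		return NULL;		// contended: fall back to the buddy path
 *	}
 *	// ... operate on pcp->lists / pcp->count with the task pinned ...
 *	pcp_spin_unlock(pcp);
 *	pcp_trylock_finish(UP_flags);
 */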
196 */ 197 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 198 [N_POSSIBLE] = NODE_MASK_ALL, 199 [N_ONLINE] = { { [0] = 1UL } }, 200 #ifndef CONFIG_NUMA 201 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 202 #ifdef CONFIG_HIGHMEM 203 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 204 #endif 205 [N_MEMORY] = { { [0] = 1UL } }, 206 [N_CPU] = { { [0] = 1UL } }, 207 #endif /* NUMA */ 208 }; 209 EXPORT_SYMBOL(node_states); 210 211 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 212 213 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 214 unsigned int pageblock_order __read_mostly; 215 #endif 216 217 static void __free_pages_ok(struct page *page, unsigned int order, 218 fpi_t fpi_flags); 219 220 /* 221 * results with 256, 32 in the lowmem_reserve sysctl: 222 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 223 * 1G machine -> (16M dma, 784M normal, 224M high) 224 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 225 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 226 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 227 * 228 * TBD: should special case ZONE_DMA32 machines here - in those we normally 229 * don't need any ZONE_NORMAL reservation 230 */ 231 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 232 #ifdef CONFIG_ZONE_DMA 233 [ZONE_DMA] = 256, 234 #endif 235 #ifdef CONFIG_ZONE_DMA32 236 [ZONE_DMA32] = 256, 237 #endif 238 [ZONE_NORMAL] = 32, 239 #ifdef CONFIG_HIGHMEM 240 [ZONE_HIGHMEM] = 0, 241 #endif 242 [ZONE_MOVABLE] = 0, 243 }; 244 245 char * const zone_names[MAX_NR_ZONES] = { 246 #ifdef CONFIG_ZONE_DMA 247 "DMA", 248 #endif 249 #ifdef CONFIG_ZONE_DMA32 250 "DMA32", 251 #endif 252 "Normal", 253 #ifdef CONFIG_HIGHMEM 254 "HighMem", 255 #endif 256 "Movable", 257 #ifdef CONFIG_ZONE_DEVICE 258 "Device", 259 #endif 260 }; 261 262 const char * const migratetype_names[MIGRATE_TYPES] = { 263 "Unmovable", 264 "Movable", 265 "Reclaimable", 266 "HighAtomic", 267 #ifdef CONFIG_CMA 268 "CMA", 269 #endif 270 #ifdef CONFIG_MEMORY_ISOLATION 271 "Isolate", 272 #endif 273 }; 274 275 int min_free_kbytes = 1024; 276 int user_min_free_kbytes = -1; 277 static int watermark_boost_factor __read_mostly = 15000; 278 static int watermark_scale_factor = 10; 279 int defrag_mode; 280 281 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 282 int movable_zone; 283 EXPORT_SYMBOL(movable_zone); 284 285 #if MAX_NUMNODES > 1 286 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 287 unsigned int nr_online_nodes __read_mostly = 1; 288 EXPORT_SYMBOL(nr_node_ids); 289 EXPORT_SYMBOL(nr_online_nodes); 290 #endif 291 292 static bool page_contains_unaccepted(struct page *page, unsigned int order); 293 static bool cond_accept_memory(struct zone *zone, unsigned int order, 294 int alloc_flags); 295 static bool __free_unaccepted(struct page *page); 296 297 int page_group_by_mobility_disabled __read_mostly; 298 299 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 300 /* 301 * During boot we initialize deferred pages on-demand, as needed, but once 302 * page_alloc_init_late() has finished, the deferred pages are all initialized, 303 * and we can permanently disable that path. 304 */ 305 DEFINE_STATIC_KEY_TRUE(deferred_pages); 306 307 static inline bool deferred_pages_enabled(void) 308 { 309 return static_branch_unlikely(&deferred_pages); 310 } 311 312 /* 313 * deferred_grow_zone() is __init, but it is called from 314 * get_page_from_freelist() during early boot until deferred_pages permanently 315 * disables this call. 
This is why we have refdata wrapper to avoid warning, 316 * and to ensure that the function body gets unloaded. 317 */ 318 static bool __ref 319 _deferred_grow_zone(struct zone *zone, unsigned int order) 320 { 321 return deferred_grow_zone(zone, order); 322 } 323 #else 324 static inline bool deferred_pages_enabled(void) 325 { 326 return false; 327 } 328 329 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) 330 { 331 return false; 332 } 333 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 334 335 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 336 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 337 unsigned long pfn) 338 { 339 #ifdef CONFIG_SPARSEMEM 340 return section_to_usemap(__pfn_to_section(pfn)); 341 #else 342 return page_zone(page)->pageblock_flags; 343 #endif /* CONFIG_SPARSEMEM */ 344 } 345 346 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 347 { 348 #ifdef CONFIG_SPARSEMEM 349 pfn &= (PAGES_PER_SECTION-1); 350 #else 351 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 352 #endif /* CONFIG_SPARSEMEM */ 353 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 354 } 355 356 static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit) 357 { 358 return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS; 359 } 360 361 static __always_inline void 362 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn, 363 unsigned long **bitmap_word, unsigned long *bitidx) 364 { 365 unsigned long *bitmap; 366 unsigned long word_bitidx; 367 368 #ifdef CONFIG_MEMORY_ISOLATION 369 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8); 370 #else 371 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 372 #endif 373 BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK); 374 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 375 376 bitmap = get_pageblock_bitmap(page, pfn); 377 *bitidx = pfn_to_bitidx(page, pfn); 378 word_bitidx = *bitidx / BITS_PER_LONG; 379 *bitidx &= (BITS_PER_LONG - 1); 380 *bitmap_word = &bitmap[word_bitidx]; 381 } 382 383 384 /** 385 * __get_pfnblock_flags_mask - Return the requested group of flags for 386 * a pageblock_nr_pages block of pages 387 * @page: The page within the block of interest 388 * @pfn: The target page frame number 389 * @mask: mask of bits that the caller is interested in 390 * 391 * Return: pageblock_bits flags 392 */ 393 static unsigned long __get_pfnblock_flags_mask(const struct page *page, 394 unsigned long pfn, 395 unsigned long mask) 396 { 397 unsigned long *bitmap_word; 398 unsigned long bitidx; 399 unsigned long word; 400 401 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 402 /* 403 * This races, without locks, with set_pfnblock_migratetype(). Ensure 404 * a consistent read of the memory array, so that results, even though 405 * racy, are not corrupted. 
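/*
 * A worked example of the index math in get_pfnblock_bitmap_bitidx() above,
 * under common x86-64 defaults (assumptions of this example, not guarantees):
 * SPARSEMEM with PAGES_PER_SECTION == 1 << 15, pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4 (i.e. no CONFIG_MEMORY_ISOLATION). For pfn 0x12345:
 *
 *	pfn &= PAGES_PER_SECTION - 1;		// 0x2345
 *	bitidx = (0x2345 >> 9) * 4;		// 17 * 4 = 68
 *	word_bitidx = 68 / BITS_PER_LONG;	// 1
 *	bitidx &= BITS_PER_LONG - 1;		// 4
 *
 * i.e. the migratetype bits for this pageblock sit in bits 4-7 of the second
 * word of the section's usemap.
 */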
406 */ 407 word = READ_ONCE(*bitmap_word); 408 return (word >> bitidx) & mask; 409 } 410 411 /** 412 * get_pfnblock_bit - Check if a standalone bit of a pageblock is set 413 * @page: The page within the block of interest 414 * @pfn: The target page frame number 415 * @pb_bit: pageblock bit to check 416 * 417 * Return: true if the bit is set, otherwise false 418 */ 419 bool get_pfnblock_bit(const struct page *page, unsigned long pfn, 420 enum pageblock_bits pb_bit) 421 { 422 unsigned long *bitmap_word; 423 unsigned long bitidx; 424 425 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 426 return false; 427 428 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 429 430 return test_bit(bitidx + pb_bit, bitmap_word); 431 } 432 433 /** 434 * get_pfnblock_migratetype - Return the migratetype of a pageblock 435 * @page: The page within the block of interest 436 * @pfn: The target page frame number 437 * 438 * Return: The migratetype of the pageblock 439 * 440 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn 441 * to save a call to page_to_pfn(). 442 */ 443 __always_inline enum migratetype 444 get_pfnblock_migratetype(const struct page *page, unsigned long pfn) 445 { 446 unsigned long mask = MIGRATETYPE_AND_ISO_MASK; 447 unsigned long flags; 448 449 flags = __get_pfnblock_flags_mask(page, pfn, mask); 450 451 #ifdef CONFIG_MEMORY_ISOLATION 452 if (flags & BIT(PB_migrate_isolate)) 453 return MIGRATE_ISOLATE; 454 #endif 455 return flags & MIGRATETYPE_MASK; 456 } 457 458 /** 459 * __set_pfnblock_flags_mask - Set the requested group of flags for 460 * a pageblock_nr_pages block of pages 461 * @page: The page within the block of interest 462 * @pfn: The target page frame number 463 * @flags: The flags to set 464 * @mask: mask of bits that the caller is interested in 465 */ 466 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn, 467 unsigned long flags, unsigned long mask) 468 { 469 unsigned long *bitmap_word; 470 unsigned long bitidx; 471 unsigned long word; 472 473 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 474 475 mask <<= bitidx; 476 flags <<= bitidx; 477 478 word = READ_ONCE(*bitmap_word); 479 do { 480 } while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags)); 481 } 482 483 /** 484 * set_pfnblock_bit - Set a standalone bit of a pageblock 485 * @page: The page within the block of interest 486 * @pfn: The target page frame number 487 * @pb_bit: pageblock bit to set 488 */ 489 void set_pfnblock_bit(const struct page *page, unsigned long pfn, 490 enum pageblock_bits pb_bit) 491 { 492 unsigned long *bitmap_word; 493 unsigned long bitidx; 494 495 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 496 return; 497 498 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 499 500 set_bit(bitidx + pb_bit, bitmap_word); 501 } 502 503 /** 504 * clear_pfnblock_bit - Clear a standalone bit of a pageblock 505 * @page: The page within the block of interest 506 * @pfn: The target page frame number 507 * @pb_bit: pageblock bit to clear 508 */ 509 void clear_pfnblock_bit(const struct page *page, unsigned long pfn, 510 enum pageblock_bits pb_bit) 511 { 512 unsigned long *bitmap_word; 513 unsigned long bitidx; 514 515 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 516 return; 517 518 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 519 520 clear_bit(bitidx + pb_bit, bitmap_word); 521 } 522 523 /** 524 * set_pageblock_migratetype - Set the migratetype of a pageblock 525 * @page: The page within the block of 
interest 526 * @migratetype: migratetype to set 527 */ 528 static void set_pageblock_migratetype(struct page *page, 529 enum migratetype migratetype) 530 { 531 if (unlikely(page_group_by_mobility_disabled && 532 migratetype < MIGRATE_PCPTYPES)) 533 migratetype = MIGRATE_UNMOVABLE; 534 535 #ifdef CONFIG_MEMORY_ISOLATION 536 if (migratetype == MIGRATE_ISOLATE) { 537 VM_WARN_ONCE(1, 538 "Use set_pageblock_isolate() for pageblock isolation"); 539 return; 540 } 541 VM_WARN_ONCE(get_pageblock_isolate(page), 542 "Use clear_pageblock_isolate() to unisolate pageblock"); 543 /* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */ 544 #endif 545 __set_pfnblock_flags_mask(page, page_to_pfn(page), 546 (unsigned long)migratetype, 547 MIGRATETYPE_AND_ISO_MASK); 548 } 549 550 void __meminit init_pageblock_migratetype(struct page *page, 551 enum migratetype migratetype, 552 bool isolate) 553 { 554 unsigned long flags; 555 556 if (unlikely(page_group_by_mobility_disabled && 557 migratetype < MIGRATE_PCPTYPES)) 558 migratetype = MIGRATE_UNMOVABLE; 559 560 flags = migratetype; 561 562 #ifdef CONFIG_MEMORY_ISOLATION 563 if (migratetype == MIGRATE_ISOLATE) { 564 VM_WARN_ONCE( 565 1, 566 "Set isolate=true to isolate pageblock with a migratetype"); 567 return; 568 } 569 if (isolate) 570 flags |= BIT(PB_migrate_isolate); 571 #endif 572 __set_pfnblock_flags_mask(page, page_to_pfn(page), flags, 573 MIGRATETYPE_AND_ISO_MASK); 574 } 575 576 #ifdef CONFIG_DEBUG_VM 577 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 578 { 579 int ret; 580 unsigned seq; 581 unsigned long pfn = page_to_pfn(page); 582 unsigned long sp, start_pfn; 583 584 do { 585 seq = zone_span_seqbegin(zone); 586 start_pfn = zone->zone_start_pfn; 587 sp = zone->spanned_pages; 588 ret = !zone_spans_pfn(zone, pfn); 589 } while (zone_span_seqretry(zone, seq)); 590 591 if (ret) 592 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 593 pfn, zone_to_nid(zone), zone->name, 594 start_pfn, start_pfn + sp); 595 596 return ret; 597 } 598 599 /* 600 * Temporary debugging check for pages not lying within a given zone. 601 */ 602 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 603 { 604 if (page_outside_zone_boundaries(zone, page)) 605 return true; 606 if (zone != page_zone(page)) 607 return true; 608 609 return false; 610 } 611 #else 612 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 613 { 614 return false; 615 } 616 #endif 617 618 static void bad_page(struct page *page, const char *reason) 619 { 620 static unsigned long resume; 621 static unsigned long nr_shown; 622 static unsigned long nr_unshown; 623 624 /* 625 * Allow a burst of 60 reports, then keep quiet for that minute; 626 * or allow a steady drip of one report per second. 
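 *
 * For example, if 100 bad pages are reported within one second, the first
 * report sets "resume" one minute out, reports 1-60 are printed and the
 * remaining 40 only bump nr_unshown; the next bad page seen after "resume"
 * has passed first prints a "40 messages suppressed" line and then starts a
 * fresh burst (and a fresh minute).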
627 */ 628 if (nr_shown == 60) { 629 if (time_before(jiffies, resume)) { 630 nr_unshown++; 631 goto out; 632 } 633 if (nr_unshown) { 634 pr_alert( 635 "BUG: Bad page state: %lu messages suppressed\n", 636 nr_unshown); 637 nr_unshown = 0; 638 } 639 nr_shown = 0; 640 } 641 if (nr_shown++ == 0) 642 resume = jiffies + 60 * HZ; 643 644 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 645 current->comm, page_to_pfn(page)); 646 dump_page(page, reason); 647 648 print_modules(); 649 dump_stack(); 650 out: 651 /* Leave bad fields for debug, except PageBuddy could make trouble */ 652 if (PageBuddy(page)) 653 __ClearPageBuddy(page); 654 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 655 } 656 657 static inline unsigned int order_to_pindex(int migratetype, int order) 658 { 659 660 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 661 bool movable; 662 if (order > PAGE_ALLOC_COSTLY_ORDER) { 663 VM_BUG_ON(order != HPAGE_PMD_ORDER); 664 665 movable = migratetype == MIGRATE_MOVABLE; 666 667 return NR_LOWORDER_PCP_LISTS + movable; 668 } 669 #else 670 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 671 #endif 672 673 return (MIGRATE_PCPTYPES * order) + migratetype; 674 } 675 676 static inline int pindex_to_order(unsigned int pindex) 677 { 678 int order = pindex / MIGRATE_PCPTYPES; 679 680 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 681 if (pindex >= NR_LOWORDER_PCP_LISTS) 682 order = HPAGE_PMD_ORDER; 683 #else 684 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 685 #endif 686 687 return order; 688 } 689 690 static inline bool pcp_allowed_order(unsigned int order) 691 { 692 if (order <= PAGE_ALLOC_COSTLY_ORDER) 693 return true; 694 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 695 if (order == HPAGE_PMD_ORDER) 696 return true; 697 #endif 698 return false; 699 } 700 701 /* 702 * Higher-order pages are called "compound pages". They are structured thusly: 703 * 704 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 705 * 706 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 707 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 708 * 709 * The first tail page's ->compound_order holds the order of allocation. 710 * This usage means that zero-order pages may not be compound. 711 */ 712 713 void prep_compound_page(struct page *page, unsigned int order) 714 { 715 int i; 716 int nr_pages = 1 << order; 717 718 __SetPageHead(page); 719 for (i = 1; i < nr_pages; i++) 720 prep_compound_tail(page, i); 721 722 prep_compound_head(page, order); 723 } 724 725 static inline void set_buddy_order(struct page *page, unsigned int order) 726 { 727 set_page_private(page, order); 728 __SetPageBuddy(page); 729 } 730 731 #ifdef CONFIG_COMPACTION 732 static inline struct capture_control *task_capc(struct zone *zone) 733 { 734 struct capture_control *capc = current->capture_control; 735 736 return unlikely(capc) && 737 !(current->flags & PF_KTHREAD) && 738 !capc->page && 739 capc->cc->zone == zone ? capc : NULL; 740 } 741 742 static inline bool 743 compaction_capture(struct capture_control *capc, struct page *page, 744 int order, int migratetype) 745 { 746 if (!capc || order != capc->cc->order) 747 return false; 748 749 /* Do not accidentally pollute CMA or isolated regions*/ 750 if (is_migrate_cma(migratetype) || 751 is_migrate_isolate(migratetype)) 752 return false; 753 754 /* 755 * Do not let lower order allocations pollute a movable pageblock 756 * unless compaction is also requesting movable pages. 
757 * This might let an unmovable request use a reclaimable pageblock 758 * and vice-versa but no more than normal fallback logic which can 759 * have trouble finding a high-order free page. 760 */ 761 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 762 capc->cc->migratetype != MIGRATE_MOVABLE) 763 return false; 764 765 if (migratetype != capc->cc->migratetype) 766 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, 767 capc->cc->migratetype, migratetype); 768 769 capc->page = page; 770 return true; 771 } 772 773 #else 774 static inline struct capture_control *task_capc(struct zone *zone) 775 { 776 return NULL; 777 } 778 779 static inline bool 780 compaction_capture(struct capture_control *capc, struct page *page, 781 int order, int migratetype) 782 { 783 return false; 784 } 785 #endif /* CONFIG_COMPACTION */ 786 787 static inline void account_freepages(struct zone *zone, int nr_pages, 788 int migratetype) 789 { 790 lockdep_assert_held(&zone->lock); 791 792 if (is_migrate_isolate(migratetype)) 793 return; 794 795 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 796 797 if (is_migrate_cma(migratetype)) 798 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 799 else if (migratetype == MIGRATE_HIGHATOMIC) 800 WRITE_ONCE(zone->nr_free_highatomic, 801 zone->nr_free_highatomic + nr_pages); 802 } 803 804 /* Used for pages not on another list */ 805 static inline void __add_to_free_list(struct page *page, struct zone *zone, 806 unsigned int order, int migratetype, 807 bool tail) 808 { 809 struct free_area *area = &zone->free_area[order]; 810 int nr_pages = 1 << order; 811 812 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 813 "page type is %d, passed migratetype is %d (nr=%d)\n", 814 get_pageblock_migratetype(page), migratetype, nr_pages); 815 816 if (tail) 817 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 818 else 819 list_add(&page->buddy_list, &area->free_list[migratetype]); 820 area->nr_free++; 821 822 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 823 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 824 } 825 826 /* 827 * Used for pages which are on another list. Move the pages to the tail 828 * of the list - so the moved pages won't immediately be considered for 829 * allocation again (e.g., optimization for memory onlining). 
830 */ 831 static inline void move_to_free_list(struct page *page, struct zone *zone, 832 unsigned int order, int old_mt, int new_mt) 833 { 834 struct free_area *area = &zone->free_area[order]; 835 int nr_pages = 1 << order; 836 837 /* Free page moving can fail, so it happens before the type update */ 838 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, 839 "page type is %d, passed migratetype is %d (nr=%d)\n", 840 get_pageblock_migratetype(page), old_mt, nr_pages); 841 842 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); 843 844 account_freepages(zone, -nr_pages, old_mt); 845 account_freepages(zone, nr_pages, new_mt); 846 847 if (order >= pageblock_order && 848 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { 849 if (!is_migrate_isolate(old_mt)) 850 nr_pages = -nr_pages; 851 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 852 } 853 } 854 855 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, 856 unsigned int order, int migratetype) 857 { 858 int nr_pages = 1 << order; 859 860 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 861 "page type is %d, passed migratetype is %d (nr=%d)\n", 862 get_pageblock_migratetype(page), migratetype, nr_pages); 863 864 /* clear reported state and update reported page count */ 865 if (page_reported(page)) 866 __ClearPageReported(page); 867 868 list_del(&page->buddy_list); 869 __ClearPageBuddy(page); 870 set_page_private(page, 0); 871 zone->free_area[order].nr_free--; 872 873 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 874 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); 875 } 876 877 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 878 unsigned int order, int migratetype) 879 { 880 __del_page_from_free_list(page, zone, order, migratetype); 881 account_freepages(zone, -(1 << order), migratetype); 882 } 883 884 static inline struct page *get_page_from_free_area(struct free_area *area, 885 int migratetype) 886 { 887 return list_first_entry_or_null(&area->free_list[migratetype], 888 struct page, buddy_list); 889 } 890 891 /* 892 * If this is less than the 2nd largest possible page, check if the buddy 893 * of the next-higher order is free. If it is, it's possible 894 * that pages are being freed that will coalesce soon. In case, 895 * that is happening, add the free page to the tail of the list 896 * so it's less likely to be used soon and more likely to be merged 897 * as a 2-level higher order page 898 */ 899 static inline bool 900 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 901 struct page *page, unsigned int order) 902 { 903 unsigned long higher_page_pfn; 904 struct page *higher_page; 905 906 if (order >= MAX_PAGE_ORDER - 1) 907 return false; 908 909 higher_page_pfn = buddy_pfn & pfn; 910 higher_page = page + (higher_page_pfn - pfn); 911 912 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 913 NULL) != NULL; 914 } 915 916 /* 917 * Freeing function for a buddy system allocator. 918 * 919 * The concept of a buddy system is to maintain direct-mapped table 920 * (containing bit values) for memory blocks of various "orders". 921 * The bottom level table contains the map for the smallest allocatable 922 * units of memory (here, pages), and each level above it describes 923 * pairs of units from the levels below, hence, "buddies". 
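 *
 * Concretely, the buddy of a free order-n block at pfn P is P ^ (1 << n):
 * the order-2 blocks at pfn 8 and pfn 12 are buddies, and once both are free
 * they merge into the order-3 block at pfn 8 (combined_pfn = buddy_pfn & pfn).
 * __free_one_page() below repeats that step until the buddy is no longer
 * free (or MAX_PAGE_ORDER is reached).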
924 * At a high level, all that happens here is marking the table entry 925 * at the bottom level available, and propagating the changes upward 926 * as necessary, plus some accounting needed to play nicely with other 927 * parts of the VM system. 928 * At each level, we keep a list of pages, which are heads of continuous 929 * free pages of length of (1 << order) and marked with PageBuddy. 930 * Page's order is recorded in page_private(page) field. 931 * So when we are allocating or freeing one, we can derive the state of the 932 * other. That is, if we allocate a small block, and both were 933 * free, the remainder of the region must be split into blocks. 934 * If a block is freed, and its buddy is also free, then this 935 * triggers coalescing into a block of larger size. 936 * 937 * -- nyc 938 */ 939 940 static inline void __free_one_page(struct page *page, 941 unsigned long pfn, 942 struct zone *zone, unsigned int order, 943 int migratetype, fpi_t fpi_flags) 944 { 945 struct capture_control *capc = task_capc(zone); 946 unsigned long buddy_pfn = 0; 947 unsigned long combined_pfn; 948 struct page *buddy; 949 bool to_tail; 950 951 VM_BUG_ON(!zone_is_initialized(zone)); 952 VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page); 953 954 VM_BUG_ON(migratetype == -1); 955 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 956 VM_BUG_ON_PAGE(bad_range(zone, page), page); 957 958 account_freepages(zone, 1 << order, migratetype); 959 960 while (order < MAX_PAGE_ORDER) { 961 int buddy_mt = migratetype; 962 963 if (compaction_capture(capc, page, order, migratetype)) { 964 account_freepages(zone, -(1 << order), migratetype); 965 return; 966 } 967 968 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 969 if (!buddy) 970 goto done_merging; 971 972 if (unlikely(order >= pageblock_order)) { 973 /* 974 * We want to prevent merge between freepages on pageblock 975 * without fallbacks and normal pageblock. Without this, 976 * pageblock isolation could cause incorrect freepage or CMA 977 * accounting or HIGHATOMIC accounting. 978 */ 979 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 980 981 if (migratetype != buddy_mt && 982 (!migratetype_is_mergeable(migratetype) || 983 !migratetype_is_mergeable(buddy_mt))) 984 goto done_merging; 985 } 986 987 /* 988 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 989 * merge with it and move up one order. 990 */ 991 if (page_is_guard(buddy)) 992 clear_page_guard(zone, buddy, order); 993 else 994 __del_page_from_free_list(buddy, zone, order, buddy_mt); 995 996 if (unlikely(buddy_mt != migratetype)) { 997 /* 998 * Match buddy type. This ensures that an 999 * expand() down the line puts the sub-blocks 1000 * on the right freelists. 1001 */ 1002 set_pageblock_migratetype(buddy, migratetype); 1003 } 1004 1005 combined_pfn = buddy_pfn & pfn; 1006 page = page + (combined_pfn - pfn); 1007 pfn = combined_pfn; 1008 order++; 1009 } 1010 1011 done_merging: 1012 set_buddy_order(page, order); 1013 1014 if (fpi_flags & FPI_TO_TAIL) 1015 to_tail = true; 1016 else if (is_shuffle_order(order)) 1017 to_tail = shuffle_pick_tail(); 1018 else 1019 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 1020 1021 __add_to_free_list(page, zone, order, migratetype, to_tail); 1022 1023 /* Notify page reporting subsystem of freed page */ 1024 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 1025 page_reporting_notify_free(order); 1026 } 1027 1028 /* 1029 * A bad page could be due to a number of fields. 
Instead of multiple branches, 1030 * try and check multiple fields with one check. The caller must do a detailed 1031 * check if necessary. 1032 */ 1033 static inline bool page_expected_state(struct page *page, 1034 unsigned long check_flags) 1035 { 1036 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1037 return false; 1038 1039 if (unlikely((unsigned long)page->mapping | 1040 page_ref_count(page) | 1041 #ifdef CONFIG_MEMCG 1042 page->memcg_data | 1043 #endif 1044 page_pool_page_is_pp(page) | 1045 (page->flags.f & check_flags))) 1046 return false; 1047 1048 return true; 1049 } 1050 1051 static const char *page_bad_reason(struct page *page, unsigned long flags) 1052 { 1053 const char *bad_reason = NULL; 1054 1055 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1056 bad_reason = "nonzero mapcount"; 1057 if (unlikely(page->mapping != NULL)) 1058 bad_reason = "non-NULL mapping"; 1059 if (unlikely(page_ref_count(page) != 0)) 1060 bad_reason = "nonzero _refcount"; 1061 if (unlikely(page->flags.f & flags)) { 1062 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 1063 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 1064 else 1065 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 1066 } 1067 #ifdef CONFIG_MEMCG 1068 if (unlikely(page->memcg_data)) 1069 bad_reason = "page still charged to cgroup"; 1070 #endif 1071 if (unlikely(page_pool_page_is_pp(page))) 1072 bad_reason = "page_pool leak"; 1073 return bad_reason; 1074 } 1075 1076 static inline bool free_page_is_bad(struct page *page) 1077 { 1078 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 1079 return false; 1080 1081 /* Something has gone sideways, find it */ 1082 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 1083 return true; 1084 } 1085 1086 static inline bool is_check_pages_enabled(void) 1087 { 1088 return static_branch_unlikely(&check_pages_enabled); 1089 } 1090 1091 static int free_tail_page_prepare(struct page *head_page, struct page *page) 1092 { 1093 struct folio *folio = (struct folio *)head_page; 1094 int ret = 1; 1095 1096 /* 1097 * We rely page->lru.next never has bit 0 set, unless the page 1098 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
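 *
 * PageTail() is encoded in bit 0 of page->compound_head, which overlays
 * ->lru.next: a tail page stores the head page pointer plus 1 there. With
 * the usual poison values - e.g. LIST_POISON1 is 0xdead000000000100 on
 * x86-64 - bit 0 stays clear, which is exactly what the BUILD_BUG_ON()
 * below checks.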
1099 */ 1100 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 1101 1102 if (!is_check_pages_enabled()) { 1103 ret = 0; 1104 goto out; 1105 } 1106 switch (page - head_page) { 1107 case 1: 1108 /* the first tail page: these may be in place of ->mapping */ 1109 if (unlikely(folio_large_mapcount(folio))) { 1110 bad_page(page, "nonzero large_mapcount"); 1111 goto out; 1112 } 1113 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && 1114 unlikely(atomic_read(&folio->_nr_pages_mapped))) { 1115 bad_page(page, "nonzero nr_pages_mapped"); 1116 goto out; 1117 } 1118 if (IS_ENABLED(CONFIG_MM_ID)) { 1119 if (unlikely(folio->_mm_id_mapcount[0] != -1)) { 1120 bad_page(page, "nonzero mm mapcount 0"); 1121 goto out; 1122 } 1123 if (unlikely(folio->_mm_id_mapcount[1] != -1)) { 1124 bad_page(page, "nonzero mm mapcount 1"); 1125 goto out; 1126 } 1127 } 1128 if (IS_ENABLED(CONFIG_64BIT)) { 1129 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1130 bad_page(page, "nonzero entire_mapcount"); 1131 goto out; 1132 } 1133 if (unlikely(atomic_read(&folio->_pincount))) { 1134 bad_page(page, "nonzero pincount"); 1135 goto out; 1136 } 1137 } 1138 break; 1139 case 2: 1140 /* the second tail page: deferred_list overlaps ->mapping */ 1141 if (unlikely(!list_empty(&folio->_deferred_list))) { 1142 bad_page(page, "on deferred list"); 1143 goto out; 1144 } 1145 if (!IS_ENABLED(CONFIG_64BIT)) { 1146 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1147 bad_page(page, "nonzero entire_mapcount"); 1148 goto out; 1149 } 1150 if (unlikely(atomic_read(&folio->_pincount))) { 1151 bad_page(page, "nonzero pincount"); 1152 goto out; 1153 } 1154 } 1155 break; 1156 case 3: 1157 /* the third tail page: hugetlb specifics overlap ->mappings */ 1158 if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) 1159 break; 1160 fallthrough; 1161 default: 1162 if (page->mapping != TAIL_MAPPING) { 1163 bad_page(page, "corrupted mapping in tail page"); 1164 goto out; 1165 } 1166 break; 1167 } 1168 if (unlikely(!PageTail(page))) { 1169 bad_page(page, "PageTail not set"); 1170 goto out; 1171 } 1172 if (unlikely(compound_head(page) != head_page)) { 1173 bad_page(page, "compound_head not consistent"); 1174 goto out; 1175 } 1176 ret = 0; 1177 out: 1178 page->mapping = NULL; 1179 clear_compound_head(page); 1180 return ret; 1181 } 1182 1183 /* 1184 * Skip KASAN memory poisoning when either: 1185 * 1186 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1187 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1188 * using page tags instead (see below). 1189 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1190 * that error detection is disabled for accesses via the page address. 1191 * 1192 * Pages will have match-all tags in the following circumstances: 1193 * 1194 * 1. Pages are being initialized for the first time, including during deferred 1195 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1196 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1197 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1198 * 3. The allocation was excluded from being checked due to sampling, 1199 * see the call to kasan_unpoison_pages. 1200 * 1201 * Poisoning pages during deferred memory init will greatly lengthen the 1202 * process and cause problem in large memory systems as the deferred pages 1203 * initialization is done with interrupt disabled. 
1204 * 1205 * Assuming that there will be no reference to those newly initialized 1206 * pages before they are ever allocated, this should have no effect on 1207 * KASAN memory tracking as the poison will be properly inserted at page 1208 * allocation time. The only corner case is when pages are allocated by 1209 * on-demand allocation and then freed again before the deferred pages 1210 * initialization is done, but this is not likely to happen. 1211 */ 1212 static inline bool should_skip_kasan_poison(struct page *page) 1213 { 1214 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1215 return deferred_pages_enabled(); 1216 1217 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1218 } 1219 1220 static void kernel_init_pages(struct page *page, int numpages) 1221 { 1222 int i; 1223 1224 /* s390's use of memset() could override KASAN redzones. */ 1225 kasan_disable_current(); 1226 for (i = 0; i < numpages; i++) 1227 clear_highpage_kasan_tagged(page + i); 1228 kasan_enable_current(); 1229 } 1230 1231 #ifdef CONFIG_MEM_ALLOC_PROFILING 1232 1233 /* Should be called only if mem_alloc_profiling_enabled() */ 1234 void __clear_page_tag_ref(struct page *page) 1235 { 1236 union pgtag_ref_handle handle; 1237 union codetag_ref ref; 1238 1239 if (get_page_tag_ref(page, &ref, &handle)) { 1240 set_codetag_empty(&ref); 1241 update_page_tag_ref(handle, &ref); 1242 put_page_tag_ref(handle); 1243 } 1244 } 1245 1246 /* Should be called only if mem_alloc_profiling_enabled() */ 1247 static noinline 1248 void __pgalloc_tag_add(struct page *page, struct task_struct *task, 1249 unsigned int nr) 1250 { 1251 union pgtag_ref_handle handle; 1252 union codetag_ref ref; 1253 1254 if (get_page_tag_ref(page, &ref, &handle)) { 1255 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); 1256 update_page_tag_ref(handle, &ref); 1257 put_page_tag_ref(handle); 1258 } 1259 } 1260 1261 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1262 unsigned int nr) 1263 { 1264 if (mem_alloc_profiling_enabled()) 1265 __pgalloc_tag_add(page, task, nr); 1266 } 1267 1268 /* Should be called only if mem_alloc_profiling_enabled() */ 1269 static noinline 1270 void __pgalloc_tag_sub(struct page *page, unsigned int nr) 1271 { 1272 union pgtag_ref_handle handle; 1273 union codetag_ref ref; 1274 1275 if (get_page_tag_ref(page, &ref, &handle)) { 1276 alloc_tag_sub(&ref, PAGE_SIZE * nr); 1277 update_page_tag_ref(handle, &ref); 1278 put_page_tag_ref(handle); 1279 } 1280 } 1281 1282 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) 1283 { 1284 if (mem_alloc_profiling_enabled()) 1285 __pgalloc_tag_sub(page, nr); 1286 } 1287 1288 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */ 1289 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) 1290 { 1291 if (tag) 1292 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1293 } 1294 1295 #else /* CONFIG_MEM_ALLOC_PROFILING */ 1296 1297 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1298 unsigned int nr) {} 1299 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1300 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} 1301 1302 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1303 1304 __always_inline bool free_pages_prepare(struct page *page, 1305 unsigned int order) 1306 { 1307 int bad = 0; 1308 bool skip_kasan_poison = should_skip_kasan_poison(page); 1309 bool init = want_init_on_free(); 1310 bool compound = PageCompound(page); 1311 struct folio *folio = 
page_folio(page); 1312 1313 VM_BUG_ON_PAGE(PageTail(page), page); 1314 1315 trace_mm_page_free(page, order); 1316 kmsan_free_page(page, order); 1317 1318 if (memcg_kmem_online() && PageMemcgKmem(page)) 1319 __memcg_kmem_uncharge_page(page, order); 1320 1321 /* 1322 * In rare cases, when truncation or holepunching raced with 1323 * munlock after VM_LOCKED was cleared, Mlocked may still be 1324 * found set here. This does not indicate a problem, unless 1325 * "unevictable_pgs_cleared" appears worryingly large. 1326 */ 1327 if (unlikely(folio_test_mlocked(folio))) { 1328 long nr_pages = folio_nr_pages(folio); 1329 1330 __folio_clear_mlocked(folio); 1331 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1332 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1333 } 1334 1335 if (unlikely(PageHWPoison(page)) && !order) { 1336 /* Do not let hwpoison pages hit pcplists/buddy */ 1337 reset_page_owner(page, order); 1338 page_table_check_free(page, order); 1339 pgalloc_tag_sub(page, 1 << order); 1340 1341 /* 1342 * The page is isolated and accounted for. 1343 * Mark the codetag as empty to avoid accounting error 1344 * when the page is freed by unpoison_memory(). 1345 */ 1346 clear_page_tag_ref(page); 1347 return false; 1348 } 1349 1350 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1351 1352 /* 1353 * Check tail pages before head page information is cleared to 1354 * avoid checking PageCompound for order-0 pages. 1355 */ 1356 if (unlikely(order)) { 1357 int i; 1358 1359 if (compound) { 1360 page[1].flags.f &= ~PAGE_FLAGS_SECOND; 1361 #ifdef NR_PAGES_IN_LARGE_FOLIO 1362 folio->_nr_pages = 0; 1363 #endif 1364 } 1365 for (i = 1; i < (1 << order); i++) { 1366 if (compound) 1367 bad += free_tail_page_prepare(page, page + i); 1368 if (is_check_pages_enabled()) { 1369 if (free_page_is_bad(page + i)) { 1370 bad++; 1371 continue; 1372 } 1373 } 1374 (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1375 } 1376 } 1377 if (folio_test_anon(folio)) { 1378 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1379 folio->mapping = NULL; 1380 } 1381 if (unlikely(page_has_type(page))) 1382 /* Reset the page_type (which overlays _mapcount) */ 1383 page->page_type = UINT_MAX; 1384 1385 if (is_check_pages_enabled()) { 1386 if (free_page_is_bad(page)) 1387 bad++; 1388 if (bad) 1389 return false; 1390 } 1391 1392 page_cpupid_reset_last(page); 1393 page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1394 reset_page_owner(page, order); 1395 page_table_check_free(page, order); 1396 pgalloc_tag_sub(page, 1 << order); 1397 1398 if (!PageHighMem(page)) { 1399 debug_check_no_locks_freed(page_address(page), 1400 PAGE_SIZE << order); 1401 debug_check_no_obj_freed(page_address(page), 1402 PAGE_SIZE << order); 1403 } 1404 1405 kernel_poison_pages(page, 1 << order); 1406 1407 /* 1408 * As memory initialization might be integrated into KASAN, 1409 * KASAN poisoning and memory initialization code must be 1410 * kept together to avoid discrepancies in behavior. 1411 * 1412 * With hardware tag-based KASAN, memory tags must be set before the 1413 * page becomes unavailable via debug_pagealloc or arch_free_page. 1414 */ 1415 if (!skip_kasan_poison) { 1416 kasan_poison_pages(page, order, init); 1417 1418 /* Memory is already initialized if KASAN did it internally. */ 1419 if (kasan_has_integrated_init()) 1420 init = false; 1421 } 1422 if (init) 1423 kernel_init_pages(page, 1 << order); 1424 1425 /* 1426 * arch_free_page() can make the page's contents inaccessible. s390 1427 * does this. 
So nothing which can access the page's contents should 1428 * happen after this. 1429 */ 1430 arch_free_page(page, order); 1431 1432 debug_pagealloc_unmap_pages(page, 1 << order); 1433 1434 return true; 1435 } 1436 1437 /* 1438 * Frees a number of pages from the PCP lists 1439 * Assumes all pages on list are in same zone. 1440 * count is the number of pages to free. 1441 */ 1442 static void free_pcppages_bulk(struct zone *zone, int count, 1443 struct per_cpu_pages *pcp, 1444 int pindex) 1445 { 1446 unsigned long flags; 1447 unsigned int order; 1448 struct page *page; 1449 1450 /* 1451 * Ensure proper count is passed which otherwise would stuck in the 1452 * below while (list_empty(list)) loop. 1453 */ 1454 count = min(pcp->count, count); 1455 1456 /* Ensure requested pindex is drained first. */ 1457 pindex = pindex - 1; 1458 1459 spin_lock_irqsave(&zone->lock, flags); 1460 1461 while (count > 0) { 1462 struct list_head *list; 1463 int nr_pages; 1464 1465 /* Remove pages from lists in a round-robin fashion. */ 1466 do { 1467 if (++pindex > NR_PCP_LISTS - 1) 1468 pindex = 0; 1469 list = &pcp->lists[pindex]; 1470 } while (list_empty(list)); 1471 1472 order = pindex_to_order(pindex); 1473 nr_pages = 1 << order; 1474 do { 1475 unsigned long pfn; 1476 int mt; 1477 1478 page = list_last_entry(list, struct page, pcp_list); 1479 pfn = page_to_pfn(page); 1480 mt = get_pfnblock_migratetype(page, pfn); 1481 1482 /* must delete to avoid corrupting pcp list */ 1483 list_del(&page->pcp_list); 1484 count -= nr_pages; 1485 pcp->count -= nr_pages; 1486 1487 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); 1488 trace_mm_page_pcpu_drain(page, order, mt); 1489 } while (count > 0 && !list_empty(list)); 1490 } 1491 1492 spin_unlock_irqrestore(&zone->lock, flags); 1493 } 1494 1495 /* Split a multi-block free page into its individual pageblocks. */ 1496 static void split_large_buddy(struct zone *zone, struct page *page, 1497 unsigned long pfn, int order, fpi_t fpi) 1498 { 1499 unsigned long end = pfn + (1 << order); 1500 1501 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); 1502 /* Caller removed page from freelist, buddy info cleared! */ 1503 VM_WARN_ON_ONCE(PageBuddy(page)); 1504 1505 if (order > pageblock_order) 1506 order = pageblock_order; 1507 1508 do { 1509 int mt = get_pfnblock_migratetype(page, pfn); 1510 1511 __free_one_page(page, pfn, zone, order, mt, fpi); 1512 pfn += 1 << order; 1513 if (pfn == end) 1514 break; 1515 page = pfn_to_page(pfn); 1516 } while (1); 1517 } 1518 1519 static void add_page_to_zone_llist(struct zone *zone, struct page *page, 1520 unsigned int order) 1521 { 1522 /* Remember the order */ 1523 page->order = order; 1524 /* Add the page to the free list */ 1525 llist_add(&page->pcp_llist, &zone->trylock_free_pages); 1526 } 1527 1528 static void free_one_page(struct zone *zone, struct page *page, 1529 unsigned long pfn, unsigned int order, 1530 fpi_t fpi_flags) 1531 { 1532 struct llist_head *llhead; 1533 unsigned long flags; 1534 1535 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 1536 if (!spin_trylock_irqsave(&zone->lock, flags)) { 1537 add_page_to_zone_llist(zone, page, order); 1538 return; 1539 } 1540 } else { 1541 spin_lock_irqsave(&zone->lock, flags); 1542 } 1543 1544 /* The lock succeeded. Process deferred pages. 
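 *
 * Both the parked pages and the page being freed here go through
 * split_large_buddy() above: anything wider than a pageblock is freed one
 * pageblock at a time so that each block keeps its own migratetype. E.g.
 * with the common x86-64 values (pageblock_order == 9, MAX_PAGE_ORDER == 10,
 * both assumptions of this example), an order-10 page is freed as two
 * order-9 chunks, each using get_pfnblock_migratetype() of its own block.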
*/ 1545 llhead = &zone->trylock_free_pages; 1546 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) { 1547 struct llist_node *llnode; 1548 struct page *p, *tmp; 1549 1550 llnode = llist_del_all(llhead); 1551 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) { 1552 unsigned int p_order = p->order; 1553 1554 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags); 1555 __count_vm_events(PGFREE, 1 << p_order); 1556 } 1557 } 1558 split_large_buddy(zone, page, pfn, order, fpi_flags); 1559 spin_unlock_irqrestore(&zone->lock, flags); 1560 1561 __count_vm_events(PGFREE, 1 << order); 1562 } 1563 1564 static void __free_pages_ok(struct page *page, unsigned int order, 1565 fpi_t fpi_flags) 1566 { 1567 unsigned long pfn = page_to_pfn(page); 1568 struct zone *zone = page_zone(page); 1569 1570 if (free_pages_prepare(page, order)) 1571 free_one_page(zone, page, pfn, order, fpi_flags); 1572 } 1573 1574 void __meminit __free_pages_core(struct page *page, unsigned int order, 1575 enum meminit_context context) 1576 { 1577 unsigned int nr_pages = 1 << order; 1578 struct page *p = page; 1579 unsigned int loop; 1580 1581 /* 1582 * When initializing the memmap, __init_single_page() sets the refcount 1583 * of all pages to 1 ("allocated"/"not free"). We have to set the 1584 * refcount of all involved pages to 0. 1585 * 1586 * Note that hotplugged memory pages are initialized to PageOffline(). 1587 * Pages freed from memblock might be marked as reserved. 1588 */ 1589 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1590 unlikely(context == MEMINIT_HOTPLUG)) { 1591 for (loop = 0; loop < nr_pages; loop++, p++) { 1592 VM_WARN_ON_ONCE(PageReserved(p)); 1593 __ClearPageOffline(p); 1594 set_page_count(p, 0); 1595 } 1596 1597 adjust_managed_page_count(page, nr_pages); 1598 } else { 1599 for (loop = 0; loop < nr_pages; loop++, p++) { 1600 __ClearPageReserved(p); 1601 set_page_count(p, 0); 1602 } 1603 1604 /* memblock adjusts totalram_pages() manually. */ 1605 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1606 } 1607 1608 if (page_contains_unaccepted(page, order)) { 1609 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1610 return; 1611 1612 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1613 } 1614 1615 /* 1616 * Bypass PCP and place fresh pages right to the tail, primarily 1617 * relevant for memory onlining. 1618 */ 1619 __free_pages_ok(page, order, FPI_TO_TAIL); 1620 } 1621 1622 /* 1623 * Check that the whole (or subset of) a pageblock given by the interval of 1624 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1625 * with the migration of free compaction scanner. 1626 * 1627 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1628 * 1629 * It's possible on some configurations to have a setup like node0 node1 node0 1630 * i.e. it's possible that all pages within a zones range of pages do not 1631 * belong to a single zone. We assume that a border between node0 and node1 1632 * can occur within a single pageblock, but not a node0 node1 node0 1633 * interleaving within a single pageblock. It is therefore sufficient to check 1634 * the first and last page of a pageblock and avoid checking each individual 1635 * page in a pageblock. 1636 * 1637 * Note: the function may return non-NULL struct page even for a page block 1638 * which contains a memory hole (i.e. there is no physical memory for a subset 1639 * of the pfn range). 
For example, if the pageblock order is MAX_PAGE_ORDER, which 1640 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1641 * even though the start pfn is online and valid. This should be safe most of 1642 * the time because struct pages are still initialized via init_unavailable_range() 1643 * and pfn walkers shouldn't touch any physical memory range for which they do 1644 * not recognize any specific metadata in struct pages. 1645 */ 1646 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1647 unsigned long end_pfn, struct zone *zone) 1648 { 1649 struct page *start_page; 1650 struct page *end_page; 1651 1652 /* end_pfn is one past the range we are checking */ 1653 end_pfn--; 1654 1655 if (!pfn_valid(end_pfn)) 1656 return NULL; 1657 1658 start_page = pfn_to_online_page(start_pfn); 1659 if (!start_page) 1660 return NULL; 1661 1662 if (page_zone(start_page) != zone) 1663 return NULL; 1664 1665 end_page = pfn_to_page(end_pfn); 1666 1667 /* This gives a shorter code than deriving page_zone(end_page) */ 1668 if (page_zone_id(start_page) != page_zone_id(end_page)) 1669 return NULL; 1670 1671 return start_page; 1672 } 1673 1674 /* 1675 * The order of subdivision here is critical for the IO subsystem. 1676 * Please do not alter this order without good reasons and regression 1677 * testing. Specifically, as large blocks of memory are subdivided, 1678 * the order in which smaller blocks are delivered depends on the order 1679 * they're subdivided in this function. This is the primary factor 1680 * influencing the order in which pages are delivered to the IO 1681 * subsystem according to empirical testing, and this is also justified 1682 * by considering the behavior of a buddy system containing a single 1683 * large block of memory acted on by a series of small allocations. 1684 * This behavior is a critical factor in sglist merging's success. 1685 * 1686 * -- nyc 1687 */ 1688 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1689 int high, int migratetype) 1690 { 1691 unsigned int size = 1 << high; 1692 unsigned int nr_added = 0; 1693 1694 while (high > low) { 1695 high--; 1696 size >>= 1; 1697 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1698 1699 /* 1700 * Mark as guard pages (or page), that will allow to 1701 * merge back to allocator when buddy will be freed. 
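 *
 * (For scale: expand() called with low == 1 and high == 4 walks high down
 * through 3, 2 and 1, handing the upper half of each successive split - an
 * order-3, an order-2 and an order-1 chunk - back to the free lists, or
 * turning it into a guard page here, while the caller keeps the remaining
 * order-1 piece at the start of the block.)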
1702 * Corresponding page table entries will not be touched, 1703 * pages will stay not present in virtual address space 1704 */ 1705 if (set_page_guard(zone, &page[size], high)) 1706 continue; 1707 1708 __add_to_free_list(&page[size], zone, high, migratetype, false); 1709 set_buddy_order(&page[size], high); 1710 nr_added += size; 1711 } 1712 1713 return nr_added; 1714 } 1715 1716 static __always_inline void page_del_and_expand(struct zone *zone, 1717 struct page *page, int low, 1718 int high, int migratetype) 1719 { 1720 int nr_pages = 1 << high; 1721 1722 __del_page_from_free_list(page, zone, high, migratetype); 1723 nr_pages -= expand(zone, page, low, high, migratetype); 1724 account_freepages(zone, -nr_pages, migratetype); 1725 } 1726 1727 static void check_new_page_bad(struct page *page) 1728 { 1729 if (unlikely(PageHWPoison(page))) { 1730 /* Don't complain about hwpoisoned pages */ 1731 if (PageBuddy(page)) 1732 __ClearPageBuddy(page); 1733 return; 1734 } 1735 1736 bad_page(page, 1737 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1738 } 1739 1740 /* 1741 * This page is about to be returned from the page allocator 1742 */ 1743 static bool check_new_page(struct page *page) 1744 { 1745 if (likely(page_expected_state(page, 1746 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1747 return false; 1748 1749 check_new_page_bad(page); 1750 return true; 1751 } 1752 1753 static inline bool check_new_pages(struct page *page, unsigned int order) 1754 { 1755 if (is_check_pages_enabled()) { 1756 for (int i = 0; i < (1 << order); i++) { 1757 struct page *p = page + i; 1758 1759 if (check_new_page(p)) 1760 return true; 1761 } 1762 } 1763 1764 return false; 1765 } 1766 1767 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1768 { 1769 /* Don't skip if a software KASAN mode is enabled. */ 1770 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1771 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1772 return false; 1773 1774 /* Skip, if hardware tag-based KASAN is not enabled. */ 1775 if (!kasan_hw_tags_enabled()) 1776 return true; 1777 1778 /* 1779 * With hardware tag-based KASAN enabled, skip if this has been 1780 * requested via __GFP_SKIP_KASAN. 1781 */ 1782 return flags & __GFP_SKIP_KASAN; 1783 } 1784 1785 static inline bool should_skip_init(gfp_t flags) 1786 { 1787 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1788 if (!kasan_hw_tags_enabled()) 1789 return false; 1790 1791 /* For hardware tag-based KASAN, skip if requested. */ 1792 return (flags & __GFP_SKIP_ZERO); 1793 } 1794 1795 inline void post_alloc_hook(struct page *page, unsigned int order, 1796 gfp_t gfp_flags) 1797 { 1798 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1799 !should_skip_init(gfp_flags); 1800 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1801 int i; 1802 1803 set_page_private(page, 0); 1804 1805 arch_alloc_page(page, order); 1806 debug_pagealloc_map_pages(page, 1 << order); 1807 1808 /* 1809 * Page unpoisoning must happen before memory initialization. 1810 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1811 * allocations and the page unpoisoning code will complain. 1812 */ 1813 kernel_unpoison_pages(page, 1 << order); 1814 1815 /* 1816 * As memory initialization might be integrated into KASAN, 1817 * KASAN unpoisoning and memory initializion code must be 1818 * kept together to avoid discrepancies in behavior. 1819 */ 1820 1821 /* 1822 * If memory tags should be zeroed 1823 * (which happens only when memory should be initialized as well). 
1824 */ 1825 if (zero_tags) { 1826 /* Initialize both memory and memory tags. */ 1827 for (i = 0; i != 1 << order; ++i) 1828 tag_clear_highpage(page + i); 1829 1830 /* Take note that memory was initialized by the loop above. */ 1831 init = false; 1832 } 1833 if (!should_skip_kasan_unpoison(gfp_flags) && 1834 kasan_unpoison_pages(page, order, init)) { 1835 /* Take note that memory was initialized by KASAN. */ 1836 if (kasan_has_integrated_init()) 1837 init = false; 1838 } else { 1839 /* 1840 * If memory tags have not been set by KASAN, reset the page 1841 * tags to ensure page_address() dereferencing does not fault. 1842 */ 1843 for (i = 0; i != 1 << order; ++i) 1844 page_kasan_tag_reset(page + i); 1845 } 1846 /* If memory is still not initialized, initialize it now. */ 1847 if (init) 1848 kernel_init_pages(page, 1 << order); 1849 1850 set_page_owner(page, order, gfp_flags); 1851 page_table_check_alloc(page, order); 1852 pgalloc_tag_add(page, current, 1 << order); 1853 } 1854 1855 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1856 unsigned int alloc_flags) 1857 { 1858 post_alloc_hook(page, order, gfp_flags); 1859 1860 if (order && (gfp_flags & __GFP_COMP)) 1861 prep_compound_page(page, order); 1862 1863 /* 1864 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1865 * allocate the page. The expectation is that the caller is taking 1866 * steps that will free more memory. The caller should avoid the page 1867 * being used for !PFMEMALLOC purposes. 1868 */ 1869 if (alloc_flags & ALLOC_NO_WATERMARKS) 1870 set_page_pfmemalloc(page); 1871 else 1872 clear_page_pfmemalloc(page); 1873 } 1874 1875 /* 1876 * Go through the free lists for the given migratetype and remove 1877 * the smallest available page from the freelists 1878 */ 1879 static __always_inline 1880 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1881 int migratetype) 1882 { 1883 unsigned int current_order; 1884 struct free_area *area; 1885 struct page *page; 1886 1887 /* Find a page of the appropriate size in the preferred list */ 1888 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1889 area = &(zone->free_area[current_order]); 1890 page = get_page_from_free_area(area, migratetype); 1891 if (!page) 1892 continue; 1893 1894 page_del_and_expand(zone, page, order, current_order, 1895 migratetype); 1896 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1897 pcp_allowed_order(order) && 1898 migratetype < MIGRATE_PCPTYPES); 1899 return page; 1900 } 1901 1902 return NULL; 1903 } 1904 1905 1906 /* 1907 * This array describes the order lists are fallen back to when 1908 * the free lists for the desirable migrate type are depleted 1909 * 1910 * The other migratetypes do not have fallbacks. 1911 */ 1912 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1913 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1914 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1915 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1916 }; 1917 1918 #ifdef CONFIG_CMA 1919 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1920 unsigned int order) 1921 { 1922 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1923 } 1924 #else 1925 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1926 unsigned int order) { return NULL; } 1927 #endif 1928 1929 /* 1930 * Move all free pages of a block to new type's freelist. 
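 * Returns the number of base pages moved. Free pages are walked in
 * buddy_order() sized steps, so whole buddies are moved at a time.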
Caller needs to 1931 * change the block type. 1932 */ 1933 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1934 int old_mt, int new_mt) 1935 { 1936 struct page *page; 1937 unsigned long pfn, end_pfn; 1938 unsigned int order; 1939 int pages_moved = 0; 1940 1941 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1942 end_pfn = pageblock_end_pfn(start_pfn); 1943 1944 for (pfn = start_pfn; pfn < end_pfn;) { 1945 page = pfn_to_page(pfn); 1946 if (!PageBuddy(page)) { 1947 pfn++; 1948 continue; 1949 } 1950 1951 /* Make sure we are not inadvertently changing nodes */ 1952 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1953 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1954 1955 order = buddy_order(page); 1956 1957 move_to_free_list(page, zone, order, old_mt, new_mt); 1958 1959 pfn += 1 << order; 1960 pages_moved += 1 << order; 1961 } 1962 1963 return pages_moved; 1964 } 1965 1966 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 1967 unsigned long *start_pfn, 1968 int *num_free, int *num_movable) 1969 { 1970 unsigned long pfn, start, end; 1971 1972 pfn = page_to_pfn(page); 1973 start = pageblock_start_pfn(pfn); 1974 end = pageblock_end_pfn(pfn); 1975 1976 /* 1977 * The caller only has the lock for @zone, don't touch ranges 1978 * that straddle into other zones. While we could move part of 1979 * the range that's inside the zone, this call is usually 1980 * accompanied by other operations such as migratetype updates 1981 * which also should be locked. 1982 */ 1983 if (!zone_spans_pfn(zone, start)) 1984 return false; 1985 if (!zone_spans_pfn(zone, end - 1)) 1986 return false; 1987 1988 *start_pfn = start; 1989 1990 if (num_free) { 1991 *num_free = 0; 1992 *num_movable = 0; 1993 for (pfn = start; pfn < end;) { 1994 page = pfn_to_page(pfn); 1995 if (PageBuddy(page)) { 1996 int nr = 1 << buddy_order(page); 1997 1998 *num_free += nr; 1999 pfn += nr; 2000 continue; 2001 } 2002 /* 2003 * We assume that pages that could be isolated for 2004 * migration are movable. But we don't actually try 2005 * isolating, as that would be expensive. 2006 */ 2007 if (PageLRU(page) || page_has_movable_ops(page)) 2008 (*num_movable)++; 2009 pfn++; 2010 } 2011 } 2012 2013 return true; 2014 } 2015 2016 static int move_freepages_block(struct zone *zone, struct page *page, 2017 int old_mt, int new_mt) 2018 { 2019 unsigned long start_pfn; 2020 int res; 2021 2022 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2023 return -1; 2024 2025 res = __move_freepages_block(zone, start_pfn, old_mt, new_mt); 2026 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 2027 2028 return res; 2029 2030 } 2031 2032 #ifdef CONFIG_MEMORY_ISOLATION 2033 /* Look for a buddy that straddles start_pfn */ 2034 static unsigned long find_large_buddy(unsigned long start_pfn) 2035 { 2036 /* 2037 * If start_pfn is not an order-0 PageBuddy, next PageBuddy containing 2038 * start_pfn has minimal order of __ffs(start_pfn) + 1. Start checking 2039 * the order with __ffs(start_pfn). If start_pfn is order-0 PageBuddy, 2040 * the starting order does not matter. 2041 */ 2042 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER; 2043 struct page *page; 2044 unsigned long pfn = start_pfn; 2045 2046 while (!PageBuddy(page = pfn_to_page(pfn))) { 2047 /* Nothing found */ 2048 if (++order > MAX_PAGE_ORDER) 2049 return start_pfn; 2050 pfn &= ~0UL << order; 2051 } 2052 2053 /* 2054 * Found a preceding buddy, but does it straddle? 
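 * For example, if start_pfn is 520 and the scan above stopped at an
 * order-4 buddy at pfn 512, then 512 + 16 = 528 > 520, so that buddy
 * straddles start_pfn and its head pfn is returned instead.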
2055 */ 2056 if (pfn + (1 << buddy_order(page)) > start_pfn) 2057 return pfn; 2058 2059 /* Nothing found */ 2060 return start_pfn; 2061 } 2062 2063 static inline void toggle_pageblock_isolate(struct page *page, bool isolate) 2064 { 2065 if (isolate) 2066 set_pageblock_isolate(page); 2067 else 2068 clear_pageblock_isolate(page); 2069 } 2070 2071 /** 2072 * __move_freepages_block_isolate - move free pages in block for page isolation 2073 * @zone: the zone 2074 * @page: the pageblock page 2075 * @isolate: to isolate the given pageblock or unisolate it 2076 * 2077 * This is similar to move_freepages_block(), but handles the special 2078 * case encountered in page isolation, where the block of interest 2079 * might be part of a larger buddy spanning multiple pageblocks. 2080 * 2081 * Unlike the regular page allocator path, which moves pages while 2082 * stealing buddies off the freelist, page isolation is interested in 2083 * arbitrary pfn ranges that may have overlapping buddies on both ends. 2084 * 2085 * This function handles that. Straddling buddies are split into 2086 * individual pageblocks. Only the block of interest is moved. 2087 * 2088 * Returns %true if pages could be moved, %false otherwise. 2089 */ 2090 static bool __move_freepages_block_isolate(struct zone *zone, 2091 struct page *page, bool isolate) 2092 { 2093 unsigned long start_pfn, pfn; 2094 int from_mt; 2095 int to_mt; 2096 2097 if (isolate == get_pageblock_isolate(page)) { 2098 VM_WARN_ONCE(1, "%s a pageblock that is already in that state", 2099 isolate ? "Isolate" : "Unisolate"); 2100 return false; 2101 } 2102 2103 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2104 return false; 2105 2106 /* No splits needed if buddies can't span multiple blocks */ 2107 if (pageblock_order == MAX_PAGE_ORDER) 2108 goto move; 2109 2110 /* We're a tail block in a larger buddy */ 2111 pfn = find_large_buddy(start_pfn); 2112 if (pfn != start_pfn) { 2113 struct page *buddy = pfn_to_page(pfn); 2114 int order = buddy_order(buddy); 2115 2116 del_page_from_free_list(buddy, zone, order, 2117 get_pfnblock_migratetype(buddy, pfn)); 2118 toggle_pageblock_isolate(page, isolate); 2119 split_large_buddy(zone, buddy, pfn, order, FPI_NONE); 2120 return true; 2121 } 2122 2123 /* We're the starting block of a larger buddy */ 2124 if (PageBuddy(page) && buddy_order(page) > pageblock_order) { 2125 int order = buddy_order(page); 2126 2127 del_page_from_free_list(page, zone, order, 2128 get_pfnblock_migratetype(page, pfn)); 2129 toggle_pageblock_isolate(page, isolate); 2130 split_large_buddy(zone, page, pfn, order, FPI_NONE); 2131 return true; 2132 } 2133 move: 2134 /* Use MIGRATETYPE_MASK to get non-isolate migratetype */ 2135 if (isolate) { 2136 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2137 MIGRATETYPE_MASK); 2138 to_mt = MIGRATE_ISOLATE; 2139 } else { 2140 from_mt = MIGRATE_ISOLATE; 2141 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2142 MIGRATETYPE_MASK); 2143 } 2144 2145 __move_freepages_block(zone, start_pfn, from_mt, to_mt); 2146 toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate); 2147 2148 return true; 2149 } 2150 2151 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page) 2152 { 2153 return __move_freepages_block_isolate(zone, page, true); 2154 } 2155 2156 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page) 2157 { 2158 return __move_freepages_block_isolate(zone, page, false); 2159 } 2160 2161 #endif /* CONFIG_MEMORY_ISOLATION */ 2162 2163 
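/*
 * change_pageblock_range() below stamps a new migratetype on every
 * pageblock covered by a high-order page. For example, with
 * pageblock_order == 9, an order-10 page spans 1 << (10 - 9) == 2
 * pageblocks, so set_pageblock_migratetype() is applied twice,
 * pageblock_nr_pages apart.
 */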
static void change_pageblock_range(struct page *pageblock_page, 2164 int start_order, int migratetype) 2165 { 2166 int nr_pageblocks = 1 << (start_order - pageblock_order); 2167 2168 while (nr_pageblocks--) { 2169 set_pageblock_migratetype(pageblock_page, migratetype); 2170 pageblock_page += pageblock_nr_pages; 2171 } 2172 } 2173 2174 static inline bool boost_watermark(struct zone *zone) 2175 { 2176 unsigned long max_boost; 2177 2178 if (!watermark_boost_factor) 2179 return false; 2180 /* 2181 * Don't bother in zones that are unlikely to produce results. 2182 * On small machines, including kdump capture kernels running 2183 * in a small area, boosting the watermark can cause an out of 2184 * memory situation immediately. 2185 */ 2186 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2187 return false; 2188 2189 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2190 watermark_boost_factor, 10000); 2191 2192 /* 2193 * high watermark may be uninitialised if fragmentation occurs 2194 * very early in boot so do not boost. We do not fall 2195 * through and boost by pageblock_nr_pages as failing 2196 * allocations that early means that reclaim is not going 2197 * to help and it may even be impossible to reclaim the 2198 * boosted watermark resulting in a hang. 2199 */ 2200 if (!max_boost) 2201 return false; 2202 2203 max_boost = max(pageblock_nr_pages, max_boost); 2204 2205 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2206 max_boost); 2207 2208 return true; 2209 } 2210 2211 /* 2212 * When we are falling back to another migratetype during allocation, should we 2213 * try to claim an entire block to satisfy further allocations, instead of 2214 * polluting multiple pageblocks? 2215 */ 2216 static bool should_try_claim_block(unsigned int order, int start_mt) 2217 { 2218 /* 2219 * Leaving this order check is intended, although there is 2220 * relaxed order check in next check. The reason is that 2221 * we can actually claim the whole pageblock if this condition met, 2222 * but, below check doesn't guarantee it and that is just heuristic 2223 * so could be changed anytime. 2224 */ 2225 if (order >= pageblock_order) 2226 return true; 2227 2228 /* 2229 * Above a certain threshold, always try to claim, as it's likely there 2230 * will be more free pages in the pageblock. 2231 */ 2232 if (order >= pageblock_order / 2) 2233 return true; 2234 2235 /* 2236 * Unmovable/reclaimable allocations would cause permanent 2237 * fragmentations if they fell back to allocating from a movable block 2238 * (polluting it), so we try to claim the whole block regardless of the 2239 * allocation size. Later movable allocations can always steal from this 2240 * block, which is less problematic. 2241 */ 2242 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE) 2243 return true; 2244 2245 if (page_group_by_mobility_disabled) 2246 return true; 2247 2248 /* 2249 * Movable pages won't cause permanent fragmentation, so when you alloc 2250 * small pages, we just need to temporarily steal unmovable or 2251 * reclaimable pages that are closest to the request size. After a 2252 * while, memory compaction may occur to form large contiguous pages, 2253 * and the next movable allocation may not need to steal. 2254 */ 2255 return false; 2256 } 2257 2258 /* 2259 * Check whether there is a suitable fallback freepage with requested order. 2260 * If claimable is true, this function returns fallback_mt only if 2261 * we would do this whole-block claiming. 
This would help to reduce 2262 * fragmentation due to mixed migratetype pages in one pageblock. 2263 */ 2264 int find_suitable_fallback(struct free_area *area, unsigned int order, 2265 int migratetype, bool claimable) 2266 { 2267 int i; 2268 2269 if (claimable && !should_try_claim_block(order, migratetype)) 2270 return -2; 2271 2272 if (area->nr_free == 0) 2273 return -1; 2274 2275 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 2276 int fallback_mt = fallbacks[migratetype][i]; 2277 2278 if (!free_area_empty(area, fallback_mt)) 2279 return fallback_mt; 2280 } 2281 2282 return -1; 2283 } 2284 2285 /* 2286 * This function implements actual block claiming behaviour. If order is large 2287 * enough, we can claim the whole pageblock for the requested migratetype. If 2288 * not, we check the pageblock for constituent pages; if at least half of the 2289 * pages are free or compatible, we can still claim the whole block, so pages 2290 * freed in the future will be put on the correct free list. 2291 */ 2292 static struct page * 2293 try_to_claim_block(struct zone *zone, struct page *page, 2294 int current_order, int order, int start_type, 2295 int block_type, unsigned int alloc_flags) 2296 { 2297 int free_pages, movable_pages, alike_pages; 2298 unsigned long start_pfn; 2299 2300 /* Take ownership for orders >= pageblock_order */ 2301 if (current_order >= pageblock_order) { 2302 unsigned int nr_added; 2303 2304 del_page_from_free_list(page, zone, current_order, block_type); 2305 change_pageblock_range(page, current_order, start_type); 2306 nr_added = expand(zone, page, order, current_order, start_type); 2307 account_freepages(zone, nr_added, start_type); 2308 return page; 2309 } 2310 2311 /* 2312 * Boost watermarks to increase reclaim pressure to reduce the 2313 * likelihood of future fallbacks. Wake kswapd now as the node 2314 * may be balanced overall and kswapd will not wake naturally. 2315 */ 2316 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2317 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2318 2319 /* moving whole block can fail due to zone boundary conditions */ 2320 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 2321 &movable_pages)) 2322 return NULL; 2323 2324 /* 2325 * Determine how many pages are compatible with our allocation. 2326 * For movable allocation, it's the number of movable pages which 2327 * we just obtained. For other types it's a bit more tricky. 2328 */ 2329 if (start_type == MIGRATE_MOVABLE) { 2330 alike_pages = movable_pages; 2331 } else { 2332 /* 2333 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2334 * to MOVABLE pageblock, consider all non-movable pages as 2335 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2336 * vice versa, be conservative since we can't distinguish the 2337 * exact migratetype of non-movable pages. 2338 */ 2339 if (block_type == MIGRATE_MOVABLE) 2340 alike_pages = pageblock_nr_pages 2341 - (free_pages + movable_pages); 2342 else 2343 alike_pages = 0; 2344 } 2345 /* 2346 * If a sufficient number of pages in the block are either free or of 2347 * compatible migratability as our allocation, claim the whole block. 
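 * For example, with pageblock_order == 9 the block is claimed when
 * free_pages + alike_pages >= 256, i.e. when at least half of the
 * 512 pages making up the pageblock are free or deemed compatible.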
2348 */ 2349 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2350 page_group_by_mobility_disabled) { 2351 __move_freepages_block(zone, start_pfn, block_type, start_type); 2352 set_pageblock_migratetype(pfn_to_page(start_pfn), start_type); 2353 return __rmqueue_smallest(zone, order, start_type); 2354 } 2355 2356 return NULL; 2357 } 2358 2359 /* 2360 * Try to allocate from some fallback migratetype by claiming the entire block, 2361 * i.e. converting it to the allocation's start migratetype. 2362 * 2363 * The use of signed ints for order and current_order is a deliberate 2364 * deviation from the rest of this file, to make the for loop 2365 * condition simpler. 2366 */ 2367 static __always_inline struct page * 2368 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, 2369 unsigned int alloc_flags) 2370 { 2371 struct free_area *area; 2372 int current_order; 2373 int min_order = order; 2374 struct page *page; 2375 int fallback_mt; 2376 2377 /* 2378 * Do not steal pages from freelists belonging to other pageblocks 2379 * i.e. orders < pageblock_order. If there are no local zones free, 2380 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2381 */ 2382 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2383 min_order = pageblock_order; 2384 2385 /* 2386 * Find the largest available free page in the other list. This roughly 2387 * approximates finding the pageblock with the most free pages, which 2388 * would be too costly to do exactly. 2389 */ 2390 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2391 --current_order) { 2392 area = &(zone->free_area[current_order]); 2393 fallback_mt = find_suitable_fallback(area, current_order, 2394 start_migratetype, true); 2395 2396 /* No block in that order */ 2397 if (fallback_mt == -1) 2398 continue; 2399 2400 /* Advanced into orders too low to claim, abort */ 2401 if (fallback_mt == -2) 2402 break; 2403 2404 page = get_page_from_free_area(area, fallback_mt); 2405 page = try_to_claim_block(zone, page, current_order, order, 2406 start_migratetype, fallback_mt, 2407 alloc_flags); 2408 if (page) { 2409 trace_mm_page_alloc_extfrag(page, order, current_order, 2410 start_migratetype, fallback_mt); 2411 return page; 2412 } 2413 } 2414 2415 return NULL; 2416 } 2417 2418 /* 2419 * Try to steal a single page from some fallback migratetype. Leave the rest of 2420 * the block as its current migratetype, potentially causing fragmentation. 2421 */ 2422 static __always_inline struct page * 2423 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) 2424 { 2425 struct free_area *area; 2426 int current_order; 2427 struct page *page; 2428 int fallback_mt; 2429 2430 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2431 area = &(zone->free_area[current_order]); 2432 fallback_mt = find_suitable_fallback(area, current_order, 2433 start_migratetype, false); 2434 if (fallback_mt == -1) 2435 continue; 2436 2437 page = get_page_from_free_area(area, fallback_mt); 2438 page_del_and_expand(zone, page, order, current_order, fallback_mt); 2439 trace_mm_page_alloc_extfrag(page, order, current_order, 2440 start_migratetype, fallback_mt); 2441 return page; 2442 } 2443 2444 return NULL; 2445 } 2446 2447 enum rmqueue_mode { 2448 RMQUEUE_NORMAL, 2449 RMQUEUE_CMA, 2450 RMQUEUE_CLAIM, 2451 RMQUEUE_STEAL, 2452 }; 2453 2454 /* 2455 * Do the hard work of removing an element from the buddy allocator. 2456 * Call me with the zone->lock already held. 
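 * A minimal usage sketch (modelled on rmqueue_bulk() below), with the
 * zone->lock already held:
 *
 *	enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
 *	struct page *page;
 *
 *	page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
 *	if (!page)
 *		goto out;	/* zone exhausted for this request */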
2457 */ 2458 static __always_inline struct page * 2459 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2460 unsigned int alloc_flags, enum rmqueue_mode *mode) 2461 { 2462 struct page *page; 2463 2464 if (IS_ENABLED(CONFIG_CMA)) { 2465 /* 2466 * Balance movable allocations between regular and CMA areas by 2467 * allocating from CMA when over half of the zone's free memory 2468 * is in the CMA area. 2469 */ 2470 if (alloc_flags & ALLOC_CMA && 2471 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2472 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2473 page = __rmqueue_cma_fallback(zone, order); 2474 if (page) 2475 return page; 2476 } 2477 } 2478 2479 /* 2480 * First try the freelists of the requested migratetype, then try 2481 * fallbacks modes with increasing levels of fragmentation risk. 2482 * 2483 * The fallback logic is expensive and rmqueue_bulk() calls in 2484 * a loop with the zone->lock held, meaning the freelists are 2485 * not subject to any outside changes. Remember in *mode where 2486 * we found pay dirt, to save us the search on the next call. 2487 */ 2488 switch (*mode) { 2489 case RMQUEUE_NORMAL: 2490 page = __rmqueue_smallest(zone, order, migratetype); 2491 if (page) 2492 return page; 2493 fallthrough; 2494 case RMQUEUE_CMA: 2495 if (alloc_flags & ALLOC_CMA) { 2496 page = __rmqueue_cma_fallback(zone, order); 2497 if (page) { 2498 *mode = RMQUEUE_CMA; 2499 return page; 2500 } 2501 } 2502 fallthrough; 2503 case RMQUEUE_CLAIM: 2504 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); 2505 if (page) { 2506 /* Replenished preferred freelist, back to normal mode. */ 2507 *mode = RMQUEUE_NORMAL; 2508 return page; 2509 } 2510 fallthrough; 2511 case RMQUEUE_STEAL: 2512 if (!(alloc_flags & ALLOC_NOFRAGMENT)) { 2513 page = __rmqueue_steal(zone, order, migratetype); 2514 if (page) { 2515 *mode = RMQUEUE_STEAL; 2516 return page; 2517 } 2518 } 2519 } 2520 return NULL; 2521 } 2522 2523 /* 2524 * Obtain a specified number of elements from the buddy allocator, all under 2525 * a single hold of the lock, for efficiency. Add them to the supplied list. 2526 * Returns the number of new pages which were placed at *list. 2527 */ 2528 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2529 unsigned long count, struct list_head *list, 2530 int migratetype, unsigned int alloc_flags) 2531 { 2532 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 2533 unsigned long flags; 2534 int i; 2535 2536 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 2537 if (!spin_trylock_irqsave(&zone->lock, flags)) 2538 return 0; 2539 } else { 2540 spin_lock_irqsave(&zone->lock, flags); 2541 } 2542 for (i = 0; i < count; ++i) { 2543 struct page *page = __rmqueue(zone, order, migratetype, 2544 alloc_flags, &rmqm); 2545 if (unlikely(page == NULL)) 2546 break; 2547 2548 /* 2549 * Split buddy pages returned by expand() are received here in 2550 * physical page order. The page is added to the tail of 2551 * caller's list. From the callers perspective, the linked list 2552 * is ordered by page number under some conditions. This is 2553 * useful for IO devices that can forward direction from the 2554 * head, thus also in the physical page order. This is useful 2555 * for IO devices that can merge IO requests if the physical 2556 * pages are ordered properly. 2557 */ 2558 list_add_tail(&page->pcp_list, list); 2559 } 2560 spin_unlock_irqrestore(&zone->lock, flags); 2561 2562 return i; 2563 } 2564 2565 /* 2566 * Called from the vmstat counter updater to decay the PCP high. 
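 * Each call lowers pcp->high by up to one eighth, but never below
 * high_min and never so far below pcp->count that more than
 * batch << CONFIG_PCP_BATCH_SCALE_MAX pages would have to be freed at
 * once; whatever pcp->count then exceeds pcp->high is released back
 * to the buddy allocator.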
2567 * Return whether there are addition works to do. 2568 */ 2569 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) 2570 { 2571 int high_min, to_drain, batch; 2572 int todo = 0; 2573 2574 high_min = READ_ONCE(pcp->high_min); 2575 batch = READ_ONCE(pcp->batch); 2576 /* 2577 * Decrease pcp->high periodically to try to free possible 2578 * idle PCP pages. And, avoid to free too many pages to 2579 * control latency. This caps pcp->high decrement too. 2580 */ 2581 if (pcp->high > high_min) { 2582 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2583 pcp->high - (pcp->high >> 3), high_min); 2584 if (pcp->high > high_min) 2585 todo++; 2586 } 2587 2588 to_drain = pcp->count - pcp->high; 2589 if (to_drain > 0) { 2590 spin_lock(&pcp->lock); 2591 free_pcppages_bulk(zone, to_drain, pcp, 0); 2592 spin_unlock(&pcp->lock); 2593 todo++; 2594 } 2595 2596 return todo; 2597 } 2598 2599 #ifdef CONFIG_NUMA 2600 /* 2601 * Called from the vmstat counter updater to drain pagesets of this 2602 * currently executing processor on remote nodes after they have 2603 * expired. 2604 */ 2605 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2606 { 2607 int to_drain, batch; 2608 2609 batch = READ_ONCE(pcp->batch); 2610 to_drain = min(pcp->count, batch); 2611 if (to_drain > 0) { 2612 spin_lock(&pcp->lock); 2613 free_pcppages_bulk(zone, to_drain, pcp, 0); 2614 spin_unlock(&pcp->lock); 2615 } 2616 } 2617 #endif 2618 2619 /* 2620 * Drain pcplists of the indicated processor and zone. 2621 */ 2622 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2623 { 2624 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2625 int count; 2626 2627 do { 2628 spin_lock(&pcp->lock); 2629 count = pcp->count; 2630 if (count) { 2631 int to_drain = min(count, 2632 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2633 2634 free_pcppages_bulk(zone, to_drain, pcp, 0); 2635 count -= to_drain; 2636 } 2637 spin_unlock(&pcp->lock); 2638 } while (count); 2639 } 2640 2641 /* 2642 * Drain pcplists of all zones on the indicated processor. 2643 */ 2644 static void drain_pages(unsigned int cpu) 2645 { 2646 struct zone *zone; 2647 2648 for_each_populated_zone(zone) { 2649 drain_pages_zone(cpu, zone); 2650 } 2651 } 2652 2653 /* 2654 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2655 */ 2656 void drain_local_pages(struct zone *zone) 2657 { 2658 int cpu = smp_processor_id(); 2659 2660 if (zone) 2661 drain_pages_zone(cpu, zone); 2662 else 2663 drain_pages(cpu); 2664 } 2665 2666 /* 2667 * The implementation of drain_all_pages(), exposing an extra parameter to 2668 * drain on all cpus. 2669 * 2670 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2671 * not empty. The check for non-emptiness can however race with a free to 2672 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2673 * that need the guarantee that every CPU has drained can disable the 2674 * optimizing racy check. 2675 */ 2676 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2677 { 2678 int cpu; 2679 2680 /* 2681 * Allocate in the BSS so we won't require allocation in 2682 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2683 */ 2684 static cpumask_t cpus_with_pcps; 2685 2686 /* 2687 * Do not drain if one is already in progress unless it's specific to 2688 * a zone. Such callers are primarily CMA and memory hotplug and need 2689 * the drain to be complete when the call returns. 
2690 */ 2691 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2692 if (!zone) 2693 return; 2694 mutex_lock(&pcpu_drain_mutex); 2695 } 2696 2697 /* 2698 * We don't care about racing with CPU hotplug event 2699 * as offline notification will cause the notified 2700 * cpu to drain that CPU pcps and on_each_cpu_mask 2701 * disables preemption as part of its processing 2702 */ 2703 for_each_online_cpu(cpu) { 2704 struct per_cpu_pages *pcp; 2705 struct zone *z; 2706 bool has_pcps = false; 2707 2708 if (force_all_cpus) { 2709 /* 2710 * The pcp.count check is racy, some callers need a 2711 * guarantee that no cpu is missed. 2712 */ 2713 has_pcps = true; 2714 } else if (zone) { 2715 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2716 if (pcp->count) 2717 has_pcps = true; 2718 } else { 2719 for_each_populated_zone(z) { 2720 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2721 if (pcp->count) { 2722 has_pcps = true; 2723 break; 2724 } 2725 } 2726 } 2727 2728 if (has_pcps) 2729 cpumask_set_cpu(cpu, &cpus_with_pcps); 2730 else 2731 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2732 } 2733 2734 for_each_cpu(cpu, &cpus_with_pcps) { 2735 if (zone) 2736 drain_pages_zone(cpu, zone); 2737 else 2738 drain_pages(cpu); 2739 } 2740 2741 mutex_unlock(&pcpu_drain_mutex); 2742 } 2743 2744 /* 2745 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2746 * 2747 * When zone parameter is non-NULL, spill just the single zone's pages. 2748 */ 2749 void drain_all_pages(struct zone *zone) 2750 { 2751 __drain_all_pages(zone, false); 2752 } 2753 2754 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2755 { 2756 int min_nr_free, max_nr_free; 2757 2758 /* Free as much as possible if batch freeing high-order pages. */ 2759 if (unlikely(free_high)) 2760 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2761 2762 /* Check for PCP disabled or boot pageset */ 2763 if (unlikely(high < batch)) 2764 return 1; 2765 2766 /* Leave at least pcp->batch pages on the list */ 2767 min_nr_free = batch; 2768 max_nr_free = high - batch; 2769 2770 /* 2771 * Increase the batch number to the number of the consecutive 2772 * freed pages to reduce zone lock contention. 
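 * For example, with batch == 63 and high == 512, a pcp that has seen
 * 300 consecutive frees gets 300 pages freed in one go
 * (clamp(300, 63, 449)), while one with little freeing activity
 * stays at the minimum of 63.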
2773 */ 2774 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2775 2776 return batch; 2777 } 2778 2779 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2780 int batch, bool free_high) 2781 { 2782 int high, high_min, high_max; 2783 2784 high_min = READ_ONCE(pcp->high_min); 2785 high_max = READ_ONCE(pcp->high_max); 2786 high = pcp->high = clamp(pcp->high, high_min, high_max); 2787 2788 if (unlikely(!high)) 2789 return 0; 2790 2791 if (unlikely(free_high)) { 2792 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2793 high_min); 2794 return 0; 2795 } 2796 2797 /* 2798 * If reclaim is active, limit the number of pages that can be 2799 * stored on pcp lists 2800 */ 2801 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2802 int free_count = max_t(int, pcp->free_count, batch); 2803 2804 pcp->high = max(high - free_count, high_min); 2805 return min(batch << 2, pcp->high); 2806 } 2807 2808 if (high_min == high_max) 2809 return high; 2810 2811 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2812 int free_count = max_t(int, pcp->free_count, batch); 2813 2814 pcp->high = max(high - free_count, high_min); 2815 high = max(pcp->count, high_min); 2816 } else if (pcp->count >= high) { 2817 int need_high = pcp->free_count + batch; 2818 2819 /* pcp->high should be large enough to hold batch freed pages */ 2820 if (pcp->high < need_high) 2821 pcp->high = clamp(need_high, high_min, high_max); 2822 } 2823 2824 return high; 2825 } 2826 2827 static void free_frozen_page_commit(struct zone *zone, 2828 struct per_cpu_pages *pcp, struct page *page, int migratetype, 2829 unsigned int order, fpi_t fpi_flags) 2830 { 2831 int high, batch; 2832 int pindex; 2833 bool free_high = false; 2834 2835 /* 2836 * On freeing, reduce the number of pages that are batch allocated. 2837 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2838 * allocations. 2839 */ 2840 pcp->alloc_factor >>= 1; 2841 __count_vm_events(PGFREE, 1 << order); 2842 pindex = order_to_pindex(migratetype, order); 2843 list_add(&page->pcp_list, &pcp->lists[pindex]); 2844 pcp->count += 1 << order; 2845 2846 batch = READ_ONCE(pcp->batch); 2847 /* 2848 * As high-order pages other than THP's stored on PCP can contribute 2849 * to fragmentation, limit the number stored when PCP is heavily 2850 * freeing without allocation. The remainder after bulk freeing 2851 * stops will be drained from vmstat refresh context. 2852 */ 2853 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2854 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) && 2855 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2856 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2857 pcp->count >= batch)); 2858 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2859 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2860 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2861 } 2862 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2863 pcp->free_count += (1 << order); 2864 2865 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 2866 /* 2867 * Do not attempt to take a zone lock. Let pcp->count get 2868 * over high mark temporarily. 
2869 */ 2870 return; 2871 } 2872 high = nr_pcp_high(pcp, zone, batch, free_high); 2873 if (pcp->count >= high) { 2874 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), 2875 pcp, pindex); 2876 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2877 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2878 ZONE_MOVABLE, 0)) 2879 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2880 } 2881 } 2882 2883 /* 2884 * Free a pcp page 2885 */ 2886 static void __free_frozen_pages(struct page *page, unsigned int order, 2887 fpi_t fpi_flags) 2888 { 2889 unsigned long __maybe_unused UP_flags; 2890 struct per_cpu_pages *pcp; 2891 struct zone *zone; 2892 unsigned long pfn = page_to_pfn(page); 2893 int migratetype; 2894 2895 if (!pcp_allowed_order(order)) { 2896 __free_pages_ok(page, order, fpi_flags); 2897 return; 2898 } 2899 2900 if (!free_pages_prepare(page, order)) 2901 return; 2902 2903 /* 2904 * We only track unmovable, reclaimable and movable on pcp lists. 2905 * Place ISOLATE pages on the isolated list because they are being 2906 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2907 * get those areas back if necessary. Otherwise, we may have to free 2908 * excessively into the page allocator 2909 */ 2910 zone = page_zone(page); 2911 migratetype = get_pfnblock_migratetype(page, pfn); 2912 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2913 if (unlikely(is_migrate_isolate(migratetype))) { 2914 free_one_page(zone, page, pfn, order, fpi_flags); 2915 return; 2916 } 2917 migratetype = MIGRATE_MOVABLE; 2918 } 2919 2920 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT) 2921 && (in_nmi() || in_hardirq()))) { 2922 add_page_to_zone_llist(zone, page, order); 2923 return; 2924 } 2925 pcp_trylock_prepare(UP_flags); 2926 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2927 if (pcp) { 2928 free_frozen_page_commit(zone, pcp, page, migratetype, order, fpi_flags); 2929 pcp_spin_unlock(pcp); 2930 } else { 2931 free_one_page(zone, page, pfn, order, fpi_flags); 2932 } 2933 pcp_trylock_finish(UP_flags); 2934 } 2935 2936 void free_frozen_pages(struct page *page, unsigned int order) 2937 { 2938 __free_frozen_pages(page, order, FPI_NONE); 2939 } 2940 2941 /* 2942 * Free a batch of folios 2943 */ 2944 void free_unref_folios(struct folio_batch *folios) 2945 { 2946 unsigned long __maybe_unused UP_flags; 2947 struct per_cpu_pages *pcp = NULL; 2948 struct zone *locked_zone = NULL; 2949 int i, j; 2950 2951 /* Prepare folios for freeing */ 2952 for (i = 0, j = 0; i < folios->nr; i++) { 2953 struct folio *folio = folios->folios[i]; 2954 unsigned long pfn = folio_pfn(folio); 2955 unsigned int order = folio_order(folio); 2956 2957 if (!free_pages_prepare(&folio->page, order)) 2958 continue; 2959 /* 2960 * Free orders not handled on the PCP directly to the 2961 * allocator. 
2962 */ 2963 if (!pcp_allowed_order(order)) { 2964 free_one_page(folio_zone(folio), &folio->page, 2965 pfn, order, FPI_NONE); 2966 continue; 2967 } 2968 folio->private = (void *)(unsigned long)order; 2969 if (j != i) 2970 folios->folios[j] = folio; 2971 j++; 2972 } 2973 folios->nr = j; 2974 2975 for (i = 0; i < folios->nr; i++) { 2976 struct folio *folio = folios->folios[i]; 2977 struct zone *zone = folio_zone(folio); 2978 unsigned long pfn = folio_pfn(folio); 2979 unsigned int order = (unsigned long)folio->private; 2980 int migratetype; 2981 2982 folio->private = NULL; 2983 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 2984 2985 /* Different zone requires a different pcp lock */ 2986 if (zone != locked_zone || 2987 is_migrate_isolate(migratetype)) { 2988 if (pcp) { 2989 pcp_spin_unlock(pcp); 2990 pcp_trylock_finish(UP_flags); 2991 locked_zone = NULL; 2992 pcp = NULL; 2993 } 2994 2995 /* 2996 * Free isolated pages directly to the 2997 * allocator, see comment in free_frozen_pages. 2998 */ 2999 if (is_migrate_isolate(migratetype)) { 3000 free_one_page(zone, &folio->page, pfn, 3001 order, FPI_NONE); 3002 continue; 3003 } 3004 3005 /* 3006 * trylock is necessary as folios may be getting freed 3007 * from IRQ or SoftIRQ context after an IO completion. 3008 */ 3009 pcp_trylock_prepare(UP_flags); 3010 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3011 if (unlikely(!pcp)) { 3012 pcp_trylock_finish(UP_flags); 3013 free_one_page(zone, &folio->page, pfn, 3014 order, FPI_NONE); 3015 continue; 3016 } 3017 locked_zone = zone; 3018 } 3019 3020 /* 3021 * Non-isolated types over MIGRATE_PCPTYPES get added 3022 * to the MIGRATE_MOVABLE pcp list. 3023 */ 3024 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3025 migratetype = MIGRATE_MOVABLE; 3026 3027 trace_mm_page_free_batched(&folio->page); 3028 free_frozen_page_commit(zone, pcp, &folio->page, migratetype, 3029 order, FPI_NONE); 3030 } 3031 3032 if (pcp) { 3033 pcp_spin_unlock(pcp); 3034 pcp_trylock_finish(UP_flags); 3035 } 3036 folio_batch_reinit(folios); 3037 } 3038 3039 /* 3040 * split_page takes a non-compound higher-order page, and splits it into 3041 * n (1<<order) sub-pages: page[0..n] 3042 * Each sub-page must be freed individually. 3043 * 3044 * Note: this is probably too low level an operation for use in drivers. 3045 * Please consult with lkml before using this in your driver. 3046 */ 3047 void split_page(struct page *page, unsigned int order) 3048 { 3049 int i; 3050 3051 VM_BUG_ON_PAGE(PageCompound(page), page); 3052 VM_BUG_ON_PAGE(!page_count(page), page); 3053 3054 for (i = 1; i < (1 << order); i++) 3055 set_page_refcounted(page + i); 3056 split_page_owner(page, order, 0); 3057 pgalloc_tag_split(page_folio(page), order, 0); 3058 split_page_memcg(page, order); 3059 } 3060 EXPORT_SYMBOL_GPL(split_page); 3061 3062 int __isolate_free_page(struct page *page, unsigned int order) 3063 { 3064 struct zone *zone = page_zone(page); 3065 int mt = get_pageblock_migratetype(page); 3066 3067 if (!is_migrate_isolate(mt)) { 3068 unsigned long watermark; 3069 /* 3070 * Obey watermarks as if the page was being allocated. We can 3071 * emulate a high-order watermark check with a raised order-0 3072 * watermark, because we already know our high-order page 3073 * exists. 
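 * For example, isolating an order-3 page is checked as an order-0
 * allocation against WMARK_MIN + 8 pages: if that passes, pulling
 * the 8 pages out cannot push the zone below the min watermark.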
3074 */ 3075 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3076 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3077 return 0; 3078 } 3079 3080 del_page_from_free_list(page, zone, order, mt); 3081 3082 /* 3083 * Set the pageblock if the isolated page is at least half of a 3084 * pageblock 3085 */ 3086 if (order >= pageblock_order - 1) { 3087 struct page *endpage = page + (1 << order) - 1; 3088 for (; page < endpage; page += pageblock_nr_pages) { 3089 int mt = get_pageblock_migratetype(page); 3090 /* 3091 * Only change normal pageblocks (i.e., they can merge 3092 * with others) 3093 */ 3094 if (migratetype_is_mergeable(mt)) 3095 move_freepages_block(zone, page, mt, 3096 MIGRATE_MOVABLE); 3097 } 3098 } 3099 3100 return 1UL << order; 3101 } 3102 3103 /** 3104 * __putback_isolated_page - Return a now-isolated page back where we got it 3105 * @page: Page that was isolated 3106 * @order: Order of the isolated page 3107 * @mt: The page's pageblock's migratetype 3108 * 3109 * This function is meant to return a page pulled from the free lists via 3110 * __isolate_free_page back to the free lists they were pulled from. 3111 */ 3112 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3113 { 3114 struct zone *zone = page_zone(page); 3115 3116 /* zone lock should be held when this function is called */ 3117 lockdep_assert_held(&zone->lock); 3118 3119 /* Return isolated page to tail of freelist. */ 3120 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3121 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3122 } 3123 3124 /* 3125 * Update NUMA hit/miss statistics 3126 */ 3127 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3128 long nr_account) 3129 { 3130 #ifdef CONFIG_NUMA 3131 enum numa_stat_item local_stat = NUMA_LOCAL; 3132 3133 /* skip numa counters update if numa stats is disabled */ 3134 if (!static_branch_likely(&vm_numa_stat_key)) 3135 return; 3136 3137 if (zone_to_nid(z) != numa_node_id()) 3138 local_stat = NUMA_OTHER; 3139 3140 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3141 __count_numa_events(z, NUMA_HIT, nr_account); 3142 else { 3143 __count_numa_events(z, NUMA_MISS, nr_account); 3144 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3145 } 3146 __count_numa_events(z, local_stat, nr_account); 3147 #endif 3148 } 3149 3150 static __always_inline 3151 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 3152 unsigned int order, unsigned int alloc_flags, 3153 int migratetype) 3154 { 3155 struct page *page; 3156 unsigned long flags; 3157 3158 do { 3159 page = NULL; 3160 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 3161 if (!spin_trylock_irqsave(&zone->lock, flags)) 3162 return NULL; 3163 } else { 3164 spin_lock_irqsave(&zone->lock, flags); 3165 } 3166 if (alloc_flags & ALLOC_HIGHATOMIC) 3167 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3168 if (!page) { 3169 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 3170 3171 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); 3172 3173 /* 3174 * If the allocation fails, allow OOM handling and 3175 * order-0 (atomic) allocs access to HIGHATOMIC 3176 * reserves as failing now is worse than failing a 3177 * high-order atomic allocation in the future. 
3178 */ 3179 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 3180 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3181 3182 if (!page) { 3183 spin_unlock_irqrestore(&zone->lock, flags); 3184 return NULL; 3185 } 3186 } 3187 spin_unlock_irqrestore(&zone->lock, flags); 3188 } while (check_new_pages(page, order)); 3189 3190 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3191 zone_statistics(preferred_zone, zone, 1); 3192 3193 return page; 3194 } 3195 3196 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 3197 { 3198 int high, base_batch, batch, max_nr_alloc; 3199 int high_max, high_min; 3200 3201 base_batch = READ_ONCE(pcp->batch); 3202 high_min = READ_ONCE(pcp->high_min); 3203 high_max = READ_ONCE(pcp->high_max); 3204 high = pcp->high = clamp(pcp->high, high_min, high_max); 3205 3206 /* Check for PCP disabled or boot pageset */ 3207 if (unlikely(high < base_batch)) 3208 return 1; 3209 3210 if (order) 3211 batch = base_batch; 3212 else 3213 batch = (base_batch << pcp->alloc_factor); 3214 3215 /* 3216 * If we had larger pcp->high, we could avoid to allocate from 3217 * zone. 3218 */ 3219 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3220 high = pcp->high = min(high + batch, high_max); 3221 3222 if (!order) { 3223 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 3224 /* 3225 * Double the number of pages allocated each time there is 3226 * subsequent allocation of order-0 pages without any freeing. 3227 */ 3228 if (batch <= max_nr_alloc && 3229 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 3230 pcp->alloc_factor++; 3231 batch = min(batch, max_nr_alloc); 3232 } 3233 3234 /* 3235 * Scale batch relative to order if batch implies free pages 3236 * can be stored on the PCP. Batch can be 1 for small zones or 3237 * for boot pagesets which should never store free pages as 3238 * the pages may belong to arbitrary zones. 3239 */ 3240 if (batch > 1) 3241 batch = max(batch >> order, 2); 3242 3243 return batch; 3244 } 3245 3246 /* Remove page from the per-cpu list, caller must protect the list */ 3247 static inline 3248 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3249 int migratetype, 3250 unsigned int alloc_flags, 3251 struct per_cpu_pages *pcp, 3252 struct list_head *list) 3253 { 3254 struct page *page; 3255 3256 do { 3257 if (list_empty(list)) { 3258 int batch = nr_pcp_alloc(pcp, zone, order); 3259 int alloced; 3260 3261 alloced = rmqueue_bulk(zone, order, 3262 batch, list, 3263 migratetype, alloc_flags); 3264 3265 pcp->count += alloced << order; 3266 if (unlikely(list_empty(list))) 3267 return NULL; 3268 } 3269 3270 page = list_first_entry(list, struct page, pcp_list); 3271 list_del(&page->pcp_list); 3272 pcp->count -= 1 << order; 3273 } while (check_new_pages(page, order)); 3274 3275 return page; 3276 } 3277 3278 /* Lock and remove page from the per-cpu list */ 3279 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3280 struct zone *zone, unsigned int order, 3281 int migratetype, unsigned int alloc_flags) 3282 { 3283 struct per_cpu_pages *pcp; 3284 struct list_head *list; 3285 struct page *page; 3286 unsigned long __maybe_unused UP_flags; 3287 3288 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
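 * When it does fail, the caller falls back to the buddy path
 * (rmqueue_buddy()) and takes the zone lock instead.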
*/ 3289 pcp_trylock_prepare(UP_flags); 3290 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3291 if (!pcp) { 3292 pcp_trylock_finish(UP_flags); 3293 return NULL; 3294 } 3295 3296 /* 3297 * On allocation, reduce the number of pages that are batch freed. 3298 * See nr_pcp_free() where free_factor is increased for subsequent 3299 * frees. 3300 */ 3301 pcp->free_count >>= 1; 3302 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3303 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3304 pcp_spin_unlock(pcp); 3305 pcp_trylock_finish(UP_flags); 3306 if (page) { 3307 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3308 zone_statistics(preferred_zone, zone, 1); 3309 } 3310 return page; 3311 } 3312 3313 /* 3314 * Allocate a page from the given zone. 3315 * Use pcplists for THP or "cheap" high-order allocations. 3316 */ 3317 3318 /* 3319 * Do not instrument rmqueue() with KMSAN. This function may call 3320 * __msan_poison_alloca() through a call to set_pfnblock_migratetype(). 3321 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3322 * may call rmqueue() again, which will result in a deadlock. 3323 */ 3324 __no_sanitize_memory 3325 static inline 3326 struct page *rmqueue(struct zone *preferred_zone, 3327 struct zone *zone, unsigned int order, 3328 gfp_t gfp_flags, unsigned int alloc_flags, 3329 int migratetype) 3330 { 3331 struct page *page; 3332 3333 if (likely(pcp_allowed_order(order))) { 3334 page = rmqueue_pcplist(preferred_zone, zone, order, 3335 migratetype, alloc_flags); 3336 if (likely(page)) 3337 goto out; 3338 } 3339 3340 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3341 migratetype); 3342 3343 out: 3344 /* Separate test+clear to avoid unnecessary atomics */ 3345 if ((alloc_flags & ALLOC_KSWAPD) && 3346 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3347 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3348 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3349 } 3350 3351 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3352 return page; 3353 } 3354 3355 /* 3356 * Reserve the pageblock(s) surrounding an allocation request for 3357 * exclusive use of high-order atomic allocations if there are no 3358 * empty page blocks that contain a page with a suitable order 3359 */ 3360 static void reserve_highatomic_pageblock(struct page *page, int order, 3361 struct zone *zone) 3362 { 3363 int mt; 3364 unsigned long max_managed, flags; 3365 3366 /* 3367 * The number reserved as: minimum is 1 pageblock, maximum is 3368 * roughly 1% of a zone. But if 1% of a zone falls below a 3369 * pageblock size, then don't reserve any pageblocks. 3370 * Check is race-prone but harmless. 3371 */ 3372 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 3373 return; 3374 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 3375 if (zone->nr_reserved_highatomic >= max_managed) 3376 return; 3377 3378 spin_lock_irqsave(&zone->lock, flags); 3379 3380 /* Recheck the nr_reserved_highatomic limit under the lock */ 3381 if (zone->nr_reserved_highatomic >= max_managed) 3382 goto out_unlock; 3383 3384 /* Yoink! 
*/ 3385 mt = get_pageblock_migratetype(page); 3386 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 3387 if (!migratetype_is_mergeable(mt)) 3388 goto out_unlock; 3389 3390 if (order < pageblock_order) { 3391 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 3392 goto out_unlock; 3393 zone->nr_reserved_highatomic += pageblock_nr_pages; 3394 } else { 3395 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 3396 zone->nr_reserved_highatomic += 1 << order; 3397 } 3398 3399 out_unlock: 3400 spin_unlock_irqrestore(&zone->lock, flags); 3401 } 3402 3403 /* 3404 * Used when an allocation is about to fail under memory pressure. This 3405 * potentially hurts the reliability of high-order allocations when under 3406 * intense memory pressure but failed atomic allocations should be easier 3407 * to recover from than an OOM. 3408 * 3409 * If @force is true, try to unreserve pageblocks even though highatomic 3410 * pageblock is exhausted. 3411 */ 3412 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 3413 bool force) 3414 { 3415 struct zonelist *zonelist = ac->zonelist; 3416 unsigned long flags; 3417 struct zoneref *z; 3418 struct zone *zone; 3419 struct page *page; 3420 int order; 3421 int ret; 3422 3423 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 3424 ac->nodemask) { 3425 /* 3426 * Preserve at least one pageblock unless memory pressure 3427 * is really high. 3428 */ 3429 if (!force && zone->nr_reserved_highatomic <= 3430 pageblock_nr_pages) 3431 continue; 3432 3433 spin_lock_irqsave(&zone->lock, flags); 3434 for (order = 0; order < NR_PAGE_ORDERS; order++) { 3435 struct free_area *area = &(zone->free_area[order]); 3436 unsigned long size; 3437 3438 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 3439 if (!page) 3440 continue; 3441 3442 size = max(pageblock_nr_pages, 1UL << order); 3443 /* 3444 * It should never happen but changes to 3445 * locking could inadvertently allow a per-cpu 3446 * drain to add pages to MIGRATE_HIGHATOMIC 3447 * while unreserving so be safe and watch for 3448 * underflows. 3449 */ 3450 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) 3451 size = zone->nr_reserved_highatomic; 3452 zone->nr_reserved_highatomic -= size; 3453 3454 /* 3455 * Convert to ac->migratetype and avoid the normal 3456 * pageblock stealing heuristics. Minimally, the caller 3457 * is doing the work and needs the pages. More 3458 * importantly, if the block was always converted to 3459 * MIGRATE_UNMOVABLE or another type then the number 3460 * of pageblocks that cannot be completely freed 3461 * may increase. 3462 */ 3463 if (order < pageblock_order) 3464 ret = move_freepages_block(zone, page, 3465 MIGRATE_HIGHATOMIC, 3466 ac->migratetype); 3467 else { 3468 move_to_free_list(page, zone, order, 3469 MIGRATE_HIGHATOMIC, 3470 ac->migratetype); 3471 change_pageblock_range(page, order, 3472 ac->migratetype); 3473 ret = 1; 3474 } 3475 /* 3476 * Reserving the block(s) already succeeded, 3477 * so this should not fail on zone boundaries. 
3478 */ 3479 WARN_ON_ONCE(ret == -1); 3480 if (ret > 0) { 3481 spin_unlock_irqrestore(&zone->lock, flags); 3482 return ret; 3483 } 3484 } 3485 spin_unlock_irqrestore(&zone->lock, flags); 3486 } 3487 3488 return false; 3489 } 3490 3491 static inline long __zone_watermark_unusable_free(struct zone *z, 3492 unsigned int order, unsigned int alloc_flags) 3493 { 3494 long unusable_free = (1 << order) - 1; 3495 3496 /* 3497 * If the caller does not have rights to reserves below the min 3498 * watermark then subtract the free pages reserved for highatomic. 3499 */ 3500 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3501 unusable_free += READ_ONCE(z->nr_free_highatomic); 3502 3503 #ifdef CONFIG_CMA 3504 /* If allocation can't use CMA areas don't use free CMA pages */ 3505 if (!(alloc_flags & ALLOC_CMA)) 3506 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3507 #endif 3508 3509 return unusable_free; 3510 } 3511 3512 /* 3513 * Return true if free base pages are above 'mark'. For high-order checks it 3514 * will return true if the order-0 watermark is reached and there is at least 3515 * one free page of a suitable size. Checking now avoids taking the zone lock 3516 * to check in the allocation paths if no pages are free. 3517 */ 3518 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3519 int highest_zoneidx, unsigned int alloc_flags, 3520 long free_pages) 3521 { 3522 long min = mark; 3523 int o; 3524 3525 /* free_pages may go negative - that's OK */ 3526 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3527 3528 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3529 /* 3530 * __GFP_HIGH allows access to 50% of the min reserve as well 3531 * as OOM. 3532 */ 3533 if (alloc_flags & ALLOC_MIN_RESERVE) { 3534 min -= min / 2; 3535 3536 /* 3537 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3538 * access more reserves than just __GFP_HIGH. Other 3539 * non-blocking allocation requests such as GFP_NOWAIT 3540 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3541 * access to the min reserve. 3542 */ 3543 if (alloc_flags & ALLOC_NON_BLOCK) 3544 min -= min / 4; 3545 } 3546 3547 /* 3548 * OOM victims can try even harder than the normal reserve 3549 * users on the grounds that it's definitely going to be in 3550 * the exit path shortly and free memory. Any allocation it 3551 * makes during the free path will be small and short-lived. 3552 */ 3553 if (alloc_flags & ALLOC_OOM) 3554 min -= min / 2; 3555 } 3556 3557 /* 3558 * Check watermarks for an order-0 allocation request. If these 3559 * are not met, then a high-order request also cannot go ahead 3560 * even if a suitable page happened to be free.
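 * For example, with mark == 128: ALLOC_MIN_RESERVE (__GFP_HIGH) has
 * already dropped min to 64 above, ALLOC_NON_BLOCK (e.g. GFP_ATOMIC)
 * to 48, and ALLOC_OOM halves whatever min remains.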
3561 */ 3562 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3563 return false; 3564 3565 /* If this is an order-0 request then the watermark is fine */ 3566 if (!order) 3567 return true; 3568 3569 /* For a high-order request, check at least one suitable page is free */ 3570 for (o = order; o < NR_PAGE_ORDERS; o++) { 3571 struct free_area *area = &z->free_area[o]; 3572 int mt; 3573 3574 if (!area->nr_free) 3575 continue; 3576 3577 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3578 if (!free_area_empty(area, mt)) 3579 return true; 3580 } 3581 3582 #ifdef CONFIG_CMA 3583 if ((alloc_flags & ALLOC_CMA) && 3584 !free_area_empty(area, MIGRATE_CMA)) { 3585 return true; 3586 } 3587 #endif 3588 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3589 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3590 return true; 3591 } 3592 } 3593 return false; 3594 } 3595 3596 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3597 int highest_zoneidx, unsigned int alloc_flags) 3598 { 3599 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3600 zone_page_state(z, NR_FREE_PAGES)); 3601 } 3602 3603 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3604 unsigned long mark, int highest_zoneidx, 3605 unsigned int alloc_flags, gfp_t gfp_mask) 3606 { 3607 long free_pages; 3608 3609 free_pages = zone_page_state(z, NR_FREE_PAGES); 3610 3611 /* 3612 * Fast check for order-0 only. If this fails then the reserves 3613 * need to be calculated. 3614 */ 3615 if (!order) { 3616 long usable_free; 3617 long reserved; 3618 3619 usable_free = free_pages; 3620 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3621 3622 /* reserved may over estimate high-atomic reserves. */ 3623 usable_free -= min(usable_free, reserved); 3624 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3625 return true; 3626 } 3627 3628 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3629 free_pages)) 3630 return true; 3631 3632 /* 3633 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3634 * when checking the min watermark. The min watermark is the 3635 * point where boosting is ignored so that kswapd is woken up 3636 * when below the low watermark. 3637 */ 3638 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3639 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3640 mark = z->_watermark[WMARK_MIN]; 3641 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3642 alloc_flags, free_pages); 3643 } 3644 3645 return false; 3646 } 3647 3648 #ifdef CONFIG_NUMA 3649 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3650 3651 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3652 { 3653 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3654 node_reclaim_distance; 3655 } 3656 #else /* CONFIG_NUMA */ 3657 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3658 { 3659 return true; 3660 } 3661 #endif /* CONFIG_NUMA */ 3662 3663 /* 3664 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3665 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3666 * premature use of a lower zone may cause lowmem pressure problems that 3667 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3668 * probably too small. It only makes sense to spread allocations to avoid 3669 * fragmentation between the Normal and DMA32 zones. 
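 * In practice ALLOC_NOFRAGMENT is therefore only set for ZONE_NORMAL
 * requests when a ZONE_DMA32 sits directly below it (its population
 * is only verified on NUMA; UMA assumes DMA32 is populated whenever
 * Normal is), or unconditionally when defrag_mode is enabled.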
3670 */ 3671 static inline unsigned int 3672 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3673 { 3674 unsigned int alloc_flags; 3675 3676 /* 3677 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3678 * to save a branch. 3679 */ 3680 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3681 3682 if (defrag_mode) { 3683 alloc_flags |= ALLOC_NOFRAGMENT; 3684 return alloc_flags; 3685 } 3686 3687 #ifdef CONFIG_ZONE_DMA32 3688 if (!zone) 3689 return alloc_flags; 3690 3691 if (zone_idx(zone) != ZONE_NORMAL) 3692 return alloc_flags; 3693 3694 /* 3695 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3696 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3697 * on UMA that if Normal is populated then so is DMA32. 3698 */ 3699 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3700 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3701 return alloc_flags; 3702 3703 alloc_flags |= ALLOC_NOFRAGMENT; 3704 #endif /* CONFIG_ZONE_DMA32 */ 3705 return alloc_flags; 3706 } 3707 3708 /* Must be called after current_gfp_context() which can change gfp_mask */ 3709 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3710 unsigned int alloc_flags) 3711 { 3712 #ifdef CONFIG_CMA 3713 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3714 alloc_flags |= ALLOC_CMA; 3715 #endif 3716 return alloc_flags; 3717 } 3718 3719 /* 3720 * get_page_from_freelist goes through the zonelist trying to allocate 3721 * a page. 3722 */ 3723 static struct page * 3724 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3725 const struct alloc_context *ac) 3726 { 3727 struct zoneref *z; 3728 struct zone *zone; 3729 struct pglist_data *last_pgdat = NULL; 3730 bool last_pgdat_dirty_ok = false; 3731 bool no_fallback; 3732 3733 retry: 3734 /* 3735 * Scan zonelist, looking for a zone with enough free. 3736 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c. 3737 */ 3738 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3739 z = ac->preferred_zoneref; 3740 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3741 ac->nodemask) { 3742 struct page *page; 3743 unsigned long mark; 3744 3745 if (cpusets_enabled() && 3746 (alloc_flags & ALLOC_CPUSET) && 3747 !__cpuset_zone_allowed(zone, gfp_mask)) 3748 continue; 3749 /* 3750 * When allocating a page cache page for writing, we 3751 * want to get it from a node that is within its dirty 3752 * limit, such that no single node holds more than its 3753 * proportional share of globally allowed dirty pages. 3754 * The dirty limits take into account the node's 3755 * lowmem reserves and high watermark so that kswapd 3756 * should be able to balance it without having to 3757 * write pages from its LRU list. 3758 * 3759 * XXX: For now, allow allocations to potentially 3760 * exceed the per-node dirty limit in the slowpath 3761 * (spread_dirty_pages unset) before going into reclaim, 3762 * which is important when on a NUMA setup the allowed 3763 * nodes are together not big enough to reach the 3764 * global limit. The proper fix for these situations 3765 * will require awareness of nodes in the 3766 * dirty-throttling and the flusher threads. 
3767 */ 3768 if (ac->spread_dirty_pages) { 3769 if (last_pgdat != zone->zone_pgdat) { 3770 last_pgdat = zone->zone_pgdat; 3771 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3772 } 3773 3774 if (!last_pgdat_dirty_ok) 3775 continue; 3776 } 3777 3778 if (no_fallback && !defrag_mode && nr_online_nodes > 1 && 3779 zone != zonelist_zone(ac->preferred_zoneref)) { 3780 int local_nid; 3781 3782 /* 3783 * If moving to a remote node, retry but allow 3784 * fragmenting fallbacks. Locality is more important 3785 * than fragmentation avoidance. 3786 */ 3787 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3788 if (zone_to_nid(zone) != local_nid) { 3789 alloc_flags &= ~ALLOC_NOFRAGMENT; 3790 goto retry; 3791 } 3792 } 3793 3794 cond_accept_memory(zone, order, alloc_flags); 3795 3796 /* 3797 * Detect whether the number of free pages is below high 3798 * watermark. If so, we will decrease pcp->high and free 3799 * PCP pages in free path to reduce the possibility of 3800 * premature page reclaiming. Detection is done here to 3801 * avoid to do that in hotter free path. 3802 */ 3803 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3804 goto check_alloc_wmark; 3805 3806 mark = high_wmark_pages(zone); 3807 if (zone_watermark_fast(zone, order, mark, 3808 ac->highest_zoneidx, alloc_flags, 3809 gfp_mask)) 3810 goto try_this_zone; 3811 else 3812 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3813 3814 check_alloc_wmark: 3815 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3816 if (!zone_watermark_fast(zone, order, mark, 3817 ac->highest_zoneidx, alloc_flags, 3818 gfp_mask)) { 3819 int ret; 3820 3821 if (cond_accept_memory(zone, order, alloc_flags)) 3822 goto try_this_zone; 3823 3824 /* 3825 * Watermark failed for this zone, but see if we can 3826 * grow this zone if it contains deferred pages. 3827 */ 3828 if (deferred_pages_enabled()) { 3829 if (_deferred_grow_zone(zone, order)) 3830 goto try_this_zone; 3831 } 3832 /* Checked here to keep the fast path fast */ 3833 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3834 if (alloc_flags & ALLOC_NO_WATERMARKS) 3835 goto try_this_zone; 3836 3837 if (!node_reclaim_enabled() || 3838 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3839 continue; 3840 3841 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3842 switch (ret) { 3843 case NODE_RECLAIM_NOSCAN: 3844 /* did not scan */ 3845 continue; 3846 case NODE_RECLAIM_FULL: 3847 /* scanned but unreclaimable */ 3848 continue; 3849 default: 3850 /* did we reclaim enough */ 3851 if (zone_watermark_ok(zone, order, mark, 3852 ac->highest_zoneidx, alloc_flags)) 3853 goto try_this_zone; 3854 3855 continue; 3856 } 3857 } 3858 3859 try_this_zone: 3860 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3861 gfp_mask, alloc_flags, ac->migratetype); 3862 if (page) { 3863 prep_new_page(page, order, gfp_mask, alloc_flags); 3864 3865 /* 3866 * If this is a high-order atomic allocation then check 3867 * if the pageblock should be reserved for the future 3868 */ 3869 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3870 reserve_highatomic_pageblock(page, order, zone); 3871 3872 return page; 3873 } else { 3874 if (cond_accept_memory(zone, order, alloc_flags)) 3875 goto try_this_zone; 3876 3877 /* Try again if zone has deferred pages */ 3878 if (deferred_pages_enabled()) { 3879 if (_deferred_grow_zone(zone, order)) 3880 goto try_this_zone; 3881 } 3882 } 3883 } 3884 3885 /* 3886 * It's possible on a UMA machine to get through all zones that are 3887 * fragmented. 
If avoiding fragmentation, reset and try again. 3888 */ 3889 if (no_fallback && !defrag_mode) { 3890 alloc_flags &= ~ALLOC_NOFRAGMENT; 3891 goto retry; 3892 } 3893 3894 return NULL; 3895 } 3896 3897 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3898 { 3899 unsigned int filter = SHOW_MEM_FILTER_NODES; 3900 3901 /* 3902 * This documents exceptions given to allocations in certain 3903 * contexts that are allowed to allocate outside current's set 3904 * of allowed nodes. 3905 */ 3906 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3907 if (tsk_is_oom_victim(current) || 3908 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3909 filter &= ~SHOW_MEM_FILTER_NODES; 3910 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3911 filter &= ~SHOW_MEM_FILTER_NODES; 3912 3913 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3914 } 3915 3916 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 3917 { 3918 struct va_format vaf; 3919 va_list args; 3920 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3921 3922 if ((gfp_mask & __GFP_NOWARN) || 3923 !__ratelimit(&nopage_rs) || 3924 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3925 return; 3926 3927 va_start(args, fmt); 3928 vaf.fmt = fmt; 3929 vaf.va = &args; 3930 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3931 current->comm, &vaf, gfp_mask, &gfp_mask, 3932 nodemask_pr_args(nodemask)); 3933 va_end(args); 3934 3935 cpuset_print_current_mems_allowed(); 3936 pr_cont("\n"); 3937 dump_stack(); 3938 warn_alloc_show_mem(gfp_mask, nodemask); 3939 } 3940 3941 static inline struct page * 3942 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3943 unsigned int alloc_flags, 3944 const struct alloc_context *ac) 3945 { 3946 struct page *page; 3947 3948 page = get_page_from_freelist(gfp_mask, order, 3949 alloc_flags|ALLOC_CPUSET, ac); 3950 /* 3951 * fallback to ignore cpuset restriction if our nodes 3952 * are depleted 3953 */ 3954 if (!page) 3955 page = get_page_from_freelist(gfp_mask, order, 3956 alloc_flags, ac); 3957 return page; 3958 } 3959 3960 static inline struct page * 3961 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3962 const struct alloc_context *ac, unsigned long *did_some_progress) 3963 { 3964 struct oom_control oc = { 3965 .zonelist = ac->zonelist, 3966 .nodemask = ac->nodemask, 3967 .memcg = NULL, 3968 .gfp_mask = gfp_mask, 3969 .order = order, 3970 }; 3971 struct page *page; 3972 3973 *did_some_progress = 0; 3974 3975 /* 3976 * Acquire the oom lock. If that fails, somebody else is 3977 * making progress for us. 3978 */ 3979 if (!mutex_trylock(&oom_lock)) { 3980 *did_some_progress = 1; 3981 schedule_timeout_uninterruptible(1); 3982 return NULL; 3983 } 3984 3985 /* 3986 * Go through the zonelist yet one more time, keep very high watermark 3987 * here, this is only to catch a parallel oom killing, we must fail if 3988 * we're still under heavy pressure. But make sure that this reclaim 3989 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3990 * allocation which will never fail due to oom_lock already held. 
3991 */ 3992 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3993 ~__GFP_DIRECT_RECLAIM, order, 3994 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3995 if (page) 3996 goto out; 3997 3998 /* Coredumps can quickly deplete all memory reserves */ 3999 if (current->flags & PF_DUMPCORE) 4000 goto out; 4001 /* The OOM killer will not help higher order allocs */ 4002 if (order > PAGE_ALLOC_COSTLY_ORDER) 4003 goto out; 4004 /* 4005 * We have already exhausted all our reclaim opportunities without any 4006 * success so it is time to admit defeat. We will skip the OOM killer 4007 * because it is very likely that the caller has a more reasonable 4008 * fallback than shooting a random task. 4009 * 4010 * The OOM killer may not free memory on a specific node. 4011 */ 4012 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4013 goto out; 4014 /* The OOM killer does not needlessly kill tasks for lowmem */ 4015 if (ac->highest_zoneidx < ZONE_NORMAL) 4016 goto out; 4017 if (pm_suspended_storage()) 4018 goto out; 4019 /* 4020 * XXX: GFP_NOFS allocations should rather fail than rely on 4021 * other requests to make forward progress. 4022 * We are in an unfortunate situation where out_of_memory cannot 4023 * do much for this context but let's try it to at least get 4024 * access to memory reserves if the current task is killed (see 4025 * out_of_memory). Once filesystems are ready to handle allocation 4026 * failures more gracefully we should just bail out here. 4027 */ 4028 4029 /* Exhausted what can be done so it's blame time */ 4030 if (out_of_memory(&oc) || 4031 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 4032 *did_some_progress = 1; 4033 4034 /* 4035 * Help non-failing allocations by giving them access to memory 4036 * reserves 4037 */ 4038 if (gfp_mask & __GFP_NOFAIL) 4039 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4040 ALLOC_NO_WATERMARKS, ac); 4041 } 4042 out: 4043 mutex_unlock(&oom_lock); 4044 return page; 4045 } 4046 4047 /* 4048 * Maximum number of compaction retries with progress before the OOM 4049 * killer is considered the only way to move forward.
4050 */ 4051 #define MAX_COMPACT_RETRIES 16 4052 4053 #ifdef CONFIG_COMPACTION 4054 /* Try memory compaction for high-order allocations before reclaim */ 4055 static struct page * 4056 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4057 unsigned int alloc_flags, const struct alloc_context *ac, 4058 enum compact_priority prio, enum compact_result *compact_result) 4059 { 4060 struct page *page = NULL; 4061 unsigned long pflags; 4062 unsigned int noreclaim_flag; 4063 4064 if (!order) 4065 return NULL; 4066 4067 psi_memstall_enter(&pflags); 4068 delayacct_compact_start(); 4069 noreclaim_flag = memalloc_noreclaim_save(); 4070 4071 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4072 prio, &page); 4073 4074 memalloc_noreclaim_restore(noreclaim_flag); 4075 psi_memstall_leave(&pflags); 4076 delayacct_compact_end(); 4077 4078 if (*compact_result == COMPACT_SKIPPED) 4079 return NULL; 4080 /* 4081 * At least in one zone compaction wasn't deferred or skipped, so let's 4082 * count a compaction stall 4083 */ 4084 count_vm_event(COMPACTSTALL); 4085 4086 /* Prep a captured page if available */ 4087 if (page) 4088 prep_new_page(page, order, gfp_mask, alloc_flags); 4089 4090 /* Try get a page from the freelist if available */ 4091 if (!page) 4092 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4093 4094 if (page) { 4095 struct zone *zone = page_zone(page); 4096 4097 zone->compact_blockskip_flush = false; 4098 compaction_defer_reset(zone, order, true); 4099 count_vm_event(COMPACTSUCCESS); 4100 return page; 4101 } 4102 4103 /* 4104 * It's bad if compaction run occurs and fails. The most likely reason 4105 * is that pages exist, but not enough to satisfy watermarks. 4106 */ 4107 count_vm_event(COMPACTFAIL); 4108 4109 cond_resched(); 4110 4111 return NULL; 4112 } 4113 4114 static inline bool 4115 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4116 enum compact_result compact_result, 4117 enum compact_priority *compact_priority, 4118 int *compaction_retries) 4119 { 4120 int max_retries = MAX_COMPACT_RETRIES; 4121 int min_priority; 4122 bool ret = false; 4123 int retries = *compaction_retries; 4124 enum compact_priority priority = *compact_priority; 4125 4126 if (!order) 4127 return false; 4128 4129 if (fatal_signal_pending(current)) 4130 return false; 4131 4132 /* 4133 * Compaction was skipped due to a lack of free order-0 4134 * migration targets. Continue if reclaim can help. 4135 */ 4136 if (compact_result == COMPACT_SKIPPED) { 4137 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4138 goto out; 4139 } 4140 4141 /* 4142 * Compaction managed to coalesce some page blocks, but the 4143 * allocation failed presumably due to a race. Retry some. 4144 */ 4145 if (compact_result == COMPACT_SUCCESS) { 4146 /* 4147 * !costly requests are much more important than 4148 * __GFP_RETRY_MAYFAIL costly ones because they are de 4149 * facto nofail and invoke OOM killer to move on while 4150 * costly can fail and users are ready to cope with 4151 * that. 1/4 retries is rather arbitrary but we would 4152 * need much more detailed feedback from compaction to 4153 * make a better decision. 4154 */ 4155 if (order > PAGE_ALLOC_COSTLY_ORDER) 4156 max_retries /= 4; 4157 4158 if (++(*compaction_retries) <= max_retries) { 4159 ret = true; 4160 goto out; 4161 } 4162 } 4163 4164 /* 4165 * Compaction failed. Retry with increasing priority. 4166 */ 4167 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
4168 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4169 4170 if (*compact_priority > min_priority) { 4171 (*compact_priority)--; 4172 *compaction_retries = 0; 4173 ret = true; 4174 } 4175 out: 4176 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4177 return ret; 4178 } 4179 #else 4180 static inline struct page * 4181 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4182 unsigned int alloc_flags, const struct alloc_context *ac, 4183 enum compact_priority prio, enum compact_result *compact_result) 4184 { 4185 *compact_result = COMPACT_SKIPPED; 4186 return NULL; 4187 } 4188 4189 static inline bool 4190 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4191 enum compact_result compact_result, 4192 enum compact_priority *compact_priority, 4193 int *compaction_retries) 4194 { 4195 struct zone *zone; 4196 struct zoneref *z; 4197 4198 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4199 return false; 4200 4201 /* 4202 * There are setups with compaction disabled which would prefer to loop 4203 * inside the allocator rather than hit the oom killer prematurely. 4204 * Let's give them a good hope and keep retrying while the order-0 4205 * watermarks are OK. 4206 */ 4207 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4208 ac->highest_zoneidx, ac->nodemask) { 4209 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4210 ac->highest_zoneidx, alloc_flags)) 4211 return true; 4212 } 4213 return false; 4214 } 4215 #endif /* CONFIG_COMPACTION */ 4216 4217 #ifdef CONFIG_LOCKDEP 4218 static struct lockdep_map __fs_reclaim_map = 4219 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4220 4221 static bool __need_reclaim(gfp_t gfp_mask) 4222 { 4223 /* no reclaim without waiting on it */ 4224 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4225 return false; 4226 4227 /* this guy won't enter reclaim */ 4228 if (current->flags & PF_MEMALLOC) 4229 return false; 4230 4231 if (gfp_mask & __GFP_NOLOCKDEP) 4232 return false; 4233 4234 return true; 4235 } 4236 4237 void __fs_reclaim_acquire(unsigned long ip) 4238 { 4239 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4240 } 4241 4242 void __fs_reclaim_release(unsigned long ip) 4243 { 4244 lock_release(&__fs_reclaim_map, ip); 4245 } 4246 4247 void fs_reclaim_acquire(gfp_t gfp_mask) 4248 { 4249 gfp_mask = current_gfp_context(gfp_mask); 4250 4251 if (__need_reclaim(gfp_mask)) { 4252 if (gfp_mask & __GFP_FS) 4253 __fs_reclaim_acquire(_RET_IP_); 4254 4255 #ifdef CONFIG_MMU_NOTIFIER 4256 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4257 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4258 #endif 4259 4260 } 4261 } 4262 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4263 4264 void fs_reclaim_release(gfp_t gfp_mask) 4265 { 4266 gfp_mask = current_gfp_context(gfp_mask); 4267 4268 if (__need_reclaim(gfp_mask)) { 4269 if (gfp_mask & __GFP_FS) 4270 __fs_reclaim_release(_RET_IP_); 4271 } 4272 } 4273 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4274 #endif 4275 4276 /* 4277 * Zonelists may change due to hotplug during allocation. Detect when zonelists 4278 * have been rebuilt so allocation retries. Reader side does not lock and 4279 * retries the allocation if zonelist changes. Writer side is protected by the 4280 * embedded spin_lock. 
4281 */ 4282 static DEFINE_SEQLOCK(zonelist_update_seq); 4283 4284 static unsigned int zonelist_iter_begin(void) 4285 { 4286 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4287 return read_seqbegin(&zonelist_update_seq); 4288 4289 return 0; 4290 } 4291 4292 static unsigned int check_retry_zonelist(unsigned int seq) 4293 { 4294 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4295 return read_seqretry(&zonelist_update_seq, seq); 4296 4297 return seq; 4298 } 4299 4300 /* Perform direct synchronous page reclaim */ 4301 static unsigned long 4302 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4303 const struct alloc_context *ac) 4304 { 4305 unsigned int noreclaim_flag; 4306 unsigned long progress; 4307 4308 cond_resched(); 4309 4310 /* We now go into synchronous reclaim */ 4311 cpuset_memory_pressure_bump(); 4312 fs_reclaim_acquire(gfp_mask); 4313 noreclaim_flag = memalloc_noreclaim_save(); 4314 4315 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4316 ac->nodemask); 4317 4318 memalloc_noreclaim_restore(noreclaim_flag); 4319 fs_reclaim_release(gfp_mask); 4320 4321 cond_resched(); 4322 4323 return progress; 4324 } 4325 4326 /* The really slow allocator path where we enter direct reclaim */ 4327 static inline struct page * 4328 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4329 unsigned int alloc_flags, const struct alloc_context *ac, 4330 unsigned long *did_some_progress) 4331 { 4332 struct page *page = NULL; 4333 unsigned long pflags; 4334 bool drained = false; 4335 4336 psi_memstall_enter(&pflags); 4337 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4338 if (unlikely(!(*did_some_progress))) 4339 goto out; 4340 4341 retry: 4342 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4343 4344 /* 4345 * If an allocation failed after direct reclaim, it could be because 4346 * pages are pinned on the per-cpu lists or in high alloc reserves. 4347 * Shrink them and try again 4348 */ 4349 if (!page && !drained) { 4350 unreserve_highatomic_pageblock(ac, false); 4351 drain_all_pages(NULL); 4352 drained = true; 4353 goto retry; 4354 } 4355 out: 4356 psi_memstall_leave(&pflags); 4357 4358 return page; 4359 } 4360 4361 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4362 const struct alloc_context *ac) 4363 { 4364 struct zoneref *z; 4365 struct zone *zone; 4366 pg_data_t *last_pgdat = NULL; 4367 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4368 unsigned int reclaim_order; 4369 4370 if (defrag_mode) 4371 reclaim_order = max(order, pageblock_order); 4372 else 4373 reclaim_order = order; 4374 4375 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4376 ac->nodemask) { 4377 if (!managed_zone(zone)) 4378 continue; 4379 if (last_pgdat == zone->zone_pgdat) 4380 continue; 4381 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); 4382 last_pgdat = zone->zone_pgdat; 4383 } 4384 } 4385 4386 static inline unsigned int 4387 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4388 { 4389 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4390 4391 /* 4392 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4393 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4394 * to save two branches. 
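 * The BUILD_BUG_ON() checks below enforce both equalities at compile time,
 * which is what keeps the plain bitwise copy of those gfp bits into
 * alloc_flags further down valid.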
4395 */ 4396 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4397 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4398 4399 /* 4400 * The caller may dip into page reserves a bit more if the caller 4401 * cannot run direct reclaim, or if the caller has realtime scheduling 4402 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4403 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 4404 */ 4405 alloc_flags |= (__force int) 4406 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4407 4408 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4409 /* 4410 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4411 * if it can't schedule. 4412 */ 4413 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4414 alloc_flags |= ALLOC_NON_BLOCK; 4415 4416 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE)) 4417 alloc_flags |= ALLOC_HIGHATOMIC; 4418 } 4419 4420 /* 4421 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4422 * GFP_ATOMIC) rather than fail, see the comment for 4423 * cpuset_current_node_allowed(). 4424 */ 4425 if (alloc_flags & ALLOC_MIN_RESERVE) 4426 alloc_flags &= ~ALLOC_CPUSET; 4427 } else if (unlikely(rt_or_dl_task(current)) && in_task()) 4428 alloc_flags |= ALLOC_MIN_RESERVE; 4429 4430 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4431 4432 if (defrag_mode) 4433 alloc_flags |= ALLOC_NOFRAGMENT; 4434 4435 return alloc_flags; 4436 } 4437 4438 static bool oom_reserves_allowed(struct task_struct *tsk) 4439 { 4440 if (!tsk_is_oom_victim(tsk)) 4441 return false; 4442 4443 /* 4444 * !MMU doesn't have oom reaper so give access to memory reserves 4445 * only to the thread with TIF_MEMDIE set 4446 */ 4447 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4448 return false; 4449 4450 return true; 4451 } 4452 4453 /* 4454 * Distinguish requests which really need access to full memory 4455 * reserves from oom victims which can live with a portion of it 4456 */ 4457 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4458 { 4459 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4460 return 0; 4461 if (gfp_mask & __GFP_MEMALLOC) 4462 return ALLOC_NO_WATERMARKS; 4463 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4464 return ALLOC_NO_WATERMARKS; 4465 if (!in_interrupt()) { 4466 if (current->flags & PF_MEMALLOC) 4467 return ALLOC_NO_WATERMARKS; 4468 else if (oom_reserves_allowed(current)) 4469 return ALLOC_OOM; 4470 } 4471 4472 return 0; 4473 } 4474 4475 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4476 { 4477 return !!__gfp_pfmemalloc_flags(gfp_mask); 4478 } 4479 4480 /* 4481 * Checks whether it makes sense to retry the reclaim to make a forward progress 4482 * for the given allocation request. 4483 * 4484 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4485 * without success, or when we couldn't even meet the watermark if we 4486 * reclaimed all remaining pages on the LRU lists. 4487 * 4488 * Returns true if a retry is viable or false to enter the oom path. 
4489 */ 4490 static inline bool 4491 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4492 struct alloc_context *ac, int alloc_flags, 4493 bool did_some_progress, int *no_progress_loops) 4494 { 4495 struct zone *zone; 4496 struct zoneref *z; 4497 bool ret = false; 4498 4499 /* 4500 * Costly allocations might have made a progress but this doesn't mean 4501 * their order will become available due to high fragmentation so 4502 * always increment the no progress counter for them 4503 */ 4504 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4505 *no_progress_loops = 0; 4506 else 4507 (*no_progress_loops)++; 4508 4509 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4510 goto out; 4511 4512 4513 /* 4514 * Keep reclaiming pages while there is a chance this will lead 4515 * somewhere. If none of the target zones can satisfy our allocation 4516 * request even if all reclaimable pages are considered then we are 4517 * screwed and have to go OOM. 4518 */ 4519 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4520 ac->highest_zoneidx, ac->nodemask) { 4521 unsigned long available; 4522 unsigned long reclaimable; 4523 unsigned long min_wmark = min_wmark_pages(zone); 4524 bool wmark; 4525 4526 if (cpusets_enabled() && 4527 (alloc_flags & ALLOC_CPUSET) && 4528 !__cpuset_zone_allowed(zone, gfp_mask)) 4529 continue; 4530 4531 available = reclaimable = zone_reclaimable_pages(zone); 4532 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4533 4534 /* 4535 * Would the allocation succeed if we reclaimed all 4536 * reclaimable pages? 4537 */ 4538 wmark = __zone_watermark_ok(zone, order, min_wmark, 4539 ac->highest_zoneidx, alloc_flags, available); 4540 trace_reclaim_retry_zone(z, order, reclaimable, 4541 available, min_wmark, *no_progress_loops, wmark); 4542 if (wmark) { 4543 ret = true; 4544 break; 4545 } 4546 } 4547 4548 /* 4549 * Memory allocation/reclaim might be called from a WQ context and the 4550 * current implementation of the WQ concurrency control doesn't 4551 * recognize that a particular WQ is congested if the worker thread is 4552 * looping without ever sleeping. Therefore we have to do a short sleep 4553 * here rather than calling cond_resched(). 4554 */ 4555 if (current->flags & PF_WQ_WORKER) 4556 schedule_timeout_uninterruptible(1); 4557 else 4558 cond_resched(); 4559 out: 4560 /* Before OOM, exhaust highatomic_reserve */ 4561 if (!ret) 4562 return unreserve_highatomic_pageblock(ac, true); 4563 4564 return ret; 4565 } 4566 4567 static inline bool 4568 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4569 { 4570 /* 4571 * It's possible that cpuset's mems_allowed and the nodemask from 4572 * mempolicy don't intersect. This should be normally dealt with by 4573 * policy_nodemask(), but it's possible to race with cpuset update in 4574 * such a way the check therein was true, and then it became false 4575 * before we got our cpuset_mems_cookie here. 4576 * This assumes that for all allocations, ac->nodemask can come only 4577 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4578 * when it does not intersect with the cpuset restrictions) or the 4579 * caller can deal with a violated nodemask. 
4580 */ 4581 if (cpusets_enabled() && ac->nodemask && 4582 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4583 ac->nodemask = NULL; 4584 return true; 4585 } 4586 4587 /* 4588 * When updating a task's mems_allowed or mempolicy nodemask, it is 4589 * possible to race with parallel threads in such a way that our 4590 * allocation can fail while the mask is being updated. If we are about 4591 * to fail, check if the cpuset changed during allocation and if so, 4592 * retry. 4593 */ 4594 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4595 return true; 4596 4597 return false; 4598 } 4599 4600 static inline struct page * 4601 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4602 struct alloc_context *ac) 4603 { 4604 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4605 bool can_compact = gfp_compaction_allowed(gfp_mask); 4606 bool nofail = gfp_mask & __GFP_NOFAIL; 4607 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4608 struct page *page = NULL; 4609 unsigned int alloc_flags; 4610 unsigned long did_some_progress; 4611 enum compact_priority compact_priority; 4612 enum compact_result compact_result; 4613 int compaction_retries; 4614 int no_progress_loops; 4615 unsigned int cpuset_mems_cookie; 4616 unsigned int zonelist_iter_cookie; 4617 int reserve_flags; 4618 4619 if (unlikely(nofail)) { 4620 /* 4621 * We most definitely don't want callers attempting to 4622 * allocate greater than order-1 page units with __GFP_NOFAIL. 4623 */ 4624 WARN_ON_ONCE(order > 1); 4625 /* 4626 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM; 4627 * otherwise we may end up in a lockup. 4628 */ 4629 WARN_ON_ONCE(!can_direct_reclaim); 4630 /* 4631 * A PF_MEMALLOC request from this context is rather bizarre 4632 * because we cannot reclaim anything and can only loop waiting 4633 * for somebody to do the work for us. 4634 */ 4635 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4636 } 4637 4638 restart: 4639 compaction_retries = 0; 4640 no_progress_loops = 0; 4641 compact_result = COMPACT_SKIPPED; 4642 compact_priority = DEF_COMPACT_PRIORITY; 4643 cpuset_mems_cookie = read_mems_allowed_begin(); 4644 zonelist_iter_cookie = zonelist_iter_begin(); 4645 4646 /* 4647 * The fast path uses conservative alloc_flags to succeed only until 4648 * kswapd needs to be woken up, and to avoid the cost of setting up 4649 * alloc_flags precisely. So we do that now. 4650 */ 4651 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4652 4653 /* 4654 * We need to recalculate the starting point for the zonelist iterator 4655 * because we might have used a different nodemask in the fast path, or 4656 * there was a cpuset modification and we are retrying - otherwise we 4657 * could end up iterating over non-eligible zones endlessly. 4658 */ 4659 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4660 ac->highest_zoneidx, ac->nodemask); 4661 if (!zonelist_zone(ac->preferred_zoneref)) 4662 goto nopage; 4663 4664 /* 4665 * Check for insane configurations where the cpuset doesn't contain 4666 * any suitable zone to satisfy the request - e.g. non-movable 4667 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4668 */ 4669 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4670 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4671 ac->highest_zoneidx, 4672 &cpuset_current_mems_allowed); 4673 if (!zonelist_zone(z)) 4674 goto nopage; 4675 } 4676 4677 if (alloc_flags & ALLOC_KSWAPD) 4678 wake_all_kswapds(order, gfp_mask, ac); 4679 4680 /* 4681 * The adjusted alloc_flags might result in immediate success, so try 4682 * that first 4683 */ 4684 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4685 if (page) 4686 goto got_pg; 4687 4688 /* 4689 * For costly allocations, try direct compaction first, as it's likely 4690 * that we have enough base pages and don't need to reclaim. For non- 4691 * movable high-order allocations, do that as well, as compaction will 4692 * try prevent permanent fragmentation by migrating from blocks of the 4693 * same migratetype. 4694 * Don't try this for allocations that are allowed to ignore 4695 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4696 */ 4697 if (can_direct_reclaim && can_compact && 4698 (costly_order || 4699 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4700 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4701 page = __alloc_pages_direct_compact(gfp_mask, order, 4702 alloc_flags, ac, 4703 INIT_COMPACT_PRIORITY, 4704 &compact_result); 4705 if (page) 4706 goto got_pg; 4707 4708 /* 4709 * Checks for costly allocations with __GFP_NORETRY, which 4710 * includes some THP page fault allocations 4711 */ 4712 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4713 /* 4714 * If allocating entire pageblock(s) and compaction 4715 * failed because all zones are below low watermarks 4716 * or is prohibited because it recently failed at this 4717 * order, fail immediately unless the allocator has 4718 * requested compaction and reclaim retry. 4719 * 4720 * Reclaim is 4721 * - potentially very expensive because zones are far 4722 * below their low watermarks or this is part of very 4723 * bursty high order allocations, 4724 * - not guaranteed to help because isolate_freepages() 4725 * may not iterate over freed pages as part of its 4726 * linear scan, and 4727 * - unlikely to make entire pageblocks free on its 4728 * own. 4729 */ 4730 if (compact_result == COMPACT_SKIPPED || 4731 compact_result == COMPACT_DEFERRED) 4732 goto nopage; 4733 4734 /* 4735 * Looks like reclaim/compaction is worth trying, but 4736 * sync compaction could be very expensive, so keep 4737 * using async compaction. 4738 */ 4739 compact_priority = INIT_COMPACT_PRIORITY; 4740 } 4741 } 4742 4743 retry: 4744 /* 4745 * Deal with possible cpuset update races or zonelist updates to avoid 4746 * infinite retries. 4747 */ 4748 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4749 check_retry_zonelist(zonelist_iter_cookie)) 4750 goto restart; 4751 4752 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4753 if (alloc_flags & ALLOC_KSWAPD) 4754 wake_all_kswapds(order, gfp_mask, ac); 4755 4756 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4757 if (reserve_flags) 4758 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4759 (alloc_flags & ALLOC_KSWAPD); 4760 4761 /* 4762 * Reset the nodemask and zonelist iterators if memory policies can be 4763 * ignored. These allocations are high priority and system rather than 4764 * user oriented. 
4765 */ 4766 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4767 ac->nodemask = NULL; 4768 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4769 ac->highest_zoneidx, ac->nodemask); 4770 } 4771 4772 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4773 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4774 if (page) 4775 goto got_pg; 4776 4777 /* Caller is not willing to reclaim, we can't balance anything */ 4778 if (!can_direct_reclaim) 4779 goto nopage; 4780 4781 /* Avoid recursion of direct reclaim */ 4782 if (current->flags & PF_MEMALLOC) 4783 goto nopage; 4784 4785 /* Try direct reclaim and then allocating */ 4786 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4787 &did_some_progress); 4788 if (page) 4789 goto got_pg; 4790 4791 /* Try direct compaction and then allocating */ 4792 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4793 compact_priority, &compact_result); 4794 if (page) 4795 goto got_pg; 4796 4797 /* Do not loop if specifically requested */ 4798 if (gfp_mask & __GFP_NORETRY) 4799 goto nopage; 4800 4801 /* 4802 * Do not retry costly high order allocations unless they are 4803 * __GFP_RETRY_MAYFAIL and we can compact 4804 */ 4805 if (costly_order && (!can_compact || 4806 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4807 goto nopage; 4808 4809 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4810 did_some_progress > 0, &no_progress_loops)) 4811 goto retry; 4812 4813 /* 4814 * It doesn't make any sense to retry for the compaction if the order-0 4815 * reclaim is not able to make any progress because the current 4816 * implementation of the compaction depends on the sufficient amount 4817 * of free memory (see __compaction_suitable) 4818 */ 4819 if (did_some_progress > 0 && can_compact && 4820 should_compact_retry(ac, order, alloc_flags, 4821 compact_result, &compact_priority, 4822 &compaction_retries)) 4823 goto retry; 4824 4825 /* Reclaim/compaction failed to prevent the fallback */ 4826 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) { 4827 alloc_flags &= ~ALLOC_NOFRAGMENT; 4828 goto retry; 4829 } 4830 4831 /* 4832 * Deal with possible cpuset update races or zonelist updates to avoid 4833 * a unnecessary OOM kill. 4834 */ 4835 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4836 check_retry_zonelist(zonelist_iter_cookie)) 4837 goto restart; 4838 4839 /* Reclaim has failed us, start killing things */ 4840 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4841 if (page) 4842 goto got_pg; 4843 4844 /* Avoid allocations with no watermarks from looping endlessly */ 4845 if (tsk_is_oom_victim(current) && 4846 (alloc_flags & ALLOC_OOM || 4847 (gfp_mask & __GFP_NOMEMALLOC))) 4848 goto nopage; 4849 4850 /* Retry as long as the OOM killer is making progress */ 4851 if (did_some_progress) { 4852 no_progress_loops = 0; 4853 goto retry; 4854 } 4855 4856 nopage: 4857 /* 4858 * Deal with possible cpuset update races or zonelist updates to avoid 4859 * a unnecessary OOM kill. 
4860 */ 4861 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4862 check_retry_zonelist(zonelist_iter_cookie)) 4863 goto restart; 4864 4865 /* 4866 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4867 * we always retry 4868 */ 4869 if (unlikely(nofail)) { 4870 /* 4871 * Lacking direct_reclaim we can't do anything to reclaim memory, 4872 * we disregard these unreasonable nofail requests and still 4873 * return NULL 4874 */ 4875 if (!can_direct_reclaim) 4876 goto fail; 4877 4878 /* 4879 * Help non-failing allocations by giving some access to memory 4880 * reserves normally used for high priority non-blocking 4881 * allocations but do not use ALLOC_NO_WATERMARKS because this 4882 * could deplete whole memory reserves which would just make 4883 * the situation worse. 4884 */ 4885 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4886 if (page) 4887 goto got_pg; 4888 4889 cond_resched(); 4890 goto retry; 4891 } 4892 fail: 4893 warn_alloc(gfp_mask, ac->nodemask, 4894 "page allocation failure: order:%u", order); 4895 got_pg: 4896 return page; 4897 } 4898 4899 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4900 int preferred_nid, nodemask_t *nodemask, 4901 struct alloc_context *ac, gfp_t *alloc_gfp, 4902 unsigned int *alloc_flags) 4903 { 4904 ac->highest_zoneidx = gfp_zone(gfp_mask); 4905 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4906 ac->nodemask = nodemask; 4907 ac->migratetype = gfp_migratetype(gfp_mask); 4908 4909 if (cpusets_enabled()) { 4910 *alloc_gfp |= __GFP_HARDWALL; 4911 /* 4912 * When we are in the interrupt context, it is irrelevant 4913 * to the current task context. It means that any node ok. 4914 */ 4915 if (in_task() && !ac->nodemask) 4916 ac->nodemask = &cpuset_current_mems_allowed; 4917 else 4918 *alloc_flags |= ALLOC_CPUSET; 4919 } 4920 4921 might_alloc(gfp_mask); 4922 4923 /* 4924 * Don't invoke should_fail logic, since it may call 4925 * get_random_u32() and printk() which need to spin_lock. 4926 */ 4927 if (!(*alloc_flags & ALLOC_TRYLOCK) && 4928 should_fail_alloc_page(gfp_mask, order)) 4929 return false; 4930 4931 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 4932 4933 /* Dirty zone balancing only done in the fast path */ 4934 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4935 4936 /* 4937 * The preferred zone is used for statistics but crucially it is 4938 * also used as the starting point for the zonelist iterator. It 4939 * may get reset for allocations that ignore memory policies. 4940 */ 4941 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4942 ac->highest_zoneidx, ac->nodemask); 4943 4944 return true; 4945 } 4946 4947 /* 4948 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array 4949 * @gfp: GFP flags for the allocation 4950 * @preferred_nid: The preferred NUMA node ID to allocate from 4951 * @nodemask: Set of nodes to allocate from, may be NULL 4952 * @nr_pages: The number of pages desired in the array 4953 * @page_array: Array to store the pages 4954 * 4955 * This is a batched version of the page allocator that attempts to 4956 * allocate nr_pages quickly. Pages are added to the page_array. 4957 * 4958 * Note that only NULL elements are populated with pages and nr_pages 4959 * is the maximum number of pages that will be stored in the array. 4960 * 4961 * Returns the number of pages in the array. 
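 *
 * Illustrative sketch only (in-tree users normally call the non-_noprof
 * wrapper macros; the array size and error handling here are assumptions
 * for the example, not part of the interface):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_noprof(GFP_KERNEL, numa_node_id(), NULL,
 *					 ARRAY_SIZE(pages), pages);
 *
 * On return, pages[0] .. pages[filled - 1] hold order-0 pages; slots that
 * were already non-NULL are skipped but still counted in the result.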
4962 */ 4963 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 4964 nodemask_t *nodemask, int nr_pages, 4965 struct page **page_array) 4966 { 4967 struct page *page; 4968 unsigned long __maybe_unused UP_flags; 4969 struct zone *zone; 4970 struct zoneref *z; 4971 struct per_cpu_pages *pcp; 4972 struct list_head *pcp_list; 4973 struct alloc_context ac; 4974 gfp_t alloc_gfp; 4975 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4976 int nr_populated = 0, nr_account = 0; 4977 4978 /* 4979 * Skip populated array elements to determine if any pages need 4980 * to be allocated before disabling IRQs. 4981 */ 4982 while (nr_populated < nr_pages && page_array[nr_populated]) 4983 nr_populated++; 4984 4985 /* No pages requested? */ 4986 if (unlikely(nr_pages <= 0)) 4987 goto out; 4988 4989 /* Already populated array? */ 4990 if (unlikely(nr_pages - nr_populated == 0)) 4991 goto out; 4992 4993 /* Bulk allocator does not support memcg accounting. */ 4994 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4995 goto failed; 4996 4997 /* Use the single page allocator for one page. */ 4998 if (nr_pages - nr_populated == 1) 4999 goto failed; 5000 5001 #ifdef CONFIG_PAGE_OWNER 5002 /* 5003 * PAGE_OWNER may recurse into the allocator to allocate space to 5004 * save the stack with pagesets.lock held. Releasing/reacquiring 5005 * removes much of the performance benefit of bulk allocation so 5006 * force the caller to allocate one page at a time as it'll have 5007 * similar performance to added complexity to the bulk allocator. 5008 */ 5009 if (static_branch_unlikely(&page_owner_inited)) 5010 goto failed; 5011 #endif 5012 5013 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5014 gfp &= gfp_allowed_mask; 5015 alloc_gfp = gfp; 5016 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5017 goto out; 5018 gfp = alloc_gfp; 5019 5020 /* Find an allowed local zone that meets the low watermark. */ 5021 z = ac.preferred_zoneref; 5022 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 5023 unsigned long mark; 5024 5025 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5026 !__cpuset_zone_allowed(zone, gfp)) { 5027 continue; 5028 } 5029 5030 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 5031 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 5032 goto failed; 5033 } 5034 5035 cond_accept_memory(zone, 0, alloc_flags); 5036 retry_this_zone: 5037 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 5038 if (zone_watermark_fast(zone, 0, mark, 5039 zonelist_zone_idx(ac.preferred_zoneref), 5040 alloc_flags, gfp)) { 5041 break; 5042 } 5043 5044 if (cond_accept_memory(zone, 0, alloc_flags)) 5045 goto retry_this_zone; 5046 5047 /* Try again if zone has deferred pages */ 5048 if (deferred_pages_enabled()) { 5049 if (_deferred_grow_zone(zone, 0)) 5050 goto retry_this_zone; 5051 } 5052 } 5053 5054 /* 5055 * If there are no allowed local zones that meets the watermarks then 5056 * try to allocate a single page and reclaim if necessary. 5057 */ 5058 if (unlikely(!zone)) 5059 goto failed; 5060 5061 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
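 * If the trylock fails, the request falls back to a single
 * __alloc_pages_noprof() call via the failed_irq/failed labels below.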
*/ 5062 pcp_trylock_prepare(UP_flags); 5063 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 5064 if (!pcp) 5065 goto failed_irq; 5066 5067 /* Attempt the batch allocation */ 5068 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5069 while (nr_populated < nr_pages) { 5070 5071 /* Skip existing pages */ 5072 if (page_array[nr_populated]) { 5073 nr_populated++; 5074 continue; 5075 } 5076 5077 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5078 pcp, pcp_list); 5079 if (unlikely(!page)) { 5080 /* Try and allocate at least one page */ 5081 if (!nr_account) { 5082 pcp_spin_unlock(pcp); 5083 goto failed_irq; 5084 } 5085 break; 5086 } 5087 nr_account++; 5088 5089 prep_new_page(page, 0, gfp, 0); 5090 set_page_refcounted(page); 5091 page_array[nr_populated++] = page; 5092 } 5093 5094 pcp_spin_unlock(pcp); 5095 pcp_trylock_finish(UP_flags); 5096 5097 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5098 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 5099 5100 out: 5101 return nr_populated; 5102 5103 failed_irq: 5104 pcp_trylock_finish(UP_flags); 5105 5106 failed: 5107 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 5108 if (page) 5109 page_array[nr_populated++] = page; 5110 goto out; 5111 } 5112 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 5113 5114 /* 5115 * This is the 'heart' of the zoned buddy allocator. 5116 */ 5117 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, 5118 int preferred_nid, nodemask_t *nodemask) 5119 { 5120 struct page *page; 5121 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5122 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5123 struct alloc_context ac = { }; 5124 5125 /* 5126 * There are several places where we assume that the order value is sane 5127 * so bail out early if the request is out of bound. 5128 */ 5129 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 5130 return NULL; 5131 5132 gfp &= gfp_allowed_mask; 5133 /* 5134 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 5135 * resp. GFP_NOIO which has to be inherited for all allocation requests 5136 * from a particular context which has been marked by 5137 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5138 * movable zones are not used during allocation. 5139 */ 5140 gfp = current_gfp_context(gfp); 5141 alloc_gfp = gfp; 5142 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5143 &alloc_gfp, &alloc_flags)) 5144 return NULL; 5145 5146 /* 5147 * Forbid the first pass from falling back to types that fragment 5148 * memory until all local zones are considered. 5149 */ 5150 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 5151 5152 /* First allocation attempt */ 5153 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5154 if (likely(page)) 5155 goto out; 5156 5157 alloc_gfp = gfp; 5158 ac.spread_dirty_pages = false; 5159 5160 /* 5161 * Restore the original nodemask if it was potentially replaced with 5162 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
5163 */ 5164 ac.nodemask = nodemask; 5165 5166 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5167 5168 out: 5169 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 5170 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5171 free_frozen_pages(page, order); 5172 page = NULL; 5173 } 5174 5175 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5176 kmsan_alloc_page(page, order, alloc_gfp); 5177 5178 return page; 5179 } 5180 EXPORT_SYMBOL(__alloc_frozen_pages_noprof); 5181 5182 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 5183 int preferred_nid, nodemask_t *nodemask) 5184 { 5185 struct page *page; 5186 5187 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); 5188 if (page) 5189 set_page_refcounted(page); 5190 return page; 5191 } 5192 EXPORT_SYMBOL(__alloc_pages_noprof); 5193 5194 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 5195 nodemask_t *nodemask) 5196 { 5197 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 5198 preferred_nid, nodemask); 5199 return page_rmappable_folio(page); 5200 } 5201 EXPORT_SYMBOL(__folio_alloc_noprof); 5202 5203 /* 5204 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5205 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5206 * you need to access high mem. 5207 */ 5208 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 5209 { 5210 struct page *page; 5211 5212 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 5213 if (!page) 5214 return 0; 5215 return (unsigned long) page_address(page); 5216 } 5217 EXPORT_SYMBOL(get_free_pages_noprof); 5218 5219 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 5220 { 5221 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 5222 } 5223 EXPORT_SYMBOL(get_zeroed_page_noprof); 5224 5225 static void ___free_pages(struct page *page, unsigned int order, 5226 fpi_t fpi_flags) 5227 { 5228 /* get PageHead before we drop reference */ 5229 int head = PageHead(page); 5230 /* get alloc tag in case the page is released by others */ 5231 struct alloc_tag *tag = pgalloc_tag_get(page); 5232 5233 if (put_page_testzero(page)) 5234 __free_frozen_pages(page, order, fpi_flags); 5235 else if (!head) { 5236 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 5237 while (order-- > 0) 5238 __free_frozen_pages(page + (1 << order), order, 5239 fpi_flags); 5240 } 5241 } 5242 5243 /** 5244 * __free_pages - Free pages allocated with alloc_pages(). 5245 * @page: The page pointer returned from alloc_pages(). 5246 * @order: The order of the allocation. 5247 * 5248 * This function can free multi-page allocations that are not compound 5249 * pages. It does not check that the @order passed in matches that of 5250 * the allocation, so it is easy to leak memory. Freeing more memory 5251 * than was allocated will probably emit a warning. 5252 * 5253 * If the last reference to this page is speculative, it will be released 5254 * by put_page() which only frees the first page of a non-compound 5255 * allocation. To prevent the remaining pages from being leaked, we free 5256 * the subsequent pages here. If you want to use the page's reference 5257 * count to decide when to free the allocation, you should allocate a 5258 * compound page, and use put_page() instead of __free_pages(). 5259 * 5260 * Context: May be called in interrupt context or while holding a normal 5261 * spinlock, but not in NMI context or while holding a raw spinlock. 
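 *
 * Typical pairing, shown purely as an illustration:
 *
 *	page = alloc_pages(GFP_KERNEL, order);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, order);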
5262 */ 5263 void __free_pages(struct page *page, unsigned int order) 5264 { 5265 ___free_pages(page, order, FPI_NONE); 5266 } 5267 EXPORT_SYMBOL(__free_pages); 5268 5269 /* 5270 * Can be called while holding raw_spin_lock or from IRQ and NMI for any 5271 * page type (not only those that came from alloc_pages_nolock) 5272 */ 5273 void free_pages_nolock(struct page *page, unsigned int order) 5274 { 5275 ___free_pages(page, order, FPI_TRYLOCK); 5276 } 5277 5278 /** 5279 * free_pages - Free pages allocated with __get_free_pages(). 5280 * @addr: The virtual address tied to a page returned from __get_free_pages(). 5281 * @order: The order of the allocation. 5282 * 5283 * This function behaves the same as __free_pages(). Use this function 5284 * to free pages when you only have a valid virtual address. If you have 5285 * the page, call __free_pages() instead. 5286 */ 5287 void free_pages(unsigned long addr, unsigned int order) 5288 { 5289 if (addr != 0) { 5290 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5291 __free_pages(virt_to_page((void *)addr), order); 5292 } 5293 } 5294 5295 EXPORT_SYMBOL(free_pages); 5296 5297 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5298 size_t size) 5299 { 5300 if (addr) { 5301 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 5302 struct page *page = virt_to_page((void *)addr); 5303 struct page *last = page + nr; 5304 5305 split_page_owner(page, order, 0); 5306 pgalloc_tag_split(page_folio(page), order, 0); 5307 split_page_memcg(page, order); 5308 while (page < --last) 5309 set_page_refcounted(last); 5310 5311 last = page + (1UL << order); 5312 for (page += nr; page < last; page++) 5313 __free_pages_ok(page, 0, FPI_TO_TAIL); 5314 } 5315 return (void *)addr; 5316 } 5317 5318 /** 5319 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5320 * @size: the number of bytes to allocate 5321 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5322 * 5323 * This function is similar to alloc_pages(), except that it allocates the 5324 * minimum number of pages to satisfy the request. alloc_pages() can only 5325 * allocate memory in power-of-two pages. 5326 * 5327 * This function is also limited by MAX_PAGE_ORDER. 5328 * 5329 * Memory allocated by this function must be released by free_pages_exact(). 5330 * 5331 * Return: pointer to the allocated area or %NULL in case of error. 5332 */ 5333 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 5334 { 5335 unsigned int order = get_order(size); 5336 unsigned long addr; 5337 5338 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5339 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5340 5341 addr = get_free_pages_noprof(gfp_mask, order); 5342 return make_alloc_exact(addr, order, size); 5343 } 5344 EXPORT_SYMBOL(alloc_pages_exact_noprof); 5345 5346 /** 5347 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5348 * pages on a node. 5349 * @nid: the preferred node ID where memory should be allocated 5350 * @size: the number of bytes to allocate 5351 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5352 * 5353 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5354 * back. 5355 * 5356 * Return: pointer to the allocated area or %NULL in case of error. 
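 *
 * As with alloc_pages_exact(), release the memory with free_pages_exact().
 * For example (illustrative, assuming 4K pages), a 10KB request keeps
 * three pages here, where an order-2 alloc_pages() call would tie up four.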
5357 */ 5358 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5359 { 5360 unsigned int order = get_order(size); 5361 struct page *p; 5362 5363 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5364 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5365 5366 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5367 if (!p) 5368 return NULL; 5369 return make_alloc_exact((unsigned long)page_address(p), order, size); 5370 } 5371 5372 /** 5373 * free_pages_exact - release memory allocated via alloc_pages_exact() 5374 * @virt: the value returned by alloc_pages_exact. 5375 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5376 * 5377 * Release the memory allocated by a previous call to alloc_pages_exact. 5378 */ 5379 void free_pages_exact(void *virt, size_t size) 5380 { 5381 unsigned long addr = (unsigned long)virt; 5382 unsigned long end = addr + PAGE_ALIGN(size); 5383 5384 while (addr < end) { 5385 free_page(addr); 5386 addr += PAGE_SIZE; 5387 } 5388 } 5389 EXPORT_SYMBOL(free_pages_exact); 5390 5391 /** 5392 * nr_free_zone_pages - count number of pages beyond high watermark 5393 * @offset: The zone index of the highest zone 5394 * 5395 * nr_free_zone_pages() counts the number of pages which are beyond the 5396 * high watermark within all zones at or below a given zone index. For each 5397 * zone, the number of pages is calculated as: 5398 * 5399 * nr_free_zone_pages = managed_pages - high_pages 5400 * 5401 * Return: number of pages beyond high watermark. 5402 */ 5403 static unsigned long nr_free_zone_pages(int offset) 5404 { 5405 struct zoneref *z; 5406 struct zone *zone; 5407 5408 /* Just pick one node, since fallback list is circular */ 5409 unsigned long sum = 0; 5410 5411 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5412 5413 for_each_zone_zonelist(zone, z, zonelist, offset) { 5414 unsigned long size = zone_managed_pages(zone); 5415 unsigned long high = high_wmark_pages(zone); 5416 if (size > high) 5417 sum += size - high; 5418 } 5419 5420 return sum; 5421 } 5422 5423 /** 5424 * nr_free_buffer_pages - count number of pages beyond high watermark 5425 * 5426 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5427 * watermark within ZONE_DMA and ZONE_NORMAL. 5428 * 5429 * Return: number of pages beyond high watermark within ZONE_DMA and 5430 * ZONE_NORMAL. 5431 */ 5432 unsigned long nr_free_buffer_pages(void) 5433 { 5434 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5435 } 5436 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5437 5438 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5439 { 5440 zoneref->zone = zone; 5441 zoneref->zone_idx = zone_idx(zone); 5442 } 5443 5444 /* 5445 * Builds allocation fallback zone lists. 5446 * 5447 * Add all populated zones of a node to the zonelist. 5448 */ 5449 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5450 { 5451 struct zone *zone; 5452 enum zone_type zone_type = MAX_NR_ZONES; 5453 int nr_zones = 0; 5454 5455 do { 5456 zone_type--; 5457 zone = pgdat->node_zones + zone_type; 5458 if (populated_zone(zone)) { 5459 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5460 check_highest_zone(zone_type); 5461 } 5462 } while (zone_type); 5463 5464 return nr_zones; 5465 } 5466 5467 #ifdef CONFIG_NUMA 5468 5469 static int __parse_numa_zonelist_order(char *s) 5470 { 5471 /* 5472 * We used to support different zonelists modes but they turned 5473 * out to be just not useful. 
Let's keep the warning in place 5474 * if somebody still use the cmd line parameter so that we do 5475 * not fail it silently 5476 */ 5477 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5478 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5479 return -EINVAL; 5480 } 5481 return 0; 5482 } 5483 5484 static char numa_zonelist_order[] = "Node"; 5485 #define NUMA_ZONELIST_ORDER_LEN 16 5486 /* 5487 * sysctl handler for numa_zonelist_order 5488 */ 5489 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5490 void *buffer, size_t *length, loff_t *ppos) 5491 { 5492 if (write) 5493 return __parse_numa_zonelist_order(buffer); 5494 return proc_dostring(table, write, buffer, length, ppos); 5495 } 5496 5497 static int node_load[MAX_NUMNODES]; 5498 5499 /** 5500 * find_next_best_node - find the next node that should appear in a given node's fallback list 5501 * @node: node whose fallback list we're appending 5502 * @used_node_mask: nodemask_t of already used nodes 5503 * 5504 * We use a number of factors to determine which is the next node that should 5505 * appear on a given node's fallback list. The node should not have appeared 5506 * already in @node's fallback list, and it should be the next closest node 5507 * according to the distance array (which contains arbitrary distance values 5508 * from each node to each node in the system), and should also prefer nodes 5509 * with no CPUs, since presumably they'll have very little allocation pressure 5510 * on them otherwise. 5511 * 5512 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5513 */ 5514 int find_next_best_node(int node, nodemask_t *used_node_mask) 5515 { 5516 int n, val; 5517 int min_val = INT_MAX; 5518 int best_node = NUMA_NO_NODE; 5519 5520 /* 5521 * Use the local node if we haven't already, but for memoryless local 5522 * node, we should skip it and fall back to other nodes. 5523 */ 5524 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5525 node_set(node, *used_node_mask); 5526 return node; 5527 } 5528 5529 for_each_node_state(n, N_MEMORY) { 5530 5531 /* Don't want a node to appear more than once */ 5532 if (node_isset(n, *used_node_mask)) 5533 continue; 5534 5535 /* Use the distance array to find the distance */ 5536 val = node_distance(node, n); 5537 5538 /* Penalize nodes under us ("prefer the next node") */ 5539 val += (n < node); 5540 5541 /* Give preference to headless and unused nodes */ 5542 if (!cpumask_empty(cpumask_of_node(n))) 5543 val += PENALTY_FOR_NODE_WITH_CPUS; 5544 5545 /* Slight preference for less loaded node */ 5546 val *= MAX_NUMNODES; 5547 val += node_load[n]; 5548 5549 if (val < min_val) { 5550 min_val = val; 5551 best_node = n; 5552 } 5553 } 5554 5555 if (best_node >= 0) 5556 node_set(best_node, *used_node_mask); 5557 5558 return best_node; 5559 } 5560 5561 5562 /* 5563 * Build zonelists ordered by node and zones within node. 5564 * This results in maximum locality--normal zone overflows into local 5565 * DMA zone, if any--but risks exhausting DMA zone. 
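 *
 * Worked example (illustrative topology, not taken from real hardware):
 * with node_order = {0, 1} and both nodes carrying populated NORMAL and
 * DMA32 zones, the resulting fallback zonelist reads
 *
 *	node0/NORMAL, node0/DMA32, node1/NORMAL, node1/DMA32
 *
 * because build_zonerefs_node() appends each node's populated zones from
 * the highest zone type downwards before moving on to the next node in
 * the order.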
5566 */ 5567 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5568 unsigned nr_nodes) 5569 { 5570 struct zoneref *zonerefs; 5571 int i; 5572 5573 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5574 5575 for (i = 0; i < nr_nodes; i++) { 5576 int nr_zones; 5577 5578 pg_data_t *node = NODE_DATA(node_order[i]); 5579 5580 nr_zones = build_zonerefs_node(node, zonerefs); 5581 zonerefs += nr_zones; 5582 } 5583 zonerefs->zone = NULL; 5584 zonerefs->zone_idx = 0; 5585 } 5586 5587 /* 5588 * Build __GFP_THISNODE zonelists 5589 */ 5590 static void build_thisnode_zonelists(pg_data_t *pgdat) 5591 { 5592 struct zoneref *zonerefs; 5593 int nr_zones; 5594 5595 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5596 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5597 zonerefs += nr_zones; 5598 zonerefs->zone = NULL; 5599 zonerefs->zone_idx = 0; 5600 } 5601 5602 static void build_zonelists(pg_data_t *pgdat) 5603 { 5604 static int node_order[MAX_NUMNODES]; 5605 int node, nr_nodes = 0; 5606 nodemask_t used_mask = NODE_MASK_NONE; 5607 int local_node, prev_node; 5608 5609 /* NUMA-aware ordering of nodes */ 5610 local_node = pgdat->node_id; 5611 prev_node = local_node; 5612 5613 memset(node_order, 0, sizeof(node_order)); 5614 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5615 /* 5616 * We don't want to pressure a particular node. 5617 * So adding penalty to the first node in same 5618 * distance group to make it round-robin. 5619 */ 5620 if (node_distance(local_node, node) != 5621 node_distance(local_node, prev_node)) 5622 node_load[node] += 1; 5623 5624 node_order[nr_nodes++] = node; 5625 prev_node = node; 5626 } 5627 5628 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5629 build_thisnode_zonelists(pgdat); 5630 pr_info("Fallback order for Node %d: ", local_node); 5631 for (node = 0; node < nr_nodes; node++) 5632 pr_cont("%d ", node_order[node]); 5633 pr_cont("\n"); 5634 } 5635 5636 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5637 /* 5638 * Return node id of node used for "local" allocations. 5639 * I.e., first node id of first zone in arg node's generic zonelist. 5640 * Used for initializing percpu 'numa_mem', which is used primarily 5641 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5642 */ 5643 int local_memory_node(int node) 5644 { 5645 struct zoneref *z; 5646 5647 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5648 gfp_zone(GFP_KERNEL), 5649 NULL); 5650 return zonelist_node_idx(z); 5651 } 5652 #endif 5653 5654 static void setup_min_unmapped_ratio(void); 5655 static void setup_min_slab_ratio(void); 5656 #else /* CONFIG_NUMA */ 5657 5658 static void build_zonelists(pg_data_t *pgdat) 5659 { 5660 struct zoneref *zonerefs; 5661 int nr_zones; 5662 5663 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5664 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5665 zonerefs += nr_zones; 5666 5667 zonerefs->zone = NULL; 5668 zonerefs->zone_idx = 0; 5669 } 5670 5671 #endif /* CONFIG_NUMA */ 5672 5673 /* 5674 * Boot pageset table. One per cpu which is going to be used for all 5675 * zones and all nodes. The parameters will be set in such a way 5676 * that an item put on a list will immediately be handed over to 5677 * the buddy list. This is safe since pageset manipulation is done 5678 * with interrupts disabled. 5679 * 5680 * The boot_pagesets must be kept even after bootup is complete for 5681 * unused processors and/or zones. 
They do play a role for bootstrapping 5682 * hotplugged processors. 5683 * 5684 * zoneinfo_show() and maybe other functions do 5685 * not check if the processor is online before following the pageset pointer. 5686 * Other parts of the kernel may not check if the zone is available. 5687 */ 5688 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5689 /* These effectively disable the pcplists in the boot pageset completely */ 5690 #define BOOT_PAGESET_HIGH 0 5691 #define BOOT_PAGESET_BATCH 1 5692 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5693 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5694 5695 static void __build_all_zonelists(void *data) 5696 { 5697 int nid; 5698 int __maybe_unused cpu; 5699 pg_data_t *self = data; 5700 unsigned long flags; 5701 5702 /* 5703 * The zonelist_update_seq must be acquired with irqsave because the 5704 * reader can be invoked from IRQ with GFP_ATOMIC. 5705 */ 5706 write_seqlock_irqsave(&zonelist_update_seq, flags); 5707 /* 5708 * Also disable synchronous printk() to prevent any printk() from 5709 * trying to hold port->lock, for 5710 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5711 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5712 */ 5713 printk_deferred_enter(); 5714 5715 #ifdef CONFIG_NUMA 5716 memset(node_load, 0, sizeof(node_load)); 5717 #endif 5718 5719 /* 5720 * This node is hotadded and no memory is yet present. So just 5721 * building zonelists is fine - no need to touch other nodes. 5722 */ 5723 if (self && !node_online(self->node_id)) { 5724 build_zonelists(self); 5725 } else { 5726 /* 5727 * All possible nodes have pgdat preallocated 5728 * in free_area_init 5729 */ 5730 for_each_node(nid) { 5731 pg_data_t *pgdat = NODE_DATA(nid); 5732 5733 build_zonelists(pgdat); 5734 } 5735 5736 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5737 /* 5738 * We now know the "local memory node" for each node-- 5739 * i.e., the node of the first zone in the generic zonelist. 5740 * Set up numa_mem percpu variable for on-line cpus. During 5741 * boot, only the boot cpu should be on-line; we'll init the 5742 * secondary cpus' numa_mem as they come on-line. During 5743 * node/memory hotplug, we'll fixup all on-line cpus. 5744 */ 5745 for_each_online_cpu(cpu) 5746 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5747 #endif 5748 } 5749 5750 printk_deferred_exit(); 5751 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5752 } 5753 5754 static noinline void __init 5755 build_all_zonelists_init(void) 5756 { 5757 int cpu; 5758 5759 __build_all_zonelists(NULL); 5760 5761 /* 5762 * Initialize the boot_pagesets that are going to be used 5763 * for bootstrapping processors. The real pagesets for 5764 * each zone will be allocated later when the per cpu 5765 * allocator is available. 5766 * 5767 * boot_pagesets are used also for bootstrapping offline 5768 * cpus if the system is already booted because the pagesets 5769 * are needed to initialize allocators on a specific cpu too. 5770 * F.e. the percpu allocator needs the page allocator which 5771 * needs the percpu allocator in order to allocate its pagesets 5772 * (a chicken-egg dilemma). 5773 */ 5774 for_each_possible_cpu(cpu) 5775 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5776 5777 mminit_verify_zonelist(); 5778 cpuset_init_current_mems_allowed(); 5779 } 5780 5781 /* 5782 * unless system_state == SYSTEM_BOOTING. 
5783 * 5784 * __ref due to call of __init annotated helper build_all_zonelists_init 5785 * [protected by SYSTEM_BOOTING]. 5786 */ 5787 void __ref build_all_zonelists(pg_data_t *pgdat) 5788 { 5789 unsigned long vm_total_pages; 5790 5791 if (system_state == SYSTEM_BOOTING) { 5792 build_all_zonelists_init(); 5793 } else { 5794 __build_all_zonelists(pgdat); 5795 /* cpuset refresh routine should be here */ 5796 } 5797 /* Get the number of free pages beyond high watermark in all zones. */ 5798 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5799 /* 5800 * Disable grouping by mobility if the number of pages in the 5801 * system is too low to allow the mechanism to work. It would be 5802 * more accurate, but expensive to check per-zone. This check is 5803 * made on memory-hotadd so a system can start with mobility 5804 * disabled and enable it later 5805 */ 5806 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5807 page_group_by_mobility_disabled = 1; 5808 else 5809 page_group_by_mobility_disabled = 0; 5810 5811 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5812 nr_online_nodes, 5813 str_off_on(page_group_by_mobility_disabled), 5814 vm_total_pages); 5815 #ifdef CONFIG_NUMA 5816 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5817 #endif 5818 } 5819 5820 static int zone_batchsize(struct zone *zone) 5821 { 5822 #ifdef CONFIG_MMU 5823 int batch; 5824 5825 /* 5826 * The number of pages to batch allocate is either ~0.1% 5827 * of the zone or 1MB, whichever is smaller. The batch 5828 * size is striking a balance between allocation latency 5829 * and zone lock contention. 5830 */ 5831 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5832 batch /= 4; /* We effectively *= 4 below */ 5833 if (batch < 1) 5834 batch = 1; 5835 5836 /* 5837 * Clamp the batch to a 2^n - 1 value. Having a power 5838 * of 2 value was found to be more likely to have 5839 * suboptimal cache aliasing properties in some cases. 5840 * 5841 * For example if 2 tasks are alternately allocating 5842 * batches of pages, one task can end up with a lot 5843 * of pages of one half of the possible page colors 5844 * and the other with pages of the other colors. 5845 */ 5846 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5847 5848 return batch; 5849 5850 #else 5851 /* The deferral and batching of frees should be suppressed under NOMMU 5852 * conditions. 5853 * 5854 * The problem is that NOMMU needs to be able to allocate large chunks 5855 * of contiguous memory as there's no hardware page translation to 5856 * assemble apparent contiguous memory from discontiguous pages. 5857 * 5858 * Queueing large contiguous runs of pages for batching, however, 5859 * causes the pages to actually be freed in smaller chunks. As there 5860 * can be a significant delay between the individual batches being 5861 * recycled, this leads to the once large chunks of space being 5862 * fragmented and becoming unavailable for high-order allocations. 5863 */ 5864 return 0; 5865 #endif 5866 } 5867 5868 static int percpu_pagelist_high_fraction; 5869 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5870 int high_fraction) 5871 { 5872 #ifdef CONFIG_MMU 5873 int high; 5874 int nr_split_cpus; 5875 unsigned long total_pages; 5876 5877 if (!high_fraction) { 5878 /* 5879 * By default, the high value of the pcp is based on the zone 5880 * low watermark so that if they are full then background 5881 * reclaim will not be started prematurely. 
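 *
 * Illustrative numbers (not from a real machine): with a zone low
 * watermark of 16384 pages and 8 online CPUs local to the zone, each
 * CPU's default pcp->high starts out at 16384 / 8 = 2048 pages, subject
 * to the batch * 4 floor applied at the end of this function.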
5882 */ 5883 total_pages = low_wmark_pages(zone); 5884 } else { 5885 /* 5886 * If percpu_pagelist_high_fraction is configured, the high 5887 * value is based on a fraction of the managed pages in the 5888 * zone. 5889 */ 5890 total_pages = zone_managed_pages(zone) / high_fraction; 5891 } 5892 5893 /* 5894 * Split the high value across all online CPUs local to the zone. Note 5895 * that early in boot that CPUs may not be online yet and that during 5896 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5897 * onlined. For memory nodes that have no CPUs, split the high value 5898 * across all online CPUs to mitigate the risk that reclaim is triggered 5899 * prematurely due to pages stored on pcp lists. 5900 */ 5901 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5902 if (!nr_split_cpus) 5903 nr_split_cpus = num_online_cpus(); 5904 high = total_pages / nr_split_cpus; 5905 5906 /* 5907 * Ensure high is at least batch*4. The multiple is based on the 5908 * historical relationship between high and batch. 5909 */ 5910 high = max(high, batch << 2); 5911 5912 return high; 5913 #else 5914 return 0; 5915 #endif 5916 } 5917 5918 /* 5919 * pcp->high and pcp->batch values are related and generally batch is lower 5920 * than high. They are also related to pcp->count such that count is lower 5921 * than high, and as soon as it reaches high, the pcplist is flushed. 5922 * 5923 * However, guaranteeing these relations at all times would require e.g. write 5924 * barriers here but also careful usage of read barriers at the read side, and 5925 * thus be prone to error and bad for performance. Thus the update only prevents 5926 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5927 * should ensure they can cope with those fields changing asynchronously, and 5928 * fully trust only the pcp->count field on the local CPU with interrupts 5929 * disabled. 5930 * 5931 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5932 * outside of boot time (or some other assurance that no concurrent updaters 5933 * exist). 5934 */ 5935 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5936 unsigned long high_max, unsigned long batch) 5937 { 5938 WRITE_ONCE(pcp->batch, batch); 5939 WRITE_ONCE(pcp->high_min, high_min); 5940 WRITE_ONCE(pcp->high_max, high_max); 5941 } 5942 5943 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5944 { 5945 int pindex; 5946 5947 memset(pcp, 0, sizeof(*pcp)); 5948 memset(pzstats, 0, sizeof(*pzstats)); 5949 5950 spin_lock_init(&pcp->lock); 5951 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5952 INIT_LIST_HEAD(&pcp->lists[pindex]); 5953 5954 /* 5955 * Set batch and high values safe for a boot pageset. A true percpu 5956 * pageset's initialization will update them subsequently. Here we don't 5957 * need to be as careful as pageset_update() as nobody can access the 5958 * pageset yet. 
5959 */ 5960 pcp->high_min = BOOT_PAGESET_HIGH; 5961 pcp->high_max = BOOT_PAGESET_HIGH; 5962 pcp->batch = BOOT_PAGESET_BATCH; 5963 } 5964 5965 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 5966 unsigned long high_max, unsigned long batch) 5967 { 5968 struct per_cpu_pages *pcp; 5969 int cpu; 5970 5971 for_each_possible_cpu(cpu) { 5972 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5973 pageset_update(pcp, high_min, high_max, batch); 5974 } 5975 } 5976 5977 /* 5978 * Calculate and set new high and batch values for all per-cpu pagesets of a 5979 * zone based on the zone's size. 5980 */ 5981 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5982 { 5983 int new_high_min, new_high_max, new_batch; 5984 5985 new_batch = max(1, zone_batchsize(zone)); 5986 if (percpu_pagelist_high_fraction) { 5987 new_high_min = zone_highsize(zone, new_batch, cpu_online, 5988 percpu_pagelist_high_fraction); 5989 /* 5990 * PCP high is tuned manually, disable auto-tuning via 5991 * setting high_min and high_max to the manual value. 5992 */ 5993 new_high_max = new_high_min; 5994 } else { 5995 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 5996 new_high_max = zone_highsize(zone, new_batch, cpu_online, 5997 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 5998 } 5999 6000 if (zone->pageset_high_min == new_high_min && 6001 zone->pageset_high_max == new_high_max && 6002 zone->pageset_batch == new_batch) 6003 return; 6004 6005 zone->pageset_high_min = new_high_min; 6006 zone->pageset_high_max = new_high_max; 6007 zone->pageset_batch = new_batch; 6008 6009 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 6010 new_batch); 6011 } 6012 6013 void __meminit setup_zone_pageset(struct zone *zone) 6014 { 6015 int cpu; 6016 6017 /* Size may be 0 on !SMP && !NUMA */ 6018 if (sizeof(struct per_cpu_zonestat) > 0) 6019 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 6020 6021 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 6022 for_each_possible_cpu(cpu) { 6023 struct per_cpu_pages *pcp; 6024 struct per_cpu_zonestat *pzstats; 6025 6026 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6027 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6028 per_cpu_pages_init(pcp, pzstats); 6029 } 6030 6031 zone_set_pageset_high_and_batch(zone, 0); 6032 } 6033 6034 /* 6035 * The zone indicated has a new number of managed_pages; batch sizes and percpu 6036 * page high values need to be recalculated. 6037 */ 6038 static void zone_pcp_update(struct zone *zone, int cpu_online) 6039 { 6040 mutex_lock(&pcp_batch_high_lock); 6041 zone_set_pageset_high_and_batch(zone, cpu_online); 6042 mutex_unlock(&pcp_batch_high_lock); 6043 } 6044 6045 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 6046 { 6047 struct per_cpu_pages *pcp; 6048 struct cpu_cacheinfo *cci; 6049 6050 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6051 cci = get_cpu_cacheinfo(cpu); 6052 /* 6053 * If data cache slice of CPU is large enough, "pcp->batch" 6054 * pages can be preserved in PCP before draining PCP for 6055 * consecutive high-order pages freeing without allocation. 6056 * This can reduce zone lock contention without hurting 6057 * cache-hot pages sharing. 
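 *
 * Illustrative example (assumes 4KiB pages): with pcp->batch == 63, the
 * PCPF_FREE_HIGH_BATCH flag below is only set when the per-CPU data
 * cache slice exceeds 3 * 63 pages, i.e. roughly 756KiB of cache per CPU.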
6058 */ 6059 spin_lock(&pcp->lock); 6060 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 6061 pcp->flags |= PCPF_FREE_HIGH_BATCH; 6062 else 6063 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 6064 spin_unlock(&pcp->lock); 6065 } 6066 6067 void setup_pcp_cacheinfo(unsigned int cpu) 6068 { 6069 struct zone *zone; 6070 6071 for_each_populated_zone(zone) 6072 zone_pcp_update_cacheinfo(zone, cpu); 6073 } 6074 6075 /* 6076 * Allocate per cpu pagesets and initialize them. 6077 * Before this call only boot pagesets were available. 6078 */ 6079 void __init setup_per_cpu_pageset(void) 6080 { 6081 struct pglist_data *pgdat; 6082 struct zone *zone; 6083 int __maybe_unused cpu; 6084 6085 for_each_populated_zone(zone) 6086 setup_zone_pageset(zone); 6087 6088 #ifdef CONFIG_NUMA 6089 /* 6090 * Unpopulated zones continue using the boot pagesets. 6091 * The numa stats for these pagesets need to be reset. 6092 * Otherwise, they will end up skewing the stats of 6093 * the nodes these zones are associated with. 6094 */ 6095 for_each_possible_cpu(cpu) { 6096 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 6097 memset(pzstats->vm_numa_event, 0, 6098 sizeof(pzstats->vm_numa_event)); 6099 } 6100 #endif 6101 6102 for_each_online_pgdat(pgdat) 6103 pgdat->per_cpu_nodestats = 6104 alloc_percpu(struct per_cpu_nodestat); 6105 } 6106 6107 __meminit void zone_pcp_init(struct zone *zone) 6108 { 6109 /* 6110 * per cpu subsystem is not up at this point. The following code 6111 * relies on the ability of the linker to provide the 6112 * offset of a (static) per cpu variable into the per cpu area. 6113 */ 6114 zone->per_cpu_pageset = &boot_pageset; 6115 zone->per_cpu_zonestats = &boot_zonestats; 6116 zone->pageset_high_min = BOOT_PAGESET_HIGH; 6117 zone->pageset_high_max = BOOT_PAGESET_HIGH; 6118 zone->pageset_batch = BOOT_PAGESET_BATCH; 6119 6120 if (populated_zone(zone)) 6121 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 6122 zone->present_pages, zone_batchsize(zone)); 6123 } 6124 6125 static void setup_per_zone_lowmem_reserve(void); 6126 6127 void adjust_managed_page_count(struct page *page, long count) 6128 { 6129 atomic_long_add(count, &page_zone(page)->managed_pages); 6130 totalram_pages_add(count); 6131 setup_per_zone_lowmem_reserve(); 6132 } 6133 EXPORT_SYMBOL(adjust_managed_page_count); 6134 6135 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 6136 { 6137 void *pos; 6138 unsigned long pages = 0; 6139 6140 start = (void *)PAGE_ALIGN((unsigned long)start); 6141 end = (void *)((unsigned long)end & PAGE_MASK); 6142 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6143 struct page *page = virt_to_page(pos); 6144 void *direct_map_addr; 6145 6146 /* 6147 * 'direct_map_addr' might be different from 'pos' 6148 * because some architectures' virt_to_page() 6149 * work with aliases. Getting the direct map 6150 * address ensures that we get a _writeable_ 6151 * alias for the memset(). 6152 */ 6153 direct_map_addr = page_address(page); 6154 /* 6155 * Perform a kasan-unchecked memset() since this memory 6156 * has not been initialized. 
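 *
 * Note (illustrative): a caller passing a poison value in the 0x00-0xFF
 * range (e.g. POISON_FREE_INITMEM) gets every page filled with that byte
 * below, while a negative poison value skips the memset entirely.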
6157 */ 6158 direct_map_addr = kasan_reset_tag(direct_map_addr); 6159 if ((unsigned int)poison <= 0xFF) 6160 memset(direct_map_addr, poison, PAGE_SIZE); 6161 6162 free_reserved_page(page); 6163 } 6164 6165 if (pages && s) 6166 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 6167 6168 return pages; 6169 } 6170 6171 void free_reserved_page(struct page *page) 6172 { 6173 clear_page_tag_ref(page); 6174 ClearPageReserved(page); 6175 init_page_count(page); 6176 __free_page(page); 6177 adjust_managed_page_count(page, 1); 6178 } 6179 EXPORT_SYMBOL(free_reserved_page); 6180 6181 static int page_alloc_cpu_dead(unsigned int cpu) 6182 { 6183 struct zone *zone; 6184 6185 lru_add_drain_cpu(cpu); 6186 mlock_drain_remote(cpu); 6187 drain_pages(cpu); 6188 6189 /* 6190 * Spill the event counters of the dead processor 6191 * into the current processors event counters. 6192 * This artificially elevates the count of the current 6193 * processor. 6194 */ 6195 vm_events_fold_cpu(cpu); 6196 6197 /* 6198 * Zero the differential counters of the dead processor 6199 * so that the vm statistics are consistent. 6200 * 6201 * This is only okay since the processor is dead and cannot 6202 * race with what we are doing. 6203 */ 6204 cpu_vm_stats_fold(cpu); 6205 6206 for_each_populated_zone(zone) 6207 zone_pcp_update(zone, 0); 6208 6209 return 0; 6210 } 6211 6212 static int page_alloc_cpu_online(unsigned int cpu) 6213 { 6214 struct zone *zone; 6215 6216 for_each_populated_zone(zone) 6217 zone_pcp_update(zone, 1); 6218 return 0; 6219 } 6220 6221 void __init page_alloc_init_cpuhp(void) 6222 { 6223 int ret; 6224 6225 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 6226 "mm/page_alloc:pcp", 6227 page_alloc_cpu_online, 6228 page_alloc_cpu_dead); 6229 WARN_ON(ret < 0); 6230 } 6231 6232 /* 6233 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6234 * or min_free_kbytes changes. 6235 */ 6236 static void calculate_totalreserve_pages(void) 6237 { 6238 struct pglist_data *pgdat; 6239 unsigned long reserve_pages = 0; 6240 enum zone_type i, j; 6241 6242 for_each_online_pgdat(pgdat) { 6243 6244 pgdat->totalreserve_pages = 0; 6245 6246 for (i = 0; i < MAX_NR_ZONES; i++) { 6247 struct zone *zone = pgdat->node_zones + i; 6248 long max = 0; 6249 unsigned long managed_pages = zone_managed_pages(zone); 6250 6251 /* Find valid and maximum lowmem_reserve in the zone */ 6252 for (j = i; j < MAX_NR_ZONES; j++) 6253 max = max(max, zone->lowmem_reserve[j]); 6254 6255 /* we treat the high watermark as reserved pages. */ 6256 max += high_wmark_pages(zone); 6257 6258 max = min_t(unsigned long, max, managed_pages); 6259 6260 pgdat->totalreserve_pages += max; 6261 6262 reserve_pages += max; 6263 } 6264 } 6265 totalreserve_pages = reserve_pages; 6266 trace_mm_calculate_totalreserve_pages(totalreserve_pages); 6267 } 6268 6269 /* 6270 * setup_per_zone_lowmem_reserve - called whenever 6271 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6272 * has a correct pages reserved value, so an adequate number of 6273 * pages are left in the zone after a successful __alloc_pages(). 
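 *
 * Worked example (illustrative numbers): with a lowmem_reserve_ratio of
 * 256 for ZONE_DMA32 and 4,000,000 managed pages in the zones above it,
 * an allocation that could have been satisfied from those higher zones
 * must leave roughly 4,000,000 / 256 = 15,625 extra free pages in DMA32
 * before the allocator will fall back to it.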
6274 */ 6275 static void setup_per_zone_lowmem_reserve(void) 6276 { 6277 struct pglist_data *pgdat; 6278 enum zone_type i, j; 6279 6280 for_each_online_pgdat(pgdat) { 6281 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 6282 struct zone *zone = &pgdat->node_zones[i]; 6283 int ratio = sysctl_lowmem_reserve_ratio[i]; 6284 bool clear = !ratio || !zone_managed_pages(zone); 6285 unsigned long managed_pages = 0; 6286 6287 for (j = i + 1; j < MAX_NR_ZONES; j++) { 6288 struct zone *upper_zone = &pgdat->node_zones[j]; 6289 6290 managed_pages += zone_managed_pages(upper_zone); 6291 6292 if (clear) 6293 zone->lowmem_reserve[j] = 0; 6294 else 6295 zone->lowmem_reserve[j] = managed_pages / ratio; 6296 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, 6297 zone->lowmem_reserve[j]); 6298 } 6299 } 6300 } 6301 6302 /* update totalreserve_pages */ 6303 calculate_totalreserve_pages(); 6304 } 6305 6306 static void __setup_per_zone_wmarks(void) 6307 { 6308 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6309 unsigned long lowmem_pages = 0; 6310 struct zone *zone; 6311 unsigned long flags; 6312 6313 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 6314 for_each_zone(zone) { 6315 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 6316 lowmem_pages += zone_managed_pages(zone); 6317 } 6318 6319 for_each_zone(zone) { 6320 u64 tmp; 6321 6322 spin_lock_irqsave(&zone->lock, flags); 6323 tmp = (u64)pages_min * zone_managed_pages(zone); 6324 tmp = div64_ul(tmp, lowmem_pages); 6325 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 6326 /* 6327 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6328 * need highmem and movable zones pages, so cap pages_min 6329 * to a small value here. 6330 * 6331 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6332 * deltas control async page reclaim, and so should 6333 * not be capped for highmem and movable zones. 6334 */ 6335 unsigned long min_pages; 6336 6337 min_pages = zone_managed_pages(zone) / 1024; 6338 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6339 zone->_watermark[WMARK_MIN] = min_pages; 6340 } else { 6341 /* 6342 * If it's a lowmem zone, reserve a number of pages 6343 * proportionate to the zone's size. 6344 */ 6345 zone->_watermark[WMARK_MIN] = tmp; 6346 } 6347 6348 /* 6349 * Set the kswapd watermarks distance according to the 6350 * scale factor in proportion to available memory, but 6351 * ensure a minimum size on small systems. 6352 */ 6353 tmp = max_t(u64, tmp >> 2, 6354 mult_frac(zone_managed_pages(zone), 6355 watermark_scale_factor, 10000)); 6356 6357 zone->watermark_boost = 0; 6358 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6359 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 6360 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 6361 trace_mm_setup_per_zone_wmarks(zone); 6362 6363 spin_unlock_irqrestore(&zone->lock, flags); 6364 } 6365 6366 /* update totalreserve_pages */ 6367 calculate_totalreserve_pages(); 6368 } 6369 6370 /** 6371 * setup_per_zone_wmarks - called when min_free_kbytes changes 6372 * or when memory is hot-{added|removed} 6373 * 6374 * Ensures that the watermark[min,low,high] values for each zone are set 6375 * correctly with respect to min_free_kbytes. 
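 *
 * Worked example (illustrative numbers, single lowmem zone): if
 * min_free_kbytes corresponds to 16384 pages and the zone has 4,000,000
 * managed pages, __setup_per_zone_wmarks() above sets WMARK_MIN = 16384;
 * with a watermark_scale_factor of 10 the distance becomes
 * max(16384 / 4, 4,000,000 * 10 / 10000) = 4096, so WMARK_LOW = 20480
 * and WMARK_HIGH = 24576.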
6376 */ 6377 void setup_per_zone_wmarks(void) 6378 { 6379 struct zone *zone; 6380 static DEFINE_SPINLOCK(lock); 6381 6382 spin_lock(&lock); 6383 __setup_per_zone_wmarks(); 6384 spin_unlock(&lock); 6385 6386 /* 6387 * The watermark size have changed so update the pcpu batch 6388 * and high limits or the limits may be inappropriate. 6389 */ 6390 for_each_zone(zone) 6391 zone_pcp_update(zone, 0); 6392 } 6393 6394 /* 6395 * Initialise min_free_kbytes. 6396 * 6397 * For small machines we want it small (128k min). For large machines 6398 * we want it large (256MB max). But it is not linear, because network 6399 * bandwidth does not increase linearly with machine size. We use 6400 * 6401 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6402 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6403 * 6404 * which yields 6405 * 6406 * 16MB: 512k 6407 * 32MB: 724k 6408 * 64MB: 1024k 6409 * 128MB: 1448k 6410 * 256MB: 2048k 6411 * 512MB: 2896k 6412 * 1024MB: 4096k 6413 * 2048MB: 5792k 6414 * 4096MB: 8192k 6415 * 8192MB: 11584k 6416 * 16384MB: 16384k 6417 */ 6418 void calculate_min_free_kbytes(void) 6419 { 6420 unsigned long lowmem_kbytes; 6421 int new_min_free_kbytes; 6422 6423 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6424 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6425 6426 if (new_min_free_kbytes > user_min_free_kbytes) 6427 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6428 else 6429 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6430 new_min_free_kbytes, user_min_free_kbytes); 6431 6432 } 6433 6434 int __meminit init_per_zone_wmark_min(void) 6435 { 6436 calculate_min_free_kbytes(); 6437 setup_per_zone_wmarks(); 6438 refresh_zone_stat_thresholds(); 6439 setup_per_zone_lowmem_reserve(); 6440 6441 #ifdef CONFIG_NUMA 6442 setup_min_unmapped_ratio(); 6443 setup_min_slab_ratio(); 6444 #endif 6445 6446 khugepaged_min_free_kbytes_update(); 6447 6448 return 0; 6449 } 6450 postcore_initcall(init_per_zone_wmark_min) 6451 6452 /* 6453 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6454 * that we can call two helper functions whenever min_free_kbytes 6455 * changes. 
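 *
 * For example (illustrative): `echo 65536 > /proc/sys/vm/min_free_kbytes`
 * ends up here with write set, records the value in user_min_free_kbytes
 * and rebuilds every zone's watermarks via setup_per_zone_wmarks().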
6456 */ 6457 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6458 void *buffer, size_t *length, loff_t *ppos) 6459 { 6460 int rc; 6461 6462 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6463 if (rc) 6464 return rc; 6465 6466 if (write) { 6467 user_min_free_kbytes = min_free_kbytes; 6468 setup_per_zone_wmarks(); 6469 } 6470 return 0; 6471 } 6472 6473 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6474 void *buffer, size_t *length, loff_t *ppos) 6475 { 6476 int rc; 6477 6478 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6479 if (rc) 6480 return rc; 6481 6482 if (write) 6483 setup_per_zone_wmarks(); 6484 6485 return 0; 6486 } 6487 6488 #ifdef CONFIG_NUMA 6489 static void setup_min_unmapped_ratio(void) 6490 { 6491 pg_data_t *pgdat; 6492 struct zone *zone; 6493 6494 for_each_online_pgdat(pgdat) 6495 pgdat->min_unmapped_pages = 0; 6496 6497 for_each_zone(zone) 6498 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6499 sysctl_min_unmapped_ratio) / 100; 6500 } 6501 6502 6503 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6504 void *buffer, size_t *length, loff_t *ppos) 6505 { 6506 int rc; 6507 6508 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6509 if (rc) 6510 return rc; 6511 6512 setup_min_unmapped_ratio(); 6513 6514 return 0; 6515 } 6516 6517 static void setup_min_slab_ratio(void) 6518 { 6519 pg_data_t *pgdat; 6520 struct zone *zone; 6521 6522 for_each_online_pgdat(pgdat) 6523 pgdat->min_slab_pages = 0; 6524 6525 for_each_zone(zone) 6526 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6527 sysctl_min_slab_ratio) / 100; 6528 } 6529 6530 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6531 void *buffer, size_t *length, loff_t *ppos) 6532 { 6533 int rc; 6534 6535 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6536 if (rc) 6537 return rc; 6538 6539 setup_min_slab_ratio(); 6540 6541 return 0; 6542 } 6543 #endif 6544 6545 /* 6546 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6547 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 6548 * whenever sysctl_lowmem_reserve_ratio changes. 6549 * 6550 * The reserve ratio obviously has absolutely no relation with the 6551 * minimum watermarks. The lowmem reserve ratio can only make sense 6552 * if in function of the boot time zone sizes. 6553 */ 6554 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6555 int write, void *buffer, size_t *length, loff_t *ppos) 6556 { 6557 int i; 6558 6559 proc_dointvec_minmax(table, write, buffer, length, ppos); 6560 6561 for (i = 0; i < MAX_NR_ZONES; i++) { 6562 if (sysctl_lowmem_reserve_ratio[i] < 1) 6563 sysctl_lowmem_reserve_ratio[i] = 0; 6564 } 6565 6566 setup_per_zone_lowmem_reserve(); 6567 return 0; 6568 } 6569 6570 /* 6571 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6572 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6573 * pagelist can have before it gets flushed back to buddy allocator. 
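 *
 * Worked example (illustrative numbers): writing 8 to
 * /proc/sys/vm/percpu_pagelist_high_fraction on a zone with 4,000,000
 * managed pages and 8 local CPUs caps each CPU's pcp->high at roughly
 * 4,000,000 / 8 / 8 = 62,500 pages, and disables the min/max auto-tuning
 * by setting both limits to that value.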
6574 */ 6575 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6576 int write, void *buffer, size_t *length, loff_t *ppos) 6577 { 6578 struct zone *zone; 6579 int old_percpu_pagelist_high_fraction; 6580 int ret; 6581 6582 mutex_lock(&pcp_batch_high_lock); 6583 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6584 6585 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6586 if (!write || ret < 0) 6587 goto out; 6588 6589 /* Sanity checking to avoid pcp imbalance */ 6590 if (percpu_pagelist_high_fraction && 6591 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6592 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6593 ret = -EINVAL; 6594 goto out; 6595 } 6596 6597 /* No change? */ 6598 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6599 goto out; 6600 6601 for_each_populated_zone(zone) 6602 zone_set_pageset_high_and_batch(zone, 0); 6603 out: 6604 mutex_unlock(&pcp_batch_high_lock); 6605 return ret; 6606 } 6607 6608 static const struct ctl_table page_alloc_sysctl_table[] = { 6609 { 6610 .procname = "min_free_kbytes", 6611 .data = &min_free_kbytes, 6612 .maxlen = sizeof(min_free_kbytes), 6613 .mode = 0644, 6614 .proc_handler = min_free_kbytes_sysctl_handler, 6615 .extra1 = SYSCTL_ZERO, 6616 }, 6617 { 6618 .procname = "watermark_boost_factor", 6619 .data = &watermark_boost_factor, 6620 .maxlen = sizeof(watermark_boost_factor), 6621 .mode = 0644, 6622 .proc_handler = proc_dointvec_minmax, 6623 .extra1 = SYSCTL_ZERO, 6624 }, 6625 { 6626 .procname = "watermark_scale_factor", 6627 .data = &watermark_scale_factor, 6628 .maxlen = sizeof(watermark_scale_factor), 6629 .mode = 0644, 6630 .proc_handler = watermark_scale_factor_sysctl_handler, 6631 .extra1 = SYSCTL_ONE, 6632 .extra2 = SYSCTL_THREE_THOUSAND, 6633 }, 6634 { 6635 .procname = "defrag_mode", 6636 .data = &defrag_mode, 6637 .maxlen = sizeof(defrag_mode), 6638 .mode = 0644, 6639 .proc_handler = proc_dointvec_minmax, 6640 .extra1 = SYSCTL_ZERO, 6641 .extra2 = SYSCTL_ONE, 6642 }, 6643 { 6644 .procname = "percpu_pagelist_high_fraction", 6645 .data = &percpu_pagelist_high_fraction, 6646 .maxlen = sizeof(percpu_pagelist_high_fraction), 6647 .mode = 0644, 6648 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6649 .extra1 = SYSCTL_ZERO, 6650 }, 6651 { 6652 .procname = "lowmem_reserve_ratio", 6653 .data = &sysctl_lowmem_reserve_ratio, 6654 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6655 .mode = 0644, 6656 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6657 }, 6658 #ifdef CONFIG_NUMA 6659 { 6660 .procname = "numa_zonelist_order", 6661 .data = &numa_zonelist_order, 6662 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6663 .mode = 0644, 6664 .proc_handler = numa_zonelist_order_handler, 6665 }, 6666 { 6667 .procname = "min_unmapped_ratio", 6668 .data = &sysctl_min_unmapped_ratio, 6669 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6670 .mode = 0644, 6671 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6672 .extra1 = SYSCTL_ZERO, 6673 .extra2 = SYSCTL_ONE_HUNDRED, 6674 }, 6675 { 6676 .procname = "min_slab_ratio", 6677 .data = &sysctl_min_slab_ratio, 6678 .maxlen = sizeof(sysctl_min_slab_ratio), 6679 .mode = 0644, 6680 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6681 .extra1 = SYSCTL_ZERO, 6682 .extra2 = SYSCTL_ONE_HUNDRED, 6683 }, 6684 #endif 6685 }; 6686 6687 void __init page_alloc_sysctl_init(void) 6688 { 6689 register_sysctl_init("vm", page_alloc_sysctl_table); 6690 } 6691 6692 #ifdef 
CONFIG_CONTIG_ALLOC 6693 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6694 static void alloc_contig_dump_pages(struct list_head *page_list) 6695 { 6696 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6697 6698 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 6699 struct page *page; 6700 6701 dump_stack(); 6702 list_for_each_entry(page, page_list, lru) 6703 dump_page(page, "migration failure"); 6704 } 6705 } 6706 6707 /* [start, end) must belong to a single zone. */ 6708 static int __alloc_contig_migrate_range(struct compact_control *cc, 6709 unsigned long start, unsigned long end) 6710 { 6711 /* This function is based on compact_zone() from compaction.c. */ 6712 unsigned int nr_reclaimed; 6713 unsigned long pfn = start; 6714 unsigned int tries = 0; 6715 int ret = 0; 6716 struct migration_target_control mtc = { 6717 .nid = zone_to_nid(cc->zone), 6718 .gfp_mask = cc->gfp_mask, 6719 .reason = MR_CONTIG_RANGE, 6720 }; 6721 6722 lru_cache_disable(); 6723 6724 while (pfn < end || !list_empty(&cc->migratepages)) { 6725 if (fatal_signal_pending(current)) { 6726 ret = -EINTR; 6727 break; 6728 } 6729 6730 if (list_empty(&cc->migratepages)) { 6731 cc->nr_migratepages = 0; 6732 ret = isolate_migratepages_range(cc, pfn, end); 6733 if (ret && ret != -EAGAIN) 6734 break; 6735 pfn = cc->migrate_pfn; 6736 tries = 0; 6737 } else if (++tries == 5) { 6738 ret = -EBUSY; 6739 break; 6740 } 6741 6742 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6743 &cc->migratepages); 6744 cc->nr_migratepages -= nr_reclaimed; 6745 6746 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6747 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6748 6749 /* 6750 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6751 * to retry again over this error, so do the same here. 6752 */ 6753 if (ret == -ENOMEM) 6754 break; 6755 } 6756 6757 lru_cache_enable(); 6758 if (ret < 0) { 6759 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6760 alloc_contig_dump_pages(&cc->migratepages); 6761 putback_movable_pages(&cc->migratepages); 6762 } 6763 6764 return (ret < 0) ? ret : 0; 6765 } 6766 6767 static void split_free_pages(struct list_head *list, gfp_t gfp_mask) 6768 { 6769 int order; 6770 6771 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6772 struct page *page, *next; 6773 int nr_pages = 1 << order; 6774 6775 list_for_each_entry_safe(page, next, &list[order], lru) { 6776 int i; 6777 6778 post_alloc_hook(page, order, gfp_mask); 6779 set_page_refcounted(page); 6780 if (!order) 6781 continue; 6782 6783 split_page(page, order); 6784 6785 /* Add all subpages to the order-0 head, in sequence. */ 6786 list_del(&page->lru); 6787 for (i = 0; i < nr_pages; i++) 6788 list_add_tail(&page[i].lru, &list[0]); 6789 } 6790 } 6791 } 6792 6793 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) 6794 { 6795 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 6796 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | 6797 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO; 6798 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 6799 6800 /* 6801 * We are given the range to allocate; node, mobility and placement 6802 * hints are irrelevant at this point. We'll simply ignore them. 6803 */ 6804 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | 6805 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE); 6806 6807 /* 6808 * We only support most reclaim flags (but not NOFAIL/NORETRY), and 6809 * selected action flags. 
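 *
 * For example (illustrative): GFP_KERNEL | __GFP_ZERO passes this check,
 * while GFP_KERNEL | __GFP_NOFAIL is rejected with -EINVAL because
 * __GFP_NOFAIL is in neither reclaim_mask nor action_mask.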
6810 */ 6811 if (gfp_mask & ~(reclaim_mask | action_mask)) 6812 return -EINVAL; 6813 6814 /* 6815 * Flags to control page compaction/migration/reclaim, to free up our 6816 * page range. Migratable pages are movable, __GFP_MOVABLE is implied 6817 * for them. 6818 * 6819 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that 6820 * to not degrade callers. 6821 */ 6822 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | 6823 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; 6824 return 0; 6825 } 6826 6827 /** 6828 * alloc_contig_range() -- tries to allocate given range of pages 6829 * @start: start PFN to allocate 6830 * @end: one-past-the-last PFN to allocate 6831 * @alloc_flags: allocation information 6832 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some 6833 * action and reclaim modifiers are supported. Reclaim modifiers 6834 * control allocation behavior during compaction/migration/reclaim. 6835 * 6836 * The PFN range does not have to be pageblock aligned. The PFN range must 6837 * belong to a single zone. 6838 * 6839 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6840 * pageblocks in the range. Once isolated, the pageblocks should not 6841 * be modified by others. 6842 * 6843 * Return: zero on success or negative error code. On success all 6844 * pages which PFN is in [start, end) are allocated for the caller and 6845 * need to be freed with free_contig_range(). 6846 */ 6847 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 6848 acr_flags_t alloc_flags, gfp_t gfp_mask) 6849 { 6850 const unsigned int order = ilog2(end - start); 6851 unsigned long outer_start, outer_end; 6852 int ret = 0; 6853 6854 struct compact_control cc = { 6855 .nr_migratepages = 0, 6856 .order = -1, 6857 .zone = page_zone(pfn_to_page(start)), 6858 .mode = MIGRATE_SYNC, 6859 .ignore_skip_hint = true, 6860 .no_set_skip_hint = true, 6861 .alloc_contig = true, 6862 }; 6863 INIT_LIST_HEAD(&cc.migratepages); 6864 enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ? 6865 PB_ISOLATE_MODE_CMA_ALLOC : 6866 PB_ISOLATE_MODE_OTHER; 6867 6868 /* 6869 * In contrast to the buddy, we allow for orders here that exceed 6870 * MAX_PAGE_ORDER, so we must manually make sure that we are not 6871 * exceeding the maximum folio order. 6872 */ 6873 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER)) 6874 return -EINVAL; 6875 6876 gfp_mask = current_gfp_context(gfp_mask); 6877 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) 6878 return -EINVAL; 6879 6880 /* 6881 * What we do here is we mark all pageblocks in range as 6882 * MIGRATE_ISOLATE. Because pageblock and max order pages may 6883 * have different sizes, and due to the way page allocator 6884 * work, start_isolate_page_range() has special handlings for this. 6885 * 6886 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6887 * migrate the pages from an unaligned range (ie. pages that 6888 * we are interested in). This will put all the pages in 6889 * range back to page allocator as MIGRATE_ISOLATE. 6890 * 6891 * When this is done, we take the pages in range from page 6892 * allocator removing them from the buddy system. This way 6893 * page allocator will never consider using them. 6894 * 6895 * This lets us mark the pageblocks back as 6896 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6897 * aligned range but not in the unaligned, original range are 6898 * put back to page allocator so that buddy can use them. 
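 *
 * A minimal caller sketch (illustrative; CMA and gigantic page
 * allocation are the typical in-tree users of this interface):
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, ACR_FLAGS_NONE,
 *				 GFP_KERNEL);
 *	if (!ret)
 *		free_contig_range(pfn, nr_pages);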
6899 */ 6900 6901 ret = start_isolate_page_range(start, end, mode); 6902 if (ret) 6903 goto done; 6904 6905 drain_all_pages(cc.zone); 6906 6907 /* 6908 * In case of -EBUSY, we'd like to know which page causes problem. 6909 * So, just fall through. test_pages_isolated() has a tracepoint 6910 * which will report the busy page. 6911 * 6912 * It is possible that busy pages could become available before 6913 * the call to test_pages_isolated, and the range will actually be 6914 * allocated. So, if we fall through be sure to clear ret so that 6915 * -EBUSY is not accidentally used or returned to caller. 6916 */ 6917 ret = __alloc_contig_migrate_range(&cc, start, end); 6918 if (ret && ret != -EBUSY) 6919 goto done; 6920 6921 /* 6922 * When in-use hugetlb pages are migrated, they may simply be released 6923 * back into the free hugepage pool instead of being returned to the 6924 * buddy system. After the migration of in-use huge pages is completed, 6925 * we will invoke replace_free_hugepage_folios() to ensure that these 6926 * hugepages are properly released to the buddy system. 6927 */ 6928 ret = replace_free_hugepage_folios(start, end); 6929 if (ret) 6930 goto done; 6931 6932 /* 6933 * Pages from [start, end) are within a pageblock_nr_pages 6934 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6935 * more, all pages in [start, end) are free in page allocator. 6936 * What we are going to do is to allocate all pages from 6937 * [start, end) (that is remove them from page allocator). 6938 * 6939 * The only problem is that pages at the beginning and at the 6940 * end of interesting range may be not aligned with pages that 6941 * page allocator holds, ie. they can be part of higher order 6942 * pages. Because of this, we reserve the bigger range and 6943 * once this is done free the pages we are not interested in. 6944 * 6945 * We don't have to hold zone->lock here because the pages are 6946 * isolated thus they won't get removed from buddy. 6947 */ 6948 outer_start = find_large_buddy(start); 6949 6950 /* Make sure the range is really isolated. */ 6951 if (test_pages_isolated(outer_start, end, mode)) { 6952 ret = -EBUSY; 6953 goto done; 6954 } 6955 6956 /* Grab isolated pages from freelists. 
*/ 6957 outer_end = isolate_freepages_range(&cc, outer_start, end); 6958 if (!outer_end) { 6959 ret = -EBUSY; 6960 goto done; 6961 } 6962 6963 if (!(gfp_mask & __GFP_COMP)) { 6964 split_free_pages(cc.freepages, gfp_mask); 6965 6966 /* Free head and tail (if any) */ 6967 if (start != outer_start) 6968 free_contig_range(outer_start, start - outer_start); 6969 if (end != outer_end) 6970 free_contig_range(end, outer_end - end); 6971 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 6972 struct page *head = pfn_to_page(start); 6973 6974 check_new_pages(head, order); 6975 prep_new_page(head, order, gfp_mask, 0); 6976 set_page_refcounted(head); 6977 } else { 6978 ret = -EINVAL; 6979 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 6980 start, end, outer_start, outer_end); 6981 } 6982 done: 6983 undo_isolate_page_range(start, end); 6984 return ret; 6985 } 6986 EXPORT_SYMBOL(alloc_contig_range_noprof); 6987 6988 static int __alloc_contig_pages(unsigned long start_pfn, 6989 unsigned long nr_pages, gfp_t gfp_mask) 6990 { 6991 unsigned long end_pfn = start_pfn + nr_pages; 6992 6993 return alloc_contig_range_noprof(start_pfn, end_pfn, ACR_FLAGS_NONE, 6994 gfp_mask); 6995 } 6996 6997 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6998 unsigned long nr_pages) 6999 { 7000 unsigned long i, end_pfn = start_pfn + nr_pages; 7001 struct page *page; 7002 7003 for (i = start_pfn; i < end_pfn; i++) { 7004 page = pfn_to_online_page(i); 7005 if (!page) 7006 return false; 7007 7008 if (page_zone(page) != z) 7009 return false; 7010 7011 if (PageReserved(page)) 7012 return false; 7013 7014 if (PageHuge(page)) 7015 return false; 7016 } 7017 return true; 7018 } 7019 7020 static bool zone_spans_last_pfn(const struct zone *zone, 7021 unsigned long start_pfn, unsigned long nr_pages) 7022 { 7023 unsigned long last_pfn = start_pfn + nr_pages - 1; 7024 7025 return zone_spans_pfn(zone, last_pfn); 7026 } 7027 7028 /** 7029 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 7030 * @nr_pages: Number of contiguous pages to allocate 7031 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some 7032 * action and reclaim modifiers are supported. Reclaim modifiers 7033 * control allocation behavior during compaction/migration/reclaim. 7034 * @nid: Target node 7035 * @nodemask: Mask for other possible nodes 7036 * 7037 * This routine is a wrapper around alloc_contig_range(). It scans over zones 7038 * on an applicable zonelist to find a contiguous pfn range which can then be 7039 * tried for allocation with alloc_contig_range(). This routine is intended 7040 * for allocation requests which can not be fulfilled with the buddy allocator. 7041 * 7042 * The allocated memory is always aligned to a page boundary. If nr_pages is a 7043 * power of two, then allocated range is also guaranteed to be aligned to same 7044 * nr_pages (e.g. 1GB request would be aligned to 1GB). 7045 * 7046 * Allocated pages can be freed with free_contig_range() or by manually calling 7047 * __free_page() on each allocated page. 7048 * 7049 * Return: pointer to contiguous pages on success, or NULL if not successful. 
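 *
 * Minimal usage sketch (illustrative only; assumes the
 * alloc_contig_pages() wrapper and a caller that may sleep):
 *
 *	struct page *pages = alloc_contig_pages(512, GFP_KERNEL, nid, NULL);
 *
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	free_contig_range(page_to_pfn(pages), 512);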
7050 */ 7051 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 7052 int nid, nodemask_t *nodemask) 7053 { 7054 unsigned long ret, pfn, flags; 7055 struct zonelist *zonelist; 7056 struct zone *zone; 7057 struct zoneref *z; 7058 7059 zonelist = node_zonelist(nid, gfp_mask); 7060 for_each_zone_zonelist_nodemask(zone, z, zonelist, 7061 gfp_zone(gfp_mask), nodemask) { 7062 spin_lock_irqsave(&zone->lock, flags); 7063 7064 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 7065 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 7066 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 7067 /* 7068 * We release the zone lock here because 7069 * alloc_contig_range() will also lock the zone 7070 * at some point. If there's an allocation 7071 * spinning on this lock, it may win the race 7072 * and cause alloc_contig_range() to fail... 7073 */ 7074 spin_unlock_irqrestore(&zone->lock, flags); 7075 ret = __alloc_contig_pages(pfn, nr_pages, 7076 gfp_mask); 7077 if (!ret) 7078 return pfn_to_page(pfn); 7079 spin_lock_irqsave(&zone->lock, flags); 7080 } 7081 pfn += nr_pages; 7082 } 7083 spin_unlock_irqrestore(&zone->lock, flags); 7084 } 7085 return NULL; 7086 } 7087 #endif /* CONFIG_CONTIG_ALLOC */ 7088 7089 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 7090 { 7091 unsigned long count = 0; 7092 struct folio *folio = pfn_folio(pfn); 7093 7094 if (folio_test_large(folio)) { 7095 int expected = folio_nr_pages(folio); 7096 7097 if (nr_pages == expected) 7098 folio_put(folio); 7099 else 7100 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n", 7101 pfn, nr_pages, expected); 7102 return; 7103 } 7104 7105 for (; nr_pages--; pfn++) { 7106 struct page *page = pfn_to_page(pfn); 7107 7108 count += page_count(page) != 1; 7109 __free_page(page); 7110 } 7111 WARN(count != 0, "%lu pages are still in use!\n", count); 7112 } 7113 EXPORT_SYMBOL(free_contig_range); 7114 7115 /* 7116 * Effectively disable pcplists for the zone by setting the high limit to 0 7117 * and draining all cpus. A concurrent page freeing on another CPU that's about 7118 * to put the page on pcplist will either finish before the drain and the page 7119 * will be drained, or observe the new high limit and skip the pcplist. 7120 * 7121 * Must be paired with a call to zone_pcp_enable(). 7122 */ 7123 void zone_pcp_disable(struct zone *zone) 7124 { 7125 mutex_lock(&pcp_batch_high_lock); 7126 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 7127 __drain_all_pages(zone, true); 7128 } 7129 7130 void zone_pcp_enable(struct zone *zone) 7131 { 7132 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 7133 zone->pageset_high_max, zone->pageset_batch); 7134 mutex_unlock(&pcp_batch_high_lock); 7135 } 7136 7137 void zone_pcp_reset(struct zone *zone) 7138 { 7139 int cpu; 7140 struct per_cpu_zonestat *pzstats; 7141 7142 if (zone->per_cpu_pageset != &boot_pageset) { 7143 for_each_online_cpu(cpu) { 7144 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7145 drain_zonestat(zone, pzstats); 7146 } 7147 free_percpu(zone->per_cpu_pageset); 7148 zone->per_cpu_pageset = &boot_pageset; 7149 if (zone->per_cpu_zonestats != &boot_zonestats) { 7150 free_percpu(zone->per_cpu_zonestats); 7151 zone->per_cpu_zonestats = &boot_zonestats; 7152 } 7153 } 7154 } 7155 7156 #ifdef CONFIG_MEMORY_HOTREMOVE 7157 /* 7158 * All pages in the range must be in a single zone, must not contain holes, 7159 * must span full sections, and must be isolated before calling this function. 
7160 * 7161 * Returns the number of managed (non-PageOffline()) pages in the range: the 7162 * number of pages for which memory offlining code must adjust managed page 7163 * counters using adjust_managed_page_count(). 7164 */ 7165 unsigned long __offline_isolated_pages(unsigned long start_pfn, 7166 unsigned long end_pfn) 7167 { 7168 unsigned long already_offline = 0, flags; 7169 unsigned long pfn = start_pfn; 7170 struct page *page; 7171 struct zone *zone; 7172 unsigned int order; 7173 7174 offline_mem_sections(pfn, end_pfn); 7175 zone = page_zone(pfn_to_page(pfn)); 7176 spin_lock_irqsave(&zone->lock, flags); 7177 while (pfn < end_pfn) { 7178 page = pfn_to_page(pfn); 7179 /* 7180 * The HWPoisoned page may be not in buddy system, and 7181 * page_count() is not 0. 7182 */ 7183 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 7184 pfn++; 7185 continue; 7186 } 7187 /* 7188 * At this point all remaining PageOffline() pages have a 7189 * reference count of 0 and can simply be skipped. 7190 */ 7191 if (PageOffline(page)) { 7192 BUG_ON(page_count(page)); 7193 BUG_ON(PageBuddy(page)); 7194 already_offline++; 7195 pfn++; 7196 continue; 7197 } 7198 7199 BUG_ON(page_count(page)); 7200 BUG_ON(!PageBuddy(page)); 7201 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 7202 order = buddy_order(page); 7203 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 7204 pfn += (1 << order); 7205 } 7206 spin_unlock_irqrestore(&zone->lock, flags); 7207 7208 return end_pfn - start_pfn - already_offline; 7209 } 7210 #endif 7211 7212 /* 7213 * This function returns a stable result only if called under zone lock. 7214 */ 7215 bool is_free_buddy_page(const struct page *page) 7216 { 7217 unsigned long pfn = page_to_pfn(page); 7218 unsigned int order; 7219 7220 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7221 const struct page *head = page - (pfn & ((1 << order) - 1)); 7222 7223 if (PageBuddy(head) && 7224 buddy_order_unsafe(head) >= order) 7225 break; 7226 } 7227 7228 return order <= MAX_PAGE_ORDER; 7229 } 7230 EXPORT_SYMBOL(is_free_buddy_page); 7231 7232 #ifdef CONFIG_MEMORY_FAILURE 7233 static inline void add_to_free_list(struct page *page, struct zone *zone, 7234 unsigned int order, int migratetype, 7235 bool tail) 7236 { 7237 __add_to_free_list(page, zone, order, migratetype, tail); 7238 account_freepages(zone, 1 << order, migratetype); 7239 } 7240 7241 /* 7242 * Break down a higher-order page in sub-pages, and keep our target out of 7243 * buddy allocator. 7244 */ 7245 static void break_down_buddy_pages(struct zone *zone, struct page *page, 7246 struct page *target, int low, int high, 7247 int migratetype) 7248 { 7249 unsigned long size = 1 << high; 7250 struct page *current_buddy; 7251 7252 while (high > low) { 7253 high--; 7254 size >>= 1; 7255 7256 if (target >= &page[size]) { 7257 current_buddy = page; 7258 page = page + size; 7259 } else { 7260 current_buddy = page + size; 7261 } 7262 7263 if (set_page_guard(zone, current_buddy, high)) 7264 continue; 7265 7266 add_to_free_list(current_buddy, zone, high, migratetype, false); 7267 set_buddy_order(current_buddy, high); 7268 } 7269 } 7270 7271 /* 7272 * Take a page that will be marked as poisoned off the buddy allocator. 
7273 */ 7274 bool take_page_off_buddy(struct page *page) 7275 { 7276 struct zone *zone = page_zone(page); 7277 unsigned long pfn = page_to_pfn(page); 7278 unsigned long flags; 7279 unsigned int order; 7280 bool ret = false; 7281 7282 spin_lock_irqsave(&zone->lock, flags); 7283 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7284 struct page *page_head = page - (pfn & ((1 << order) - 1)); 7285 int page_order = buddy_order(page_head); 7286 7287 if (PageBuddy(page_head) && page_order >= order) { 7288 unsigned long pfn_head = page_to_pfn(page_head); 7289 int migratetype = get_pfnblock_migratetype(page_head, 7290 pfn_head); 7291 7292 del_page_from_free_list(page_head, zone, page_order, 7293 migratetype); 7294 break_down_buddy_pages(zone, page_head, page, 0, 7295 page_order, migratetype); 7296 SetPageHWPoisonTakenOff(page); 7297 ret = true; 7298 break; 7299 } 7300 if (page_count(page_head) > 0) 7301 break; 7302 } 7303 spin_unlock_irqrestore(&zone->lock, flags); 7304 return ret; 7305 } 7306 7307 /* 7308 * Cancel takeoff done by take_page_off_buddy(). 7309 */ 7310 bool put_page_back_buddy(struct page *page) 7311 { 7312 struct zone *zone = page_zone(page); 7313 unsigned long flags; 7314 bool ret = false; 7315 7316 spin_lock_irqsave(&zone->lock, flags); 7317 if (put_page_testzero(page)) { 7318 unsigned long pfn = page_to_pfn(page); 7319 int migratetype = get_pfnblock_migratetype(page, pfn); 7320 7321 ClearPageHWPoisonTakenOff(page); 7322 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 7323 if (TestClearPageHWPoison(page)) { 7324 ret = true; 7325 } 7326 } 7327 spin_unlock_irqrestore(&zone->lock, flags); 7328 7329 return ret; 7330 } 7331 #endif 7332 7333 #ifdef CONFIG_ZONE_DMA 7334 bool has_managed_dma(void) 7335 { 7336 struct pglist_data *pgdat; 7337 7338 for_each_online_pgdat(pgdat) { 7339 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 7340 7341 if (managed_zone(zone)) 7342 return true; 7343 } 7344 return false; 7345 } 7346 #endif /* CONFIG_ZONE_DMA */ 7347 7348 #ifdef CONFIG_UNACCEPTED_MEMORY 7349 7350 static bool lazy_accept = true; 7351 7352 static int __init accept_memory_parse(char *p) 7353 { 7354 if (!strcmp(p, "lazy")) { 7355 lazy_accept = true; 7356 return 0; 7357 } else if (!strcmp(p, "eager")) { 7358 lazy_accept = false; 7359 return 0; 7360 } else { 7361 return -EINVAL; 7362 } 7363 } 7364 early_param("accept_memory", accept_memory_parse); 7365 7366 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7367 { 7368 phys_addr_t start = page_to_phys(page); 7369 7370 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 7371 } 7372 7373 static void __accept_page(struct zone *zone, unsigned long *flags, 7374 struct page *page) 7375 { 7376 list_del(&page->lru); 7377 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7378 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7379 __ClearPageUnaccepted(page); 7380 spin_unlock_irqrestore(&zone->lock, *flags); 7381 7382 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 7383 7384 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 7385 } 7386 7387 void accept_page(struct page *page) 7388 { 7389 struct zone *zone = page_zone(page); 7390 unsigned long flags; 7391 7392 spin_lock_irqsave(&zone->lock, flags); 7393 if (!PageUnaccepted(page)) { 7394 spin_unlock_irqrestore(&zone->lock, flags); 7395 return; 7396 } 7397 7398 /* Unlocks zone->lock */ 7399 __accept_page(zone, &flags, page); 7400 } 7401 7402 static bool try_to_accept_memory_one(struct zone *zone) 7403 { 7404 
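	/*
	 * Pop one unaccepted MAX_ORDER chunk off the zone's list and accept
	 * it via __accept_page(), which drops zone->lock and hands the pages
	 * back to the buddy allocator. Returns false if the list is empty.
	 */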
unsigned long flags; 7405 struct page *page; 7406 7407 spin_lock_irqsave(&zone->lock, flags); 7408 page = list_first_entry_or_null(&zone->unaccepted_pages, 7409 struct page, lru); 7410 if (!page) { 7411 spin_unlock_irqrestore(&zone->lock, flags); 7412 return false; 7413 } 7414 7415 /* Unlocks zone->lock */ 7416 __accept_page(zone, &flags, page); 7417 7418 return true; 7419 } 7420 7421 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7422 int alloc_flags) 7423 { 7424 long to_accept, wmark; 7425 bool ret = false; 7426 7427 if (list_empty(&zone->unaccepted_pages)) 7428 return false; 7429 7430 /* Bailout, since try_to_accept_memory_one() needs to take a lock */ 7431 if (alloc_flags & ALLOC_TRYLOCK) 7432 return false; 7433 7434 wmark = promo_wmark_pages(zone); 7435 7436 /* 7437 * Watermarks have not been initialized yet. 7438 * 7439 * Accept one MAX_ORDER page to ensure progress. 7440 */ 7441 if (!wmark) 7442 return try_to_accept_memory_one(zone); 7443 7444 /* How much to accept to get to promo watermark? */ 7445 to_accept = wmark - 7446 (zone_page_state(zone, NR_FREE_PAGES) - 7447 __zone_watermark_unusable_free(zone, order, 0) - 7448 zone_page_state(zone, NR_UNACCEPTED)); 7449 7450 while (to_accept > 0) { 7451 if (!try_to_accept_memory_one(zone)) 7452 break; 7453 ret = true; 7454 to_accept -= MAX_ORDER_NR_PAGES; 7455 } 7456 7457 return ret; 7458 } 7459 7460 static bool __free_unaccepted(struct page *page) 7461 { 7462 struct zone *zone = page_zone(page); 7463 unsigned long flags; 7464 7465 if (!lazy_accept) 7466 return false; 7467 7468 spin_lock_irqsave(&zone->lock, flags); 7469 list_add_tail(&page->lru, &zone->unaccepted_pages); 7470 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7471 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7472 __SetPageUnaccepted(page); 7473 spin_unlock_irqrestore(&zone->lock, flags); 7474 7475 return true; 7476 } 7477 7478 #else 7479 7480 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7481 { 7482 return false; 7483 } 7484 7485 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7486 int alloc_flags) 7487 { 7488 return false; 7489 } 7490 7491 static bool __free_unaccepted(struct page *page) 7492 { 7493 BUILD_BUG(); 7494 return false; 7495 } 7496 7497 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7498 7499 /** 7500 * alloc_pages_nolock - opportunistic reentrant allocation from any context 7501 * @nid: node to allocate from 7502 * @order: allocation order size 7503 * 7504 * Allocates pages of a given order from the given node. This is safe to 7505 * call from any context (from atomic, NMI, and also reentrant 7506 * allocator -> tracepoint -> alloc_pages_nolock_noprof). 7507 * Allocation is best effort and is expected to fail easily, so nobody should 7508 * rely on it succeeding. Failures are not reported via warn_alloc(). 7509 * See the always-fail conditions below. 7510 * 7511 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN. 7512 * It means ENOMEM. There is no reason to call it again and expect !NULL. 7513 */ 7514 struct page *alloc_pages_nolock_noprof(int nid, unsigned int order) 7515 { 7516 /* 7517 * Do not specify __GFP_DIRECT_RECLAIM, since direct reclaim is not allowed. 7518 * Do not specify __GFP_KSWAPD_RECLAIM either, since wake up of kswapd 7519 * is not safe in arbitrary context. 7520 * 7521 * These two are the conditions for gfpflags_allow_spinning() being true.
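 * (As a rough sketch, not verified against the current header,
 * gfpflags_allow_spinning(gfp) boils down to checking that neither reclaim
 * flag is set, i.e. something like !(gfp & __GFP_RECLAIM).)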
7522 * 7523 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason 7524 * to warn. Also warn would trigger printk() which is unsafe from 7525 * various contexts. We cannot use printk_deferred_enter() to mitigate, 7526 * since the running context is unknown. 7527 * 7528 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below 7529 * is safe in any context. Also zeroing the page is mandatory for 7530 * BPF use cases. 7531 * 7532 * Though __GFP_NOMEMALLOC is not checked in the code path below, 7533 * specify it here to highlight that alloc_pages_nolock() 7534 * doesn't want to deplete reserves. 7535 */ 7536 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC 7537 | __GFP_ACCOUNT; 7538 unsigned int alloc_flags = ALLOC_TRYLOCK; 7539 struct alloc_context ac = { }; 7540 struct page *page; 7541 7542 /* 7543 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is 7544 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current 7545 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will 7546 * mark the task as the owner of another rt_spin_lock which will 7547 * confuse PI logic, so return immediately if called form hard IRQ or 7548 * NMI. 7549 * 7550 * Note, irqs_disabled() case is ok. This function can be called 7551 * from raw_spin_lock_irqsave region. 7552 */ 7553 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) 7554 return NULL; 7555 if (!pcp_allowed_order(order)) 7556 return NULL; 7557 7558 /* Bailout, since _deferred_grow_zone() needs to take a lock */ 7559 if (deferred_pages_enabled()) 7560 return NULL; 7561 7562 if (nid == NUMA_NO_NODE) 7563 nid = numa_node_id(); 7564 7565 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac, 7566 &alloc_gfp, &alloc_flags); 7567 7568 /* 7569 * Best effort allocation from percpu free list. 7570 * If it's empty attempt to spin_trylock zone->lock. 7571 */ 7572 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 7573 7574 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */ 7575 7576 if (page) 7577 set_page_refcounted(page); 7578 7579 if (memcg_kmem_online() && page && 7580 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) { 7581 free_pages_nolock(page, order); 7582 page = NULL; 7583 } 7584 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 7585 kmsan_alloc_page(page, order, alloc_gfp); 7586 return page; 7587 } 7588