1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/kasan.h> 26 #include <linux/kmsan.h> 27 #include <linux/module.h> 28 #include <linux/suspend.h> 29 #include <linux/ratelimit.h> 30 #include <linux/oom.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/pagevec.h> 36 #include <linux/memory_hotplug.h> 37 #include <linux/nodemask.h> 38 #include <linux/vmstat.h> 39 #include <linux/fault-inject.h> 40 #include <linux/compaction.h> 41 #include <trace/events/kmem.h> 42 #include <trace/events/oom.h> 43 #include <linux/prefetch.h> 44 #include <linux/mm_inline.h> 45 #include <linux/mmu_notifier.h> 46 #include <linux/migrate.h> 47 #include <linux/sched/mm.h> 48 #include <linux/page_owner.h> 49 #include <linux/page_table_check.h> 50 #include <linux/memcontrol.h> 51 #include <linux/ftrace.h> 52 #include <linux/lockdep.h> 53 #include <linux/psi.h> 54 #include <linux/khugepaged.h> 55 #include <linux/delayacct.h> 56 #include <linux/cacheinfo.h> 57 #include <linux/pgalloc_tag.h> 58 #include <asm/div64.h> 59 #include "internal.h" 60 #include "shuffle.h" 61 #include "page_reporting.h" 62 63 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 64 typedef int __bitwise fpi_t; 65 66 /* No special request */ 67 #define FPI_NONE ((__force fpi_t)0) 68 69 /* 70 * Skip free page reporting notification for the (possibly merged) page. 71 * This does not hinder free page reporting from grabbing the page, 72 * reporting it and marking it "reported" - it only skips notifying 73 * the free page reporting infrastructure about a newly freed page. For 74 * example, used when temporarily pulling a page from a freelist and 75 * putting it back unmodified. 76 */ 77 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 78 79 /* 80 * Place the (possibly merged) page to the tail of the freelist. Will ignore 81 * page shuffling (relevant code - e.g., memory onlining - is expected to 82 * shuffle the whole zone). 83 * 84 * Note: No code should rely on this flag for correctness - it's purely 85 * to allow for optimizations when handing back either fresh pages 86 * (memory onlining) or untouched pages (page isolation, free page 87 * reporting). 88 */ 89 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 90 91 /* Free the page without taking locks. Rely on trylock only. 
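 * If the zone lock cannot be taken, the page is queued on the zone's trylock_free_pages llist (see add_page_to_zone_llist()) and freed later by a caller that does hold zone->lock.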
*/ 92 #define FPI_TRYLOCK ((__force fpi_t)BIT(2)) 93 94 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 95 static DEFINE_MUTEX(pcp_batch_high_lock); 96 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 97 98 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 99 /* 100 * On SMP, spin_trylock is sufficient protection. 101 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 102 */ 103 #define pcp_trylock_prepare(flags) do { } while (0) 104 #define pcp_trylock_finish(flag) do { } while (0) 105 #else 106 107 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ 108 #define pcp_trylock_prepare(flags) local_irq_save(flags) 109 #define pcp_trylock_finish(flags) local_irq_restore(flags) 110 #endif 111 112 /* 113 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid 114 * a migration causing the wrong PCP to be locked and remote memory being 115 * potentially allocated, pin the task to the CPU for the lookup+lock. 116 * preempt_disable is used on !RT because it is faster than migrate_disable. 117 * migrate_disable is used on RT because otherwise RT spinlock usage is 118 * interfered with and a high priority task cannot preempt the allocator. 119 */ 120 #ifndef CONFIG_PREEMPT_RT 121 #define pcpu_task_pin() preempt_disable() 122 #define pcpu_task_unpin() preempt_enable() 123 #else 124 #define pcpu_task_pin() migrate_disable() 125 #define pcpu_task_unpin() migrate_enable() 126 #endif 127 128 /* 129 * Generic helper to lookup and a per-cpu variable with an embedded spinlock. 130 * Return value should be used with equivalent unlock helper. 131 */ 132 #define pcpu_spin_lock(type, member, ptr) \ 133 ({ \ 134 type *_ret; \ 135 pcpu_task_pin(); \ 136 _ret = this_cpu_ptr(ptr); \ 137 spin_lock(&_ret->member); \ 138 _ret; \ 139 }) 140 141 #define pcpu_spin_trylock(type, member, ptr) \ 142 ({ \ 143 type *_ret; \ 144 pcpu_task_pin(); \ 145 _ret = this_cpu_ptr(ptr); \ 146 if (!spin_trylock(&_ret->member)) { \ 147 pcpu_task_unpin(); \ 148 _ret = NULL; \ 149 } \ 150 _ret; \ 151 }) 152 153 #define pcpu_spin_unlock(member, ptr) \ 154 ({ \ 155 spin_unlock(&ptr->member); \ 156 pcpu_task_unpin(); \ 157 }) 158 159 /* struct per_cpu_pages specific helpers. */ 160 #define pcp_spin_lock(ptr) \ 161 pcpu_spin_lock(struct per_cpu_pages, lock, ptr) 162 163 #define pcp_spin_trylock(ptr) \ 164 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr) 165 166 #define pcp_spin_unlock(ptr) \ 167 pcpu_spin_unlock(lock, ptr) 168 169 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 170 DEFINE_PER_CPU(int, numa_node); 171 EXPORT_PER_CPU_SYMBOL(numa_node); 172 #endif 173 174 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 175 176 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 177 /* 178 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 179 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 180 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 181 * defined in <linux/topology.h>. 182 */ 183 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 184 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 185 #endif 186 187 static DEFINE_MUTEX(pcpu_drain_mutex); 188 189 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 190 volatile unsigned long latent_entropy __latent_entropy; 191 EXPORT_SYMBOL(latent_entropy); 192 #endif 193 194 /* 195 * Array of node states. 
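 * N_POSSIBLE starts as all nodes and N_ONLINE as node 0 only; the remaining states are statically seeded with node 0 on !NUMA builds and are filled in at runtime on NUMA builds.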
196 */ 197 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 198 [N_POSSIBLE] = NODE_MASK_ALL, 199 [N_ONLINE] = { { [0] = 1UL } }, 200 #ifndef CONFIG_NUMA 201 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 202 #ifdef CONFIG_HIGHMEM 203 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 204 #endif 205 [N_MEMORY] = { { [0] = 1UL } }, 206 [N_CPU] = { { [0] = 1UL } }, 207 #endif /* NUMA */ 208 }; 209 EXPORT_SYMBOL(node_states); 210 211 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 212 213 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 214 unsigned int pageblock_order __read_mostly; 215 #endif 216 217 static void __free_pages_ok(struct page *page, unsigned int order, 218 fpi_t fpi_flags); 219 220 /* 221 * results with 256, 32 in the lowmem_reserve sysctl: 222 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 223 * 1G machine -> (16M dma, 784M normal, 224M high) 224 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 225 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 226 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 227 * 228 * TBD: should special case ZONE_DMA32 machines here - in those we normally 229 * don't need any ZONE_NORMAL reservation 230 */ 231 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 232 #ifdef CONFIG_ZONE_DMA 233 [ZONE_DMA] = 256, 234 #endif 235 #ifdef CONFIG_ZONE_DMA32 236 [ZONE_DMA32] = 256, 237 #endif 238 [ZONE_NORMAL] = 32, 239 #ifdef CONFIG_HIGHMEM 240 [ZONE_HIGHMEM] = 0, 241 #endif 242 [ZONE_MOVABLE] = 0, 243 }; 244 245 char * const zone_names[MAX_NR_ZONES] = { 246 #ifdef CONFIG_ZONE_DMA 247 "DMA", 248 #endif 249 #ifdef CONFIG_ZONE_DMA32 250 "DMA32", 251 #endif 252 "Normal", 253 #ifdef CONFIG_HIGHMEM 254 "HighMem", 255 #endif 256 "Movable", 257 #ifdef CONFIG_ZONE_DEVICE 258 "Device", 259 #endif 260 }; 261 262 const char * const migratetype_names[MIGRATE_TYPES] = { 263 "Unmovable", 264 "Movable", 265 "Reclaimable", 266 "HighAtomic", 267 #ifdef CONFIG_CMA 268 "CMA", 269 #endif 270 #ifdef CONFIG_MEMORY_ISOLATION 271 "Isolate", 272 #endif 273 }; 274 275 int min_free_kbytes = 1024; 276 int user_min_free_kbytes = -1; 277 static int watermark_boost_factor __read_mostly = 15000; 278 static int watermark_scale_factor = 10; 279 int defrag_mode; 280 281 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 282 int movable_zone; 283 EXPORT_SYMBOL(movable_zone); 284 285 #if MAX_NUMNODES > 1 286 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 287 unsigned int nr_online_nodes __read_mostly = 1; 288 EXPORT_SYMBOL(nr_node_ids); 289 EXPORT_SYMBOL(nr_online_nodes); 290 #endif 291 292 static bool page_contains_unaccepted(struct page *page, unsigned int order); 293 static bool cond_accept_memory(struct zone *zone, unsigned int order); 294 static bool __free_unaccepted(struct page *page); 295 296 int page_group_by_mobility_disabled __read_mostly; 297 298 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 299 /* 300 * During boot we initialize deferred pages on-demand, as needed, but once 301 * page_alloc_init_late() has finished, the deferred pages are all initialized, 302 * and we can permanently disable that path. 303 */ 304 DEFINE_STATIC_KEY_TRUE(deferred_pages); 305 306 static inline bool deferred_pages_enabled(void) 307 { 308 return static_branch_unlikely(&deferred_pages); 309 } 310 311 /* 312 * deferred_grow_zone() is __init, but it is called from 313 * get_page_from_freelist() during early boot until deferred_pages permanently 314 * disables this call. 
This is why we have refdata wrapper to avoid warning, 315 * and to ensure that the function body gets unloaded. 316 */ 317 static bool __ref 318 _deferred_grow_zone(struct zone *zone, unsigned int order) 319 { 320 return deferred_grow_zone(zone, order); 321 } 322 #else 323 static inline bool deferred_pages_enabled(void) 324 { 325 return false; 326 } 327 328 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) 329 { 330 return false; 331 } 332 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 333 334 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 335 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 336 unsigned long pfn) 337 { 338 #ifdef CONFIG_SPARSEMEM 339 return section_to_usemap(__pfn_to_section(pfn)); 340 #else 341 return page_zone(page)->pageblock_flags; 342 #endif /* CONFIG_SPARSEMEM */ 343 } 344 345 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 346 { 347 #ifdef CONFIG_SPARSEMEM 348 pfn &= (PAGES_PER_SECTION-1); 349 #else 350 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 351 #endif /* CONFIG_SPARSEMEM */ 352 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 353 } 354 355 /** 356 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages 357 * @page: The page within the block of interest 358 * @pfn: The target page frame number 359 * @mask: mask of bits that the caller is interested in 360 * 361 * Return: pageblock_bits flags 362 */ 363 unsigned long get_pfnblock_flags_mask(const struct page *page, 364 unsigned long pfn, unsigned long mask) 365 { 366 unsigned long *bitmap; 367 unsigned long bitidx, word_bitidx; 368 unsigned long word; 369 370 bitmap = get_pageblock_bitmap(page, pfn); 371 bitidx = pfn_to_bitidx(page, pfn); 372 word_bitidx = bitidx / BITS_PER_LONG; 373 bitidx &= (BITS_PER_LONG-1); 374 /* 375 * This races, without locks, with set_pfnblock_flags_mask(). Ensure 376 * a consistent read of the memory array, so that results, even though 377 * racy, are not corrupted. 
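 * (set_pfnblock_flags_mask() updates the word with try_cmpxchg(), so a single READ_ONCE() observes either the old or the new value, never a torn mix of the two.)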
378 */ 379 word = READ_ONCE(bitmap[word_bitidx]); 380 return (word >> bitidx) & mask; 381 } 382 383 static __always_inline int get_pfnblock_migratetype(const struct page *page, 384 unsigned long pfn) 385 { 386 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 387 } 388 389 /** 390 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 391 * @page: The page within the block of interest 392 * @flags: The flags to set 393 * @pfn: The target page frame number 394 * @mask: mask of bits that the caller is interested in 395 */ 396 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 397 unsigned long pfn, 398 unsigned long mask) 399 { 400 unsigned long *bitmap; 401 unsigned long bitidx, word_bitidx; 402 unsigned long word; 403 404 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 405 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 406 407 bitmap = get_pageblock_bitmap(page, pfn); 408 bitidx = pfn_to_bitidx(page, pfn); 409 word_bitidx = bitidx / BITS_PER_LONG; 410 bitidx &= (BITS_PER_LONG-1); 411 412 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 413 414 mask <<= bitidx; 415 flags <<= bitidx; 416 417 word = READ_ONCE(bitmap[word_bitidx]); 418 do { 419 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); 420 } 421 422 void set_pageblock_migratetype(struct page *page, int migratetype) 423 { 424 if (unlikely(page_group_by_mobility_disabled && 425 migratetype < MIGRATE_PCPTYPES)) 426 migratetype = MIGRATE_UNMOVABLE; 427 428 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 429 page_to_pfn(page), MIGRATETYPE_MASK); 430 } 431 432 #ifdef CONFIG_DEBUG_VM 433 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 434 { 435 int ret; 436 unsigned seq; 437 unsigned long pfn = page_to_pfn(page); 438 unsigned long sp, start_pfn; 439 440 do { 441 seq = zone_span_seqbegin(zone); 442 start_pfn = zone->zone_start_pfn; 443 sp = zone->spanned_pages; 444 ret = !zone_spans_pfn(zone, pfn); 445 } while (zone_span_seqretry(zone, seq)); 446 447 if (ret) 448 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 449 pfn, zone_to_nid(zone), zone->name, 450 start_pfn, start_pfn + sp); 451 452 return ret; 453 } 454 455 /* 456 * Temporary debugging check for pages not lying within a given zone. 457 */ 458 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 459 { 460 if (page_outside_zone_boundaries(zone, page)) 461 return true; 462 if (zone != page_zone(page)) 463 return true; 464 465 return false; 466 } 467 #else 468 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 469 { 470 return false; 471 } 472 #endif 473 474 static void bad_page(struct page *page, const char *reason) 475 { 476 static unsigned long resume; 477 static unsigned long nr_shown; 478 static unsigned long nr_unshown; 479 480 /* 481 * Allow a burst of 60 reports, then keep quiet for that minute; 482 * or allow a steady drip of one report per second. 
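 * The static counters below are updated without locking; exact accounting is not required for rate limiting.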
483 */ 484 if (nr_shown == 60) { 485 if (time_before(jiffies, resume)) { 486 nr_unshown++; 487 goto out; 488 } 489 if (nr_unshown) { 490 pr_alert( 491 "BUG: Bad page state: %lu messages suppressed\n", 492 nr_unshown); 493 nr_unshown = 0; 494 } 495 nr_shown = 0; 496 } 497 if (nr_shown++ == 0) 498 resume = jiffies + 60 * HZ; 499 500 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 501 current->comm, page_to_pfn(page)); 502 dump_page(page, reason); 503 504 print_modules(); 505 dump_stack(); 506 out: 507 /* Leave bad fields for debug, except PageBuddy could make trouble */ 508 if (PageBuddy(page)) 509 __ClearPageBuddy(page); 510 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 511 } 512 513 static inline unsigned int order_to_pindex(int migratetype, int order) 514 { 515 516 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 517 bool movable; 518 if (order > PAGE_ALLOC_COSTLY_ORDER) { 519 VM_BUG_ON(order != HPAGE_PMD_ORDER); 520 521 movable = migratetype == MIGRATE_MOVABLE; 522 523 return NR_LOWORDER_PCP_LISTS + movable; 524 } 525 #else 526 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 527 #endif 528 529 return (MIGRATE_PCPTYPES * order) + migratetype; 530 } 531 532 static inline int pindex_to_order(unsigned int pindex) 533 { 534 int order = pindex / MIGRATE_PCPTYPES; 535 536 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 537 if (pindex >= NR_LOWORDER_PCP_LISTS) 538 order = HPAGE_PMD_ORDER; 539 #else 540 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 541 #endif 542 543 return order; 544 } 545 546 static inline bool pcp_allowed_order(unsigned int order) 547 { 548 if (order <= PAGE_ALLOC_COSTLY_ORDER) 549 return true; 550 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 551 if (order == HPAGE_PMD_ORDER) 552 return true; 553 #endif 554 return false; 555 } 556 557 /* 558 * Higher-order pages are called "compound pages". They are structured thusly: 559 * 560 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 561 * 562 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 563 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 564 * 565 * The first tail page's ->compound_order holds the order of allocation. 566 * This usage means that zero-order pages may not be compound. 567 */ 568 569 void prep_compound_page(struct page *page, unsigned int order) 570 { 571 int i; 572 int nr_pages = 1 << order; 573 574 __SetPageHead(page); 575 for (i = 1; i < nr_pages; i++) 576 prep_compound_tail(page, i); 577 578 prep_compound_head(page, order); 579 } 580 581 static inline void set_buddy_order(struct page *page, unsigned int order) 582 { 583 set_page_private(page, order); 584 __SetPageBuddy(page); 585 } 586 587 #ifdef CONFIG_COMPACTION 588 static inline struct capture_control *task_capc(struct zone *zone) 589 { 590 struct capture_control *capc = current->capture_control; 591 592 return unlikely(capc) && 593 !(current->flags & PF_KTHREAD) && 594 !capc->page && 595 capc->cc->zone == zone ? capc : NULL; 596 } 597 598 static inline bool 599 compaction_capture(struct capture_control *capc, struct page *page, 600 int order, int migratetype) 601 { 602 if (!capc || order != capc->cc->order) 603 return false; 604 605 /* Do not accidentally pollute CMA or isolated regions*/ 606 if (is_migrate_cma(migratetype) || 607 is_migrate_isolate(migratetype)) 608 return false; 609 610 /* 611 * Do not let lower order allocations pollute a movable pageblock 612 * unless compaction is also requesting movable pages. 
613 * This might let an unmovable request use a reclaimable pageblock 614 * and vice-versa but no more than normal fallback logic which can 615 * have trouble finding a high-order free page. 616 */ 617 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 618 capc->cc->migratetype != MIGRATE_MOVABLE) 619 return false; 620 621 if (migratetype != capc->cc->migratetype) 622 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, 623 capc->cc->migratetype, migratetype); 624 625 capc->page = page; 626 return true; 627 } 628 629 #else 630 static inline struct capture_control *task_capc(struct zone *zone) 631 { 632 return NULL; 633 } 634 635 static inline bool 636 compaction_capture(struct capture_control *capc, struct page *page, 637 int order, int migratetype) 638 { 639 return false; 640 } 641 #endif /* CONFIG_COMPACTION */ 642 643 static inline void account_freepages(struct zone *zone, int nr_pages, 644 int migratetype) 645 { 646 lockdep_assert_held(&zone->lock); 647 648 if (is_migrate_isolate(migratetype)) 649 return; 650 651 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 652 653 if (is_migrate_cma(migratetype)) 654 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 655 else if (is_migrate_highatomic(migratetype)) 656 WRITE_ONCE(zone->nr_free_highatomic, 657 zone->nr_free_highatomic + nr_pages); 658 } 659 660 /* Used for pages not on another list */ 661 static inline void __add_to_free_list(struct page *page, struct zone *zone, 662 unsigned int order, int migratetype, 663 bool tail) 664 { 665 struct free_area *area = &zone->free_area[order]; 666 int nr_pages = 1 << order; 667 668 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 669 "page type is %lu, passed migratetype is %d (nr=%d)\n", 670 get_pageblock_migratetype(page), migratetype, nr_pages); 671 672 if (tail) 673 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 674 else 675 list_add(&page->buddy_list, &area->free_list[migratetype]); 676 area->nr_free++; 677 678 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 679 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 680 } 681 682 /* 683 * Used for pages which are on another list. Move the pages to the tail 684 * of the list - so the moved pages won't immediately be considered for 685 * allocation again (e.g., optimization for memory onlining). 
686 */ 687 static inline void move_to_free_list(struct page *page, struct zone *zone, 688 unsigned int order, int old_mt, int new_mt) 689 { 690 struct free_area *area = &zone->free_area[order]; 691 int nr_pages = 1 << order; 692 693 /* Free page moving can fail, so it happens before the type update */ 694 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, 695 "page type is %lu, passed migratetype is %d (nr=%d)\n", 696 get_pageblock_migratetype(page), old_mt, nr_pages); 697 698 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); 699 700 account_freepages(zone, -nr_pages, old_mt); 701 account_freepages(zone, nr_pages, new_mt); 702 703 if (order >= pageblock_order && 704 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { 705 if (!is_migrate_isolate(old_mt)) 706 nr_pages = -nr_pages; 707 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 708 } 709 } 710 711 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, 712 unsigned int order, int migratetype) 713 { 714 int nr_pages = 1 << order; 715 716 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 717 "page type is %lu, passed migratetype is %d (nr=%d)\n", 718 get_pageblock_migratetype(page), migratetype, nr_pages); 719 720 /* clear reported state and update reported page count */ 721 if (page_reported(page)) 722 __ClearPageReported(page); 723 724 list_del(&page->buddy_list); 725 __ClearPageBuddy(page); 726 set_page_private(page, 0); 727 zone->free_area[order].nr_free--; 728 729 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 730 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); 731 } 732 733 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 734 unsigned int order, int migratetype) 735 { 736 __del_page_from_free_list(page, zone, order, migratetype); 737 account_freepages(zone, -(1 << order), migratetype); 738 } 739 740 static inline struct page *get_page_from_free_area(struct free_area *area, 741 int migratetype) 742 { 743 return list_first_entry_or_null(&area->free_list[migratetype], 744 struct page, buddy_list); 745 } 746 747 /* 748 * If this is less than the 2nd largest possible page, check if the buddy 749 * of the next-higher order is free. If it is, it's possible 750 * that pages are being freed that will coalesce soon. In case, 751 * that is happening, add the free page to the tail of the list 752 * so it's less likely to be used soon and more likely to be merged 753 * as a 2-level higher order page 754 */ 755 static inline bool 756 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 757 struct page *page, unsigned int order) 758 { 759 unsigned long higher_page_pfn; 760 struct page *higher_page; 761 762 if (order >= MAX_PAGE_ORDER - 1) 763 return false; 764 765 higher_page_pfn = buddy_pfn & pfn; 766 higher_page = page + (higher_page_pfn - pfn); 767 768 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 769 NULL) != NULL; 770 } 771 772 /* 773 * Freeing function for a buddy system allocator. 774 * 775 * The concept of a buddy system is to maintain direct-mapped table 776 * (containing bit values) for memory blocks of various "orders". 777 * The bottom level table contains the map for the smallest allocatable 778 * units of memory (here, pages), and each level above it describes 779 * pairs of units from the levels below, hence, "buddies". 
780 * At a high level, all that happens here is marking the table entry 781 * at the bottom level available, and propagating the changes upward 782 * as necessary, plus some accounting needed to play nicely with other 783 * parts of the VM system. 784 * At each level, we keep a list of pages, which are heads of continuous 785 * free pages of length of (1 << order) and marked with PageBuddy. 786 * Page's order is recorded in page_private(page) field. 787 * So when we are allocating or freeing one, we can derive the state of the 788 * other. That is, if we allocate a small block, and both were 789 * free, the remainder of the region must be split into blocks. 790 * If a block is freed, and its buddy is also free, then this 791 * triggers coalescing into a block of larger size. 792 * 793 * -- nyc 794 */ 795 796 static inline void __free_one_page(struct page *page, 797 unsigned long pfn, 798 struct zone *zone, unsigned int order, 799 int migratetype, fpi_t fpi_flags) 800 { 801 struct capture_control *capc = task_capc(zone); 802 unsigned long buddy_pfn = 0; 803 unsigned long combined_pfn; 804 struct page *buddy; 805 bool to_tail; 806 807 VM_BUG_ON(!zone_is_initialized(zone)); 808 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 809 810 VM_BUG_ON(migratetype == -1); 811 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 812 VM_BUG_ON_PAGE(bad_range(zone, page), page); 813 814 account_freepages(zone, 1 << order, migratetype); 815 816 while (order < MAX_PAGE_ORDER) { 817 int buddy_mt = migratetype; 818 819 if (compaction_capture(capc, page, order, migratetype)) { 820 account_freepages(zone, -(1 << order), migratetype); 821 return; 822 } 823 824 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 825 if (!buddy) 826 goto done_merging; 827 828 if (unlikely(order >= pageblock_order)) { 829 /* 830 * We want to prevent merge between freepages on pageblock 831 * without fallbacks and normal pageblock. Without this, 832 * pageblock isolation could cause incorrect freepage or CMA 833 * accounting or HIGHATOMIC accounting. 834 */ 835 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 836 837 if (migratetype != buddy_mt && 838 (!migratetype_is_mergeable(migratetype) || 839 !migratetype_is_mergeable(buddy_mt))) 840 goto done_merging; 841 } 842 843 /* 844 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 845 * merge with it and move up one order. 846 */ 847 if (page_is_guard(buddy)) 848 clear_page_guard(zone, buddy, order); 849 else 850 __del_page_from_free_list(buddy, zone, order, buddy_mt); 851 852 if (unlikely(buddy_mt != migratetype)) { 853 /* 854 * Match buddy type. This ensures that an 855 * expand() down the line puts the sub-blocks 856 * on the right freelists. 857 */ 858 set_pageblock_migratetype(buddy, migratetype); 859 } 860 861 combined_pfn = buddy_pfn & pfn; 862 page = page + (combined_pfn - pfn); 863 pfn = combined_pfn; 864 order++; 865 } 866 867 done_merging: 868 set_buddy_order(page, order); 869 870 if (fpi_flags & FPI_TO_TAIL) 871 to_tail = true; 872 else if (is_shuffle_order(order)) 873 to_tail = shuffle_pick_tail(); 874 else 875 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 876 877 __add_to_free_list(page, zone, order, migratetype, to_tail); 878 879 /* Notify page reporting subsystem of freed page */ 880 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 881 page_reporting_notify_free(order); 882 } 883 884 /* 885 * A bad page could be due to a number of fields. Instead of multiple branches, 886 * try and check multiple fields with one check. 
The caller must do a detailed 887 * check if necessary. 888 */ 889 static inline bool page_expected_state(struct page *page, 890 unsigned long check_flags) 891 { 892 if (unlikely(atomic_read(&page->_mapcount) != -1)) 893 return false; 894 895 if (unlikely((unsigned long)page->mapping | 896 page_ref_count(page) | 897 #ifdef CONFIG_MEMCG 898 page->memcg_data | 899 #endif 900 #ifdef CONFIG_PAGE_POOL 901 ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) | 902 #endif 903 (page->flags & check_flags))) 904 return false; 905 906 return true; 907 } 908 909 static const char *page_bad_reason(struct page *page, unsigned long flags) 910 { 911 const char *bad_reason = NULL; 912 913 if (unlikely(atomic_read(&page->_mapcount) != -1)) 914 bad_reason = "nonzero mapcount"; 915 if (unlikely(page->mapping != NULL)) 916 bad_reason = "non-NULL mapping"; 917 if (unlikely(page_ref_count(page) != 0)) 918 bad_reason = "nonzero _refcount"; 919 if (unlikely(page->flags & flags)) { 920 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 921 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 922 else 923 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 924 } 925 #ifdef CONFIG_MEMCG 926 if (unlikely(page->memcg_data)) 927 bad_reason = "page still charged to cgroup"; 928 #endif 929 #ifdef CONFIG_PAGE_POOL 930 if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE)) 931 bad_reason = "page_pool leak"; 932 #endif 933 return bad_reason; 934 } 935 936 static void free_page_is_bad_report(struct page *page) 937 { 938 bad_page(page, 939 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 940 } 941 942 static inline bool free_page_is_bad(struct page *page) 943 { 944 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 945 return false; 946 947 /* Something has gone sideways, find it */ 948 free_page_is_bad_report(page); 949 return true; 950 } 951 952 static inline bool is_check_pages_enabled(void) 953 { 954 return static_branch_unlikely(&check_pages_enabled); 955 } 956 957 static int free_tail_page_prepare(struct page *head_page, struct page *page) 958 { 959 struct folio *folio = (struct folio *)head_page; 960 int ret = 1; 961 962 /* 963 * We rely page->lru.next never has bit 0 set, unless the page 964 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
965 */ 966 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 967 968 if (!is_check_pages_enabled()) { 969 ret = 0; 970 goto out; 971 } 972 switch (page - head_page) { 973 case 1: 974 /* the first tail page: these may be in place of ->mapping */ 975 if (unlikely(folio_large_mapcount(folio))) { 976 bad_page(page, "nonzero large_mapcount"); 977 goto out; 978 } 979 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && 980 unlikely(atomic_read(&folio->_nr_pages_mapped))) { 981 bad_page(page, "nonzero nr_pages_mapped"); 982 goto out; 983 } 984 if (IS_ENABLED(CONFIG_MM_ID)) { 985 if (unlikely(folio->_mm_id_mapcount[0] != -1)) { 986 bad_page(page, "nonzero mm mapcount 0"); 987 goto out; 988 } 989 if (unlikely(folio->_mm_id_mapcount[1] != -1)) { 990 bad_page(page, "nonzero mm mapcount 1"); 991 goto out; 992 } 993 } 994 if (IS_ENABLED(CONFIG_64BIT)) { 995 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 996 bad_page(page, "nonzero entire_mapcount"); 997 goto out; 998 } 999 if (unlikely(atomic_read(&folio->_pincount))) { 1000 bad_page(page, "nonzero pincount"); 1001 goto out; 1002 } 1003 } 1004 break; 1005 case 2: 1006 /* the second tail page: deferred_list overlaps ->mapping */ 1007 if (unlikely(!list_empty(&folio->_deferred_list))) { 1008 bad_page(page, "on deferred list"); 1009 goto out; 1010 } 1011 if (!IS_ENABLED(CONFIG_64BIT)) { 1012 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1013 bad_page(page, "nonzero entire_mapcount"); 1014 goto out; 1015 } 1016 if (unlikely(atomic_read(&folio->_pincount))) { 1017 bad_page(page, "nonzero pincount"); 1018 goto out; 1019 } 1020 } 1021 break; 1022 case 3: 1023 /* the third tail page: hugetlb specifics overlap ->mappings */ 1024 if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) 1025 break; 1026 fallthrough; 1027 default: 1028 if (page->mapping != TAIL_MAPPING) { 1029 bad_page(page, "corrupted mapping in tail page"); 1030 goto out; 1031 } 1032 break; 1033 } 1034 if (unlikely(!PageTail(page))) { 1035 bad_page(page, "PageTail not set"); 1036 goto out; 1037 } 1038 if (unlikely(compound_head(page) != head_page)) { 1039 bad_page(page, "compound_head not consistent"); 1040 goto out; 1041 } 1042 ret = 0; 1043 out: 1044 page->mapping = NULL; 1045 clear_compound_head(page); 1046 return ret; 1047 } 1048 1049 /* 1050 * Skip KASAN memory poisoning when either: 1051 * 1052 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1053 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1054 * using page tags instead (see below). 1055 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1056 * that error detection is disabled for accesses via the page address. 1057 * 1058 * Pages will have match-all tags in the following circumstances: 1059 * 1060 * 1. Pages are being initialized for the first time, including during deferred 1061 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1062 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1063 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1064 * 3. The allocation was excluded from being checked due to sampling, 1065 * see the call to kasan_unpoison_pages. 1066 * 1067 * Poisoning pages during deferred memory init will greatly lengthen the 1068 * process and cause problem in large memory systems as the deferred pages 1069 * initialization is done with interrupt disabled. 
1070 * 1071 * Assuming that there will be no reference to those newly initialized 1072 * pages before they are ever allocated, this should have no effect on 1073 * KASAN memory tracking as the poison will be properly inserted at page 1074 * allocation time. The only corner case is when pages are allocated by 1075 * on-demand allocation and then freed again before the deferred pages 1076 * initialization is done, but this is not likely to happen. 1077 */ 1078 static inline bool should_skip_kasan_poison(struct page *page) 1079 { 1080 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1081 return deferred_pages_enabled(); 1082 1083 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1084 } 1085 1086 static void kernel_init_pages(struct page *page, int numpages) 1087 { 1088 int i; 1089 1090 /* s390's use of memset() could override KASAN redzones. */ 1091 kasan_disable_current(); 1092 for (i = 0; i < numpages; i++) 1093 clear_highpage_kasan_tagged(page + i); 1094 kasan_enable_current(); 1095 } 1096 1097 #ifdef CONFIG_MEM_ALLOC_PROFILING 1098 1099 /* Should be called only if mem_alloc_profiling_enabled() */ 1100 void __clear_page_tag_ref(struct page *page) 1101 { 1102 union pgtag_ref_handle handle; 1103 union codetag_ref ref; 1104 1105 if (get_page_tag_ref(page, &ref, &handle)) { 1106 set_codetag_empty(&ref); 1107 update_page_tag_ref(handle, &ref); 1108 put_page_tag_ref(handle); 1109 } 1110 } 1111 1112 /* Should be called only if mem_alloc_profiling_enabled() */ 1113 static noinline 1114 void __pgalloc_tag_add(struct page *page, struct task_struct *task, 1115 unsigned int nr) 1116 { 1117 union pgtag_ref_handle handle; 1118 union codetag_ref ref; 1119 1120 if (get_page_tag_ref(page, &ref, &handle)) { 1121 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); 1122 update_page_tag_ref(handle, &ref); 1123 put_page_tag_ref(handle); 1124 } 1125 } 1126 1127 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1128 unsigned int nr) 1129 { 1130 if (mem_alloc_profiling_enabled()) 1131 __pgalloc_tag_add(page, task, nr); 1132 } 1133 1134 /* Should be called only if mem_alloc_profiling_enabled() */ 1135 static noinline 1136 void __pgalloc_tag_sub(struct page *page, unsigned int nr) 1137 { 1138 union pgtag_ref_handle handle; 1139 union codetag_ref ref; 1140 1141 if (get_page_tag_ref(page, &ref, &handle)) { 1142 alloc_tag_sub(&ref, PAGE_SIZE * nr); 1143 update_page_tag_ref(handle, &ref); 1144 put_page_tag_ref(handle); 1145 } 1146 } 1147 1148 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) 1149 { 1150 if (mem_alloc_profiling_enabled()) 1151 __pgalloc_tag_sub(page, nr); 1152 } 1153 1154 static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) 1155 { 1156 struct alloc_tag *tag; 1157 1158 if (!mem_alloc_profiling_enabled()) 1159 return; 1160 1161 tag = __pgalloc_tag_get(page); 1162 if (tag) 1163 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1164 } 1165 1166 #else /* CONFIG_MEM_ALLOC_PROFILING */ 1167 1168 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1169 unsigned int nr) {} 1170 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1171 static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} 1172 1173 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1174 1175 __always_inline bool free_pages_prepare(struct page *page, 1176 unsigned int order) 1177 { 1178 int bad = 0; 1179 bool skip_kasan_poison = should_skip_kasan_poison(page); 1180 bool init = want_init_on_free(); 1181 bool compound = 
PageCompound(page); 1182 struct folio *folio = page_folio(page); 1183 1184 VM_BUG_ON_PAGE(PageTail(page), page); 1185 1186 trace_mm_page_free(page, order); 1187 kmsan_free_page(page, order); 1188 1189 if (memcg_kmem_online() && PageMemcgKmem(page)) 1190 __memcg_kmem_uncharge_page(page, order); 1191 1192 /* 1193 * In rare cases, when truncation or holepunching raced with 1194 * munlock after VM_LOCKED was cleared, Mlocked may still be 1195 * found set here. This does not indicate a problem, unless 1196 * "unevictable_pgs_cleared" appears worryingly large. 1197 */ 1198 if (unlikely(folio_test_mlocked(folio))) { 1199 long nr_pages = folio_nr_pages(folio); 1200 1201 __folio_clear_mlocked(folio); 1202 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1203 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1204 } 1205 1206 if (unlikely(PageHWPoison(page)) && !order) { 1207 /* Do not let hwpoison pages hit pcplists/buddy */ 1208 reset_page_owner(page, order); 1209 page_table_check_free(page, order); 1210 pgalloc_tag_sub(page, 1 << order); 1211 1212 /* 1213 * The page is isolated and accounted for. 1214 * Mark the codetag as empty to avoid accounting error 1215 * when the page is freed by unpoison_memory(). 1216 */ 1217 clear_page_tag_ref(page); 1218 return false; 1219 } 1220 1221 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1222 1223 /* 1224 * Check tail pages before head page information is cleared to 1225 * avoid checking PageCompound for order-0 pages. 1226 */ 1227 if (unlikely(order)) { 1228 int i; 1229 1230 if (compound) { 1231 page[1].flags &= ~PAGE_FLAGS_SECOND; 1232 #ifdef NR_PAGES_IN_LARGE_FOLIO 1233 folio->_nr_pages = 0; 1234 #endif 1235 } 1236 for (i = 1; i < (1 << order); i++) { 1237 if (compound) 1238 bad += free_tail_page_prepare(page, page + i); 1239 if (is_check_pages_enabled()) { 1240 if (free_page_is_bad(page + i)) { 1241 bad++; 1242 continue; 1243 } 1244 } 1245 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1246 } 1247 } 1248 if (PageMappingFlags(page)) { 1249 if (PageAnon(page)) 1250 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1251 page->mapping = NULL; 1252 } 1253 if (is_check_pages_enabled()) { 1254 if (free_page_is_bad(page)) 1255 bad++; 1256 if (bad) 1257 return false; 1258 } 1259 1260 page_cpupid_reset_last(page); 1261 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1262 reset_page_owner(page, order); 1263 page_table_check_free(page, order); 1264 pgalloc_tag_sub(page, 1 << order); 1265 1266 if (!PageHighMem(page)) { 1267 debug_check_no_locks_freed(page_address(page), 1268 PAGE_SIZE << order); 1269 debug_check_no_obj_freed(page_address(page), 1270 PAGE_SIZE << order); 1271 } 1272 1273 kernel_poison_pages(page, 1 << order); 1274 1275 /* 1276 * As memory initialization might be integrated into KASAN, 1277 * KASAN poisoning and memory initialization code must be 1278 * kept together to avoid discrepancies in behavior. 1279 * 1280 * With hardware tag-based KASAN, memory tags must be set before the 1281 * page becomes unavailable via debug_pagealloc or arch_free_page. 1282 */ 1283 if (!skip_kasan_poison) { 1284 kasan_poison_pages(page, order, init); 1285 1286 /* Memory is already initialized if KASAN did it internally. */ 1287 if (kasan_has_integrated_init()) 1288 init = false; 1289 } 1290 if (init) 1291 kernel_init_pages(page, 1 << order); 1292 1293 /* 1294 * arch_free_page() can make the page's contents inaccessible. s390 1295 * does this. So nothing which can access the page's contents should 1296 * happen after this. 
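 * (When CONFIG_DEBUG_PAGEALLOC is enabled, debug_pagealloc_unmap_pages() below likewise removes the pages from the kernel page tables.)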
1297 */ 1298 arch_free_page(page, order); 1299 1300 debug_pagealloc_unmap_pages(page, 1 << order); 1301 1302 return true; 1303 } 1304 1305 /* 1306 * Frees a number of pages from the PCP lists 1307 * Assumes all pages on list are in same zone. 1308 * count is the number of pages to free. 1309 */ 1310 static void free_pcppages_bulk(struct zone *zone, int count, 1311 struct per_cpu_pages *pcp, 1312 int pindex) 1313 { 1314 unsigned long flags; 1315 unsigned int order; 1316 struct page *page; 1317 1318 /* 1319 * Ensure proper count is passed which otherwise would stuck in the 1320 * below while (list_empty(list)) loop. 1321 */ 1322 count = min(pcp->count, count); 1323 1324 /* Ensure requested pindex is drained first. */ 1325 pindex = pindex - 1; 1326 1327 spin_lock_irqsave(&zone->lock, flags); 1328 1329 while (count > 0) { 1330 struct list_head *list; 1331 int nr_pages; 1332 1333 /* Remove pages from lists in a round-robin fashion. */ 1334 do { 1335 if (++pindex > NR_PCP_LISTS - 1) 1336 pindex = 0; 1337 list = &pcp->lists[pindex]; 1338 } while (list_empty(list)); 1339 1340 order = pindex_to_order(pindex); 1341 nr_pages = 1 << order; 1342 do { 1343 unsigned long pfn; 1344 int mt; 1345 1346 page = list_last_entry(list, struct page, pcp_list); 1347 pfn = page_to_pfn(page); 1348 mt = get_pfnblock_migratetype(page, pfn); 1349 1350 /* must delete to avoid corrupting pcp list */ 1351 list_del(&page->pcp_list); 1352 count -= nr_pages; 1353 pcp->count -= nr_pages; 1354 1355 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); 1356 trace_mm_page_pcpu_drain(page, order, mt); 1357 } while (count > 0 && !list_empty(list)); 1358 } 1359 1360 spin_unlock_irqrestore(&zone->lock, flags); 1361 } 1362 1363 /* Split a multi-block free page into its individual pageblocks. */ 1364 static void split_large_buddy(struct zone *zone, struct page *page, 1365 unsigned long pfn, int order, fpi_t fpi) 1366 { 1367 unsigned long end = pfn + (1 << order); 1368 1369 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); 1370 /* Caller removed page from freelist, buddy info cleared! */ 1371 VM_WARN_ON_ONCE(PageBuddy(page)); 1372 1373 if (order > pageblock_order) 1374 order = pageblock_order; 1375 1376 do { 1377 int mt = get_pfnblock_migratetype(page, pfn); 1378 1379 __free_one_page(page, pfn, zone, order, mt, fpi); 1380 pfn += 1 << order; 1381 if (pfn == end) 1382 break; 1383 page = pfn_to_page(pfn); 1384 } while (1); 1385 } 1386 1387 static void add_page_to_zone_llist(struct zone *zone, struct page *page, 1388 unsigned int order) 1389 { 1390 /* Remember the order */ 1391 page->order = order; 1392 /* Add the page to the free list */ 1393 llist_add(&page->pcp_llist, &zone->trylock_free_pages); 1394 } 1395 1396 static void free_one_page(struct zone *zone, struct page *page, 1397 unsigned long pfn, unsigned int order, 1398 fpi_t fpi_flags) 1399 { 1400 struct llist_head *llhead; 1401 unsigned long flags; 1402 1403 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 1404 if (!spin_trylock_irqsave(&zone->lock, flags)) { 1405 add_page_to_zone_llist(zone, page, order); 1406 return; 1407 } 1408 } else { 1409 spin_lock_irqsave(&zone->lock, flags); 1410 } 1411 1412 /* The lock succeeded. Process deferred pages. 
*/ 1413 llhead = &zone->trylock_free_pages; 1414 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) { 1415 struct llist_node *llnode; 1416 struct page *p, *tmp; 1417 1418 llnode = llist_del_all(llhead); 1419 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) { 1420 unsigned int p_order = p->order; 1421 1422 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags); 1423 __count_vm_events(PGFREE, 1 << p_order); 1424 } 1425 } 1426 split_large_buddy(zone, page, pfn, order, fpi_flags); 1427 spin_unlock_irqrestore(&zone->lock, flags); 1428 1429 __count_vm_events(PGFREE, 1 << order); 1430 } 1431 1432 static void __free_pages_ok(struct page *page, unsigned int order, 1433 fpi_t fpi_flags) 1434 { 1435 unsigned long pfn = page_to_pfn(page); 1436 struct zone *zone = page_zone(page); 1437 1438 if (free_pages_prepare(page, order)) 1439 free_one_page(zone, page, pfn, order, fpi_flags); 1440 } 1441 1442 void __meminit __free_pages_core(struct page *page, unsigned int order, 1443 enum meminit_context context) 1444 { 1445 unsigned int nr_pages = 1 << order; 1446 struct page *p = page; 1447 unsigned int loop; 1448 1449 /* 1450 * When initializing the memmap, __init_single_page() sets the refcount 1451 * of all pages to 1 ("allocated"/"not free"). We have to set the 1452 * refcount of all involved pages to 0. 1453 * 1454 * Note that hotplugged memory pages are initialized to PageOffline(). 1455 * Pages freed from memblock might be marked as reserved. 1456 */ 1457 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1458 unlikely(context == MEMINIT_HOTPLUG)) { 1459 for (loop = 0; loop < nr_pages; loop++, p++) { 1460 VM_WARN_ON_ONCE(PageReserved(p)); 1461 __ClearPageOffline(p); 1462 set_page_count(p, 0); 1463 } 1464 1465 adjust_managed_page_count(page, nr_pages); 1466 } else { 1467 for (loop = 0; loop < nr_pages; loop++, p++) { 1468 __ClearPageReserved(p); 1469 set_page_count(p, 0); 1470 } 1471 1472 /* memblock adjusts totalram_pages() manually. */ 1473 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1474 } 1475 1476 if (page_contains_unaccepted(page, order)) { 1477 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1478 return; 1479 1480 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1481 } 1482 1483 /* 1484 * Bypass PCP and place fresh pages right to the tail, primarily 1485 * relevant for memory onlining. 1486 */ 1487 __free_pages_ok(page, order, FPI_TO_TAIL); 1488 } 1489 1490 /* 1491 * Check that the whole (or subset of) a pageblock given by the interval of 1492 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1493 * with the migration of free compaction scanner. 1494 * 1495 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1496 * 1497 * It's possible on some configurations to have a setup like node0 node1 node0 1498 * i.e. it's possible that all pages within a zones range of pages do not 1499 * belong to a single zone. We assume that a border between node0 and node1 1500 * can occur within a single pageblock, but not a node0 node1 node0 1501 * interleaving within a single pageblock. It is therefore sufficient to check 1502 * the first and last page of a pageblock and avoid checking each individual 1503 * page in a pageblock. 1504 * 1505 * Note: the function may return non-NULL struct page even for a page block 1506 * which contains a memory hole (i.e. there is no physical memory for a subset 1507 * of the pfn range). 
For example, if the pageblock order is MAX_PAGE_ORDER, which 1508 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1509 * even though the start pfn is online and valid. This should be safe most of 1510 * the time because struct pages are still initialized via init_unavailable_range() 1511 * and pfn walkers shouldn't touch any physical memory range for which they do 1512 * not recognize any specific metadata in struct pages. 1513 */ 1514 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1515 unsigned long end_pfn, struct zone *zone) 1516 { 1517 struct page *start_page; 1518 struct page *end_page; 1519 1520 /* end_pfn is one past the range we are checking */ 1521 end_pfn--; 1522 1523 if (!pfn_valid(end_pfn)) 1524 return NULL; 1525 1526 start_page = pfn_to_online_page(start_pfn); 1527 if (!start_page) 1528 return NULL; 1529 1530 if (page_zone(start_page) != zone) 1531 return NULL; 1532 1533 end_page = pfn_to_page(end_pfn); 1534 1535 /* This gives a shorter code than deriving page_zone(end_page) */ 1536 if (page_zone_id(start_page) != page_zone_id(end_page)) 1537 return NULL; 1538 1539 return start_page; 1540 } 1541 1542 /* 1543 * The order of subdivision here is critical for the IO subsystem. 1544 * Please do not alter this order without good reasons and regression 1545 * testing. Specifically, as large blocks of memory are subdivided, 1546 * the order in which smaller blocks are delivered depends on the order 1547 * they're subdivided in this function. This is the primary factor 1548 * influencing the order in which pages are delivered to the IO 1549 * subsystem according to empirical testing, and this is also justified 1550 * by considering the behavior of a buddy system containing a single 1551 * large block of memory acted on by a series of small allocations. 1552 * This behavior is a critical factor in sglist merging's success. 1553 * 1554 * -- nyc 1555 */ 1556 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1557 int high, int migratetype) 1558 { 1559 unsigned int size = 1 << high; 1560 unsigned int nr_added = 0; 1561 1562 while (high > low) { 1563 high--; 1564 size >>= 1; 1565 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1566 1567 /* 1568 * Mark as guard pages (or page), that will allow to 1569 * merge back to allocator when buddy will be freed. 
1570 * Corresponding page table entries will not be touched, 1571 * pages will stay not present in virtual address space 1572 */ 1573 if (set_page_guard(zone, &page[size], high)) 1574 continue; 1575 1576 __add_to_free_list(&page[size], zone, high, migratetype, false); 1577 set_buddy_order(&page[size], high); 1578 nr_added += size; 1579 } 1580 1581 return nr_added; 1582 } 1583 1584 static __always_inline void page_del_and_expand(struct zone *zone, 1585 struct page *page, int low, 1586 int high, int migratetype) 1587 { 1588 int nr_pages = 1 << high; 1589 1590 __del_page_from_free_list(page, zone, high, migratetype); 1591 nr_pages -= expand(zone, page, low, high, migratetype); 1592 account_freepages(zone, -nr_pages, migratetype); 1593 } 1594 1595 static void check_new_page_bad(struct page *page) 1596 { 1597 if (unlikely(PageHWPoison(page))) { 1598 /* Don't complain about hwpoisoned pages */ 1599 if (PageBuddy(page)) 1600 __ClearPageBuddy(page); 1601 return; 1602 } 1603 1604 bad_page(page, 1605 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1606 } 1607 1608 /* 1609 * This page is about to be returned from the page allocator 1610 */ 1611 static bool check_new_page(struct page *page) 1612 { 1613 if (likely(page_expected_state(page, 1614 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1615 return false; 1616 1617 check_new_page_bad(page); 1618 return true; 1619 } 1620 1621 static inline bool check_new_pages(struct page *page, unsigned int order) 1622 { 1623 if (is_check_pages_enabled()) { 1624 for (int i = 0; i < (1 << order); i++) { 1625 struct page *p = page + i; 1626 1627 if (check_new_page(p)) 1628 return true; 1629 } 1630 } 1631 1632 return false; 1633 } 1634 1635 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1636 { 1637 /* Don't skip if a software KASAN mode is enabled. */ 1638 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1639 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1640 return false; 1641 1642 /* Skip, if hardware tag-based KASAN is not enabled. */ 1643 if (!kasan_hw_tags_enabled()) 1644 return true; 1645 1646 /* 1647 * With hardware tag-based KASAN enabled, skip if this has been 1648 * requested via __GFP_SKIP_KASAN. 1649 */ 1650 return flags & __GFP_SKIP_KASAN; 1651 } 1652 1653 static inline bool should_skip_init(gfp_t flags) 1654 { 1655 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1656 if (!kasan_hw_tags_enabled()) 1657 return false; 1658 1659 /* For hardware tag-based KASAN, skip if requested. */ 1660 return (flags & __GFP_SKIP_ZERO); 1661 } 1662 1663 inline void post_alloc_hook(struct page *page, unsigned int order, 1664 gfp_t gfp_flags) 1665 { 1666 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1667 !should_skip_init(gfp_flags); 1668 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1669 int i; 1670 1671 set_page_private(page, 0); 1672 1673 arch_alloc_page(page, order); 1674 debug_pagealloc_map_pages(page, 1 << order); 1675 1676 /* 1677 * Page unpoisoning must happen before memory initialization. 1678 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1679 * allocations and the page unpoisoning code will complain. 1680 */ 1681 kernel_unpoison_pages(page, 1 << order); 1682 1683 /* 1684 * As memory initialization might be integrated into KASAN, 1685 * KASAN unpoisoning and memory initializion code must be 1686 * kept together to avoid discrepancies in behavior. 1687 */ 1688 1689 /* 1690 * If memory tags should be zeroed 1691 * (which happens only when memory should be initialized as well). 
1692 */ 1693 if (zero_tags) { 1694 /* Initialize both memory and memory tags. */ 1695 for (i = 0; i != 1 << order; ++i) 1696 tag_clear_highpage(page + i); 1697 1698 /* Take note that memory was initialized by the loop above. */ 1699 init = false; 1700 } 1701 if (!should_skip_kasan_unpoison(gfp_flags) && 1702 kasan_unpoison_pages(page, order, init)) { 1703 /* Take note that memory was initialized by KASAN. */ 1704 if (kasan_has_integrated_init()) 1705 init = false; 1706 } else { 1707 /* 1708 * If memory tags have not been set by KASAN, reset the page 1709 * tags to ensure page_address() dereferencing does not fault. 1710 */ 1711 for (i = 0; i != 1 << order; ++i) 1712 page_kasan_tag_reset(page + i); 1713 } 1714 /* If memory is still not initialized, initialize it now. */ 1715 if (init) 1716 kernel_init_pages(page, 1 << order); 1717 1718 set_page_owner(page, order, gfp_flags); 1719 page_table_check_alloc(page, order); 1720 pgalloc_tag_add(page, current, 1 << order); 1721 } 1722 1723 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1724 unsigned int alloc_flags) 1725 { 1726 post_alloc_hook(page, order, gfp_flags); 1727 1728 if (order && (gfp_flags & __GFP_COMP)) 1729 prep_compound_page(page, order); 1730 1731 /* 1732 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1733 * allocate the page. The expectation is that the caller is taking 1734 * steps that will free more memory. The caller should avoid the page 1735 * being used for !PFMEMALLOC purposes. 1736 */ 1737 if (alloc_flags & ALLOC_NO_WATERMARKS) 1738 set_page_pfmemalloc(page); 1739 else 1740 clear_page_pfmemalloc(page); 1741 } 1742 1743 /* 1744 * Go through the free lists for the given migratetype and remove 1745 * the smallest available page from the freelists 1746 */ 1747 static __always_inline 1748 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1749 int migratetype) 1750 { 1751 unsigned int current_order; 1752 struct free_area *area; 1753 struct page *page; 1754 1755 /* Find a page of the appropriate size in the preferred list */ 1756 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1757 area = &(zone->free_area[current_order]); 1758 page = get_page_from_free_area(area, migratetype); 1759 if (!page) 1760 continue; 1761 1762 page_del_and_expand(zone, page, order, current_order, 1763 migratetype); 1764 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1765 pcp_allowed_order(order) && 1766 migratetype < MIGRATE_PCPTYPES); 1767 return page; 1768 } 1769 1770 return NULL; 1771 } 1772 1773 1774 /* 1775 * This array describes the order lists are fallen back to when 1776 * the free lists for the desirable migrate type are depleted 1777 * 1778 * The other migratetypes do not have fallbacks. 1779 */ 1780 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1781 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1782 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1783 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1784 }; 1785 1786 #ifdef CONFIG_CMA 1787 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1788 unsigned int order) 1789 { 1790 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1791 } 1792 #else 1793 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1794 unsigned int order) { return NULL; } 1795 #endif 1796 1797 /* 1798 * Change the type of a block and move all its free pages to that 1799 * type's freelist. 
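 * Returns the number of base pages moved (not the number of pageblocks).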
1800 */ 1801 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1802 int old_mt, int new_mt) 1803 { 1804 struct page *page; 1805 unsigned long pfn, end_pfn; 1806 unsigned int order; 1807 int pages_moved = 0; 1808 1809 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1810 end_pfn = pageblock_end_pfn(start_pfn); 1811 1812 for (pfn = start_pfn; pfn < end_pfn;) { 1813 page = pfn_to_page(pfn); 1814 if (!PageBuddy(page)) { 1815 pfn++; 1816 continue; 1817 } 1818 1819 /* Make sure we are not inadvertently changing nodes */ 1820 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1821 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1822 1823 order = buddy_order(page); 1824 1825 move_to_free_list(page, zone, order, old_mt, new_mt); 1826 1827 pfn += 1 << order; 1828 pages_moved += 1 << order; 1829 } 1830 1831 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 1832 1833 return pages_moved; 1834 } 1835 1836 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 1837 unsigned long *start_pfn, 1838 int *num_free, int *num_movable) 1839 { 1840 unsigned long pfn, start, end; 1841 1842 pfn = page_to_pfn(page); 1843 start = pageblock_start_pfn(pfn); 1844 end = pageblock_end_pfn(pfn); 1845 1846 /* 1847 * The caller only has the lock for @zone, don't touch ranges 1848 * that straddle into other zones. While we could move part of 1849 * the range that's inside the zone, this call is usually 1850 * accompanied by other operations such as migratetype updates 1851 * which also should be locked. 1852 */ 1853 if (!zone_spans_pfn(zone, start)) 1854 return false; 1855 if (!zone_spans_pfn(zone, end - 1)) 1856 return false; 1857 1858 *start_pfn = start; 1859 1860 if (num_free) { 1861 *num_free = 0; 1862 *num_movable = 0; 1863 for (pfn = start; pfn < end;) { 1864 page = pfn_to_page(pfn); 1865 if (PageBuddy(page)) { 1866 int nr = 1 << buddy_order(page); 1867 1868 *num_free += nr; 1869 pfn += nr; 1870 continue; 1871 } 1872 /* 1873 * We assume that pages that could be isolated for 1874 * migration are movable. But we don't actually try 1875 * isolating, as that would be expensive. 1876 */ 1877 if (PageLRU(page) || __PageMovable(page)) 1878 (*num_movable)++; 1879 pfn++; 1880 } 1881 } 1882 1883 return true; 1884 } 1885 1886 static int move_freepages_block(struct zone *zone, struct page *page, 1887 int old_mt, int new_mt) 1888 { 1889 unsigned long start_pfn; 1890 1891 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1892 return -1; 1893 1894 return __move_freepages_block(zone, start_pfn, old_mt, new_mt); 1895 } 1896 1897 #ifdef CONFIG_MEMORY_ISOLATION 1898 /* Look for a buddy that straddles start_pfn */ 1899 static unsigned long find_large_buddy(unsigned long start_pfn) 1900 { 1901 int order = 0; 1902 struct page *page; 1903 unsigned long pfn = start_pfn; 1904 1905 while (!PageBuddy(page = pfn_to_page(pfn))) { 1906 /* Nothing found */ 1907 if (++order > MAX_PAGE_ORDER) 1908 return start_pfn; 1909 pfn &= ~0UL << order; 1910 } 1911 1912 /* 1913 * Found a preceding buddy, but does it straddle? 
1914 */ 1915 if (pfn + (1 << buddy_order(page)) > start_pfn) 1916 return pfn; 1917 1918 /* Nothing found */ 1919 return start_pfn; 1920 } 1921 1922 /** 1923 * move_freepages_block_isolate - move free pages in block for page isolation 1924 * @zone: the zone 1925 * @page: the pageblock page 1926 * @migratetype: migratetype to set on the pageblock 1927 * 1928 * This is similar to move_freepages_block(), but handles the special 1929 * case encountered in page isolation, where the block of interest 1930 * might be part of a larger buddy spanning multiple pageblocks. 1931 * 1932 * Unlike the regular page allocator path, which moves pages while 1933 * stealing buddies off the freelist, page isolation is interested in 1934 * arbitrary pfn ranges that may have overlapping buddies on both ends. 1935 * 1936 * This function handles that. Straddling buddies are split into 1937 * individual pageblocks. Only the block of interest is moved. 1938 * 1939 * Returns %true if pages could be moved, %false otherwise. 1940 */ 1941 bool move_freepages_block_isolate(struct zone *zone, struct page *page, 1942 int migratetype) 1943 { 1944 unsigned long start_pfn, pfn; 1945 1946 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1947 return false; 1948 1949 /* No splits needed if buddies can't span multiple blocks */ 1950 if (pageblock_order == MAX_PAGE_ORDER) 1951 goto move; 1952 1953 /* We're a tail block in a larger buddy */ 1954 pfn = find_large_buddy(start_pfn); 1955 if (pfn != start_pfn) { 1956 struct page *buddy = pfn_to_page(pfn); 1957 int order = buddy_order(buddy); 1958 1959 del_page_from_free_list(buddy, zone, order, 1960 get_pfnblock_migratetype(buddy, pfn)); 1961 set_pageblock_migratetype(page, migratetype); 1962 split_large_buddy(zone, buddy, pfn, order, FPI_NONE); 1963 return true; 1964 } 1965 1966 /* We're the starting block of a larger buddy */ 1967 if (PageBuddy(page) && buddy_order(page) > pageblock_order) { 1968 int order = buddy_order(page); 1969 1970 del_page_from_free_list(page, zone, order, 1971 get_pfnblock_migratetype(page, pfn)); 1972 set_pageblock_migratetype(page, migratetype); 1973 split_large_buddy(zone, page, pfn, order, FPI_NONE); 1974 return true; 1975 } 1976 move: 1977 __move_freepages_block(zone, start_pfn, 1978 get_pfnblock_migratetype(page, start_pfn), 1979 migratetype); 1980 return true; 1981 } 1982 #endif /* CONFIG_MEMORY_ISOLATION */ 1983 1984 static void change_pageblock_range(struct page *pageblock_page, 1985 int start_order, int migratetype) 1986 { 1987 int nr_pageblocks = 1 << (start_order - pageblock_order); 1988 1989 while (nr_pageblocks--) { 1990 set_pageblock_migratetype(pageblock_page, migratetype); 1991 pageblock_page += pageblock_nr_pages; 1992 } 1993 } 1994 1995 static inline bool boost_watermark(struct zone *zone) 1996 { 1997 unsigned long max_boost; 1998 1999 if (!watermark_boost_factor) 2000 return false; 2001 /* 2002 * Don't bother in zones that are unlikely to produce results. 2003 * On small machines, including kdump capture kernels running 2004 * in a small area, boosting the watermark can cause an out of 2005 * memory situation immediately. 2006 */ 2007 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2008 return false; 2009 2010 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2011 watermark_boost_factor, 10000); 2012 2013 /* 2014 * high watermark may be uninitialised if fragmentation occurs 2015 * very early in boot so do not boost. 
We do not fall 2016 * through and boost by pageblock_nr_pages as failing 2017 * allocations that early means that reclaim is not going 2018 * to help and it may even be impossible to reclaim the 2019 * boosted watermark resulting in a hang. 2020 */ 2021 if (!max_boost) 2022 return false; 2023 2024 max_boost = max(pageblock_nr_pages, max_boost); 2025 2026 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2027 max_boost); 2028 2029 return true; 2030 } 2031 2032 /* 2033 * When we are falling back to another migratetype during allocation, should we 2034 * try to claim an entire block to satisfy further allocations, instead of 2035 * polluting multiple pageblocks? 2036 */ 2037 static bool should_try_claim_block(unsigned int order, int start_mt) 2038 { 2039 /* 2040 * This order check is deliberately kept even though the next 2041 * check below is more relaxed. If this condition is met, we can 2042 * claim the whole pageblock for certain, whereas the check below 2043 * is only a heuristic that does not guarantee it and may be 2044 * changed at any time. 2045 */ 2046 if (order >= pageblock_order) 2047 return true; 2048 2049 /* 2050 * Above a certain threshold, always try to claim, as it's likely there 2051 * will be more free pages in the pageblock. 2052 */ 2053 if (order >= pageblock_order / 2) 2054 return true; 2055 2056 /* 2057 * Unmovable/reclaimable allocations would cause permanent 2058 * fragmentation if they fell back to allocating from a movable block 2059 * (polluting it), so we try to claim the whole block regardless of the 2060 * allocation size. Later movable allocations can always steal from this 2061 * block, which is less problematic. 2062 */ 2063 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE) 2064 return true; 2065 2066 if (page_group_by_mobility_disabled) 2067 return true; 2068 2069 /* 2070 * Movable pages won't cause permanent fragmentation, so for small 2071 * allocations we only need to temporarily steal unmovable or 2072 * reclaimable pages that are closest to the request size. After a 2073 * while, memory compaction may occur to form large contiguous pages, 2074 * and the next movable allocation may not need to steal. 2075 */ 2076 return false; 2077 } 2078 2079 /* 2080 * Check whether there is a suitable fallback freepage with requested order. 2081 * Sets *claim_block to instruct the caller whether it should convert a whole 2082 * pageblock to the returned migratetype. 2083 * If only_claim is true, this function returns fallback_mt only if 2084 * we would do this whole-block claiming. This helps to reduce 2085 * fragmentation due to mixed migratetype pages in one pageblock. 2086 */ 2087 int find_suitable_fallback(struct free_area *area, unsigned int order, 2088 int migratetype, bool only_claim, bool *claim_block) 2089 { 2090 int i; 2091 int fallback_mt; 2092 2093 if (area->nr_free == 0) 2094 return -1; 2095 2096 *claim_block = false; 2097 for (i = 0; i < MIGRATE_PCPTYPES - 1; i++) { 2098 fallback_mt = fallbacks[migratetype][i]; 2099 if (free_area_empty(area, fallback_mt)) 2100 continue; 2101 2102 if (should_try_claim_block(order, migratetype)) 2103 *claim_block = true; 2104 2105 if (*claim_block || !only_claim) 2106 return fallback_mt; 2107 } 2108 2109 return -1; 2110 } 2111 2112 /* 2113 * This function implements actual block claiming behaviour. If order is large 2114 * enough, we can claim the whole pageblock for the requested migratetype.
If 2115 * not, we check the pageblock for constituent pages; if at least half of the 2116 * pages are free or compatible, we can still claim the whole block, so pages 2117 * freed in the future will be put on the correct free list. 2118 */ 2119 static struct page * 2120 try_to_claim_block(struct zone *zone, struct page *page, 2121 int current_order, int order, int start_type, 2122 int block_type, unsigned int alloc_flags) 2123 { 2124 int free_pages, movable_pages, alike_pages; 2125 unsigned long start_pfn; 2126 2127 /* Take ownership for orders >= pageblock_order */ 2128 if (current_order >= pageblock_order) { 2129 unsigned int nr_added; 2130 2131 del_page_from_free_list(page, zone, current_order, block_type); 2132 change_pageblock_range(page, current_order, start_type); 2133 nr_added = expand(zone, page, order, current_order, start_type); 2134 account_freepages(zone, nr_added, start_type); 2135 return page; 2136 } 2137 2138 /* 2139 * Boost watermarks to increase reclaim pressure to reduce the 2140 * likelihood of future fallbacks. Wake kswapd now as the node 2141 * may be balanced overall and kswapd will not wake naturally. 2142 */ 2143 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2144 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2145 2146 /* Moving the whole block can fail due to zone boundary conditions */ 2147 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 2148 &movable_pages)) 2149 return NULL; 2150 2151 /* 2152 * Determine how many pages are compatible with our allocation. 2153 * For movable allocation, it's the number of movable pages which 2154 * we just obtained. For other types it's a bit more tricky. 2155 */ 2156 if (start_type == MIGRATE_MOVABLE) { 2157 alike_pages = movable_pages; 2158 } else { 2159 /* 2160 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2161 * to a MOVABLE pageblock, consider all non-movable pages as 2162 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2163 * vice versa, be conservative since we can't distinguish the 2164 * exact migratetype of non-movable pages. 2165 */ 2166 if (block_type == MIGRATE_MOVABLE) 2167 alike_pages = pageblock_nr_pages 2168 - (free_pages + movable_pages); 2169 else 2170 alike_pages = 0; 2171 } 2172 /* 2173 * If a sufficient number of pages in the block are either free or of 2174 * compatible migratability with our allocation, claim the whole block. 2175 */ 2176 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2177 page_group_by_mobility_disabled) { 2178 __move_freepages_block(zone, start_pfn, block_type, start_type); 2179 return __rmqueue_smallest(zone, order, start_type); 2180 } 2181 2182 return NULL; 2183 } 2184 2185 /* 2186 * Try finding a free buddy page on the fallback list. 2187 * 2188 * This will attempt to claim a whole pageblock for the requested type 2189 * to ensure grouping of such requests in the future. 2190 * 2191 * If a whole block cannot be claimed, steal an individual page, regressing to 2192 * __rmqueue_smallest() logic to at least break up as little contiguity as 2193 * possible. 2194 * 2195 * The use of signed ints for order and current_order is a deliberate 2196 * deviation from the rest of this file, to make the for loop 2197 * condition simpler. 2198 * 2199 * Return the stolen page, or NULL if none can be found.
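 * * E.g. (illustrative): an order-2 MIGRATE_UNMOVABLE request that finds its own freelists empty first scans down from MAX_PAGE_ORDER for a RECLAIMABLE or MOVABLE buddy worth converting as a whole block (should_try_claim_block()); only if no block can be claimed does it take the smallest suitable fallback page, scanning up from the requested order.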
2200 */ 2201 static __always_inline struct page * 2202 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2203 unsigned int alloc_flags) 2204 { 2205 struct free_area *area; 2206 int current_order; 2207 int min_order = order; 2208 struct page *page; 2209 int fallback_mt; 2210 bool claim_block; 2211 2212 /* 2213 * Do not steal pages from freelists belonging to other pageblocks 2214 * i.e. orders < pageblock_order. If there are no local zones free, 2215 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2216 */ 2217 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2218 min_order = pageblock_order; 2219 2220 /* 2221 * Find the largest available free page in the other list. This roughly 2222 * approximates finding the pageblock with the most free pages, which 2223 * would be too costly to do exactly. 2224 */ 2225 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2226 --current_order) { 2227 area = &(zone->free_area[current_order]); 2228 fallback_mt = find_suitable_fallback(area, current_order, 2229 start_migratetype, false, &claim_block); 2230 if (fallback_mt == -1) 2231 continue; 2232 2233 if (!claim_block) 2234 break; 2235 2236 page = get_page_from_free_area(area, fallback_mt); 2237 page = try_to_claim_block(zone, page, current_order, order, 2238 start_migratetype, fallback_mt, 2239 alloc_flags); 2240 if (page) 2241 goto got_one; 2242 } 2243 2244 if (alloc_flags & ALLOC_NOFRAGMENT) 2245 return NULL; 2246 2247 /* No luck claiming pageblock. Find the smallest fallback page */ 2248 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2249 area = &(zone->free_area[current_order]); 2250 fallback_mt = find_suitable_fallback(area, current_order, 2251 start_migratetype, false, &claim_block); 2252 if (fallback_mt == -1) 2253 continue; 2254 2255 page = get_page_from_free_area(area, fallback_mt); 2256 page_del_and_expand(zone, page, order, current_order, fallback_mt); 2257 goto got_one; 2258 } 2259 2260 return NULL; 2261 2262 got_one: 2263 trace_mm_page_alloc_extfrag(page, order, current_order, 2264 start_migratetype, fallback_mt); 2265 2266 return page; 2267 } 2268 2269 /* 2270 * Do the hard work of removing an element from the buddy allocator. 2271 * Call me with the zone->lock already held. 2272 */ 2273 static __always_inline struct page * 2274 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2275 unsigned int alloc_flags) 2276 { 2277 struct page *page; 2278 2279 if (IS_ENABLED(CONFIG_CMA)) { 2280 /* 2281 * Balance movable allocations between regular and CMA areas by 2282 * allocating from CMA when over half of the zone's free memory 2283 * is in the CMA area. 2284 */ 2285 if (alloc_flags & ALLOC_CMA && 2286 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2287 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2288 page = __rmqueue_cma_fallback(zone, order); 2289 if (page) 2290 return page; 2291 } 2292 } 2293 2294 page = __rmqueue_smallest(zone, order, migratetype); 2295 if (unlikely(!page)) { 2296 if (alloc_flags & ALLOC_CMA) 2297 page = __rmqueue_cma_fallback(zone, order); 2298 2299 if (!page) 2300 page = __rmqueue_fallback(zone, order, migratetype, 2301 alloc_flags); 2302 } 2303 return page; 2304 } 2305 2306 /* 2307 * Obtain a specified number of elements from the buddy allocator, all under 2308 * a single hold of the lock, for efficiency. Add them to the supplied list. 2309 * Returns the number of new pages which were placed at *list. 
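 * * Typical use is refilling a pcplist, cf. __rmqueue_pcplist() later in this file (sketch only): * *	alloced = rmqueue_bulk(zone, order, nr_pcp_alloc(pcp, zone, order), list, migratetype, alloc_flags); *	pcp->count += alloced << order; * * The caller holds the pcp lock; rmqueue_bulk() takes zone->lock itself.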
2310 */ 2311 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2312 unsigned long count, struct list_head *list, 2313 int migratetype, unsigned int alloc_flags) 2314 { 2315 unsigned long flags; 2316 int i; 2317 2318 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 2319 if (!spin_trylock_irqsave(&zone->lock, flags)) 2320 return 0; 2321 } else { 2322 spin_lock_irqsave(&zone->lock, flags); 2323 } 2324 for (i = 0; i < count; ++i) { 2325 struct page *page = __rmqueue(zone, order, migratetype, 2326 alloc_flags); 2327 if (unlikely(page == NULL)) 2328 break; 2329 2330 /* 2331 * Split buddy pages returned by expand() are received here in 2332 * physical page order. The page is added to the tail of the 2333 * caller's list. From the caller's perspective, the linked list 2334 * is therefore ordered by page number under some conditions. 2335 * This is useful for IO devices that walk the list from the 2336 * head in physical page order, and that can merge IO requests 2337 * when the physical 2338 * pages are ordered properly. 2339 */ 2340 list_add_tail(&page->pcp_list, list); 2341 } 2342 spin_unlock_irqrestore(&zone->lock, flags); 2343 2344 return i; 2345 } 2346 2347 /* 2348 * Called from the vmstat counter updater to decay the PCP high. 2349 * Return whether there is additional work to do. 2350 */ 2351 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) 2352 { 2353 int high_min, to_drain, batch; 2354 int todo = 0; 2355 2356 high_min = READ_ONCE(pcp->high_min); 2357 batch = READ_ONCE(pcp->batch); 2358 /* 2359 * Decrease pcp->high periodically to try to free possible 2360 * idle PCP pages, but avoid freeing too many pages at once 2361 * to control latency. This also caps the pcp->high decrement. 2362 */ 2363 if (pcp->high > high_min) { 2364 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2365 pcp->high - (pcp->high >> 3), high_min); 2366 if (pcp->high > high_min) 2367 todo++; 2368 } 2369 2370 to_drain = pcp->count - pcp->high; 2371 if (to_drain > 0) { 2372 spin_lock(&pcp->lock); 2373 free_pcppages_bulk(zone, to_drain, pcp, 0); 2374 spin_unlock(&pcp->lock); 2375 todo++; 2376 } 2377 2378 return todo; 2379 } 2380 2381 #ifdef CONFIG_NUMA 2382 /* 2383 * Called from the vmstat counter updater to drain pagesets of this 2384 * currently executing processor on remote nodes after they have 2385 * expired. 2386 */ 2387 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2388 { 2389 int to_drain, batch; 2390 2391 batch = READ_ONCE(pcp->batch); 2392 to_drain = min(pcp->count, batch); 2393 if (to_drain > 0) { 2394 spin_lock(&pcp->lock); 2395 free_pcppages_bulk(zone, to_drain, pcp, 0); 2396 spin_unlock(&pcp->lock); 2397 } 2398 } 2399 #endif 2400 2401 /* 2402 * Drain pcplists of the indicated processor and zone. 2403 */ 2404 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2405 { 2406 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2407 int count; 2408 2409 do { 2410 spin_lock(&pcp->lock); 2411 count = pcp->count; 2412 if (count) { 2413 int to_drain = min(count, 2414 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2415 2416 free_pcppages_bulk(zone, to_drain, pcp, 0); 2417 count -= to_drain; 2418 } 2419 spin_unlock(&pcp->lock); 2420 } while (count); 2421 } 2422 2423 /* 2424 * Drain pcplists of all zones on the indicated processor.
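 * * Note that @cpu need not be the executing CPU: the pcp lists are protected by the pcp spinlock rather than by CPU-locality, so one CPU can safely drain another's lists (as __drain_all_pages() below does).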
2425 */ 2426 static void drain_pages(unsigned int cpu) 2427 { 2428 struct zone *zone; 2429 2430 for_each_populated_zone(zone) { 2431 drain_pages_zone(cpu, zone); 2432 } 2433 } 2434 2435 /* 2436 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2437 */ 2438 void drain_local_pages(struct zone *zone) 2439 { 2440 int cpu = smp_processor_id(); 2441 2442 if (zone) 2443 drain_pages_zone(cpu, zone); 2444 else 2445 drain_pages(cpu); 2446 } 2447 2448 /* 2449 * The implementation of drain_all_pages(), exposing an extra parameter to 2450 * drain on all cpus. 2451 * 2452 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2453 * not empty. The check for non-emptiness can however race with a free to 2454 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2455 * that need the guarantee that every CPU has drained can disable the 2456 * optimizing racy check. 2457 */ 2458 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2459 { 2460 int cpu; 2461 2462 /* 2463 * Allocate in the BSS so we won't require allocation in 2464 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2465 */ 2466 static cpumask_t cpus_with_pcps; 2467 2468 /* 2469 * Do not drain if one is already in progress unless it's specific to 2470 * a zone. Such callers are primarily CMA and memory hotplug and need 2471 * the drain to be complete when the call returns. 2472 */ 2473 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2474 if (!zone) 2475 return; 2476 mutex_lock(&pcpu_drain_mutex); 2477 } 2478 2479 /* 2480 * We don't care about racing with a CPU hotplug event, as the 2481 * offline notification will cause the notified 2482 * cpu to drain that CPU's pcps, and on_each_cpu_mask 2483 * disables preemption as part of its processing. 2484 */ 2485 for_each_online_cpu(cpu) { 2486 struct per_cpu_pages *pcp; 2487 struct zone *z; 2488 bool has_pcps = false; 2489 2490 if (force_all_cpus) { 2491 /* 2492 * The pcp.count check is racy, some callers need a 2493 * guarantee that no cpu is missed. 2494 */ 2495 has_pcps = true; 2496 } else if (zone) { 2497 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2498 if (pcp->count) 2499 has_pcps = true; 2500 } else { 2501 for_each_populated_zone(z) { 2502 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2503 if (pcp->count) { 2504 has_pcps = true; 2505 break; 2506 } 2507 } 2508 } 2509 2510 if (has_pcps) 2511 cpumask_set_cpu(cpu, &cpus_with_pcps); 2512 else 2513 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2514 } 2515 2516 for_each_cpu(cpu, &cpus_with_pcps) { 2517 if (zone) 2518 drain_pages_zone(cpu, zone); 2519 else 2520 drain_pages(cpu); 2521 } 2522 2523 mutex_unlock(&pcpu_drain_mutex); 2524 } 2525 2526 /* 2527 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2528 * 2529 * When zone parameter is non-NULL, spill just the single zone's pages. 2530 */ 2531 void drain_all_pages(struct zone *zone) 2532 { 2533 __drain_all_pages(zone, false); 2534 } 2535 2536 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2537 { 2538 int min_nr_free, max_nr_free; 2539 2540 /* Free as much as possible if batch freeing high-order pages.
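E.g. (illustrative numbers) with batch = 63 and the default CONFIG_PCP_BATCH_SCALE_MAX of 5, up to min(pcp->count, 63 << 5 = 2016) pages are freed in one call.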
*/ 2541 if (unlikely(free_high)) 2542 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2543 2544 /* Check for PCP disabled or boot pageset */ 2545 if (unlikely(high < batch)) 2546 return 1; 2547 2548 /* Leave at least pcp->batch pages on the list */ 2549 min_nr_free = batch; 2550 max_nr_free = high - batch; 2551 2552 /* 2553 * Increase the batch number to the number of consecutively 2554 * freed pages to reduce zone lock contention. 2555 */ 2556 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2557 2558 return batch; 2559 } 2560 2561 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2562 int batch, bool free_high) 2563 { 2564 int high, high_min, high_max; 2565 2566 high_min = READ_ONCE(pcp->high_min); 2567 high_max = READ_ONCE(pcp->high_max); 2568 high = pcp->high = clamp(pcp->high, high_min, high_max); 2569 2570 if (unlikely(!high)) 2571 return 0; 2572 2573 if (unlikely(free_high)) { 2574 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2575 high_min); 2576 return 0; 2577 } 2578 2579 /* 2580 * If reclaim is active, limit the number of pages that can be 2581 * stored on pcp lists 2582 */ 2583 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2584 int free_count = max_t(int, pcp->free_count, batch); 2585 2586 pcp->high = max(high - free_count, high_min); 2587 return min(batch << 2, pcp->high); 2588 } 2589 2590 if (high_min == high_max) 2591 return high; 2592 2593 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2594 int free_count = max_t(int, pcp->free_count, batch); 2595 2596 pcp->high = max(high - free_count, high_min); 2597 high = max(pcp->count, high_min); 2598 } else if (pcp->count >= high) { 2599 int need_high = pcp->free_count + batch; 2600 2601 /* pcp->high should be large enough to hold batch freed pages */ 2602 if (pcp->high < need_high) 2603 pcp->high = clamp(need_high, high_min, high_max); 2604 } 2605 2606 return high; 2607 } 2608 2609 static void free_frozen_page_commit(struct zone *zone, 2610 struct per_cpu_pages *pcp, struct page *page, int migratetype, 2611 unsigned int order, fpi_t fpi_flags) 2612 { 2613 int high, batch; 2614 int pindex; 2615 bool free_high = false; 2616 2617 /* 2618 * On freeing, reduce the number of pages that are batch allocated. 2619 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2620 * allocations. 2621 */ 2622 pcp->alloc_factor >>= 1; 2623 __count_vm_events(PGFREE, 1 << order); 2624 pindex = order_to_pindex(migratetype, order); 2625 list_add(&page->pcp_list, &pcp->lists[pindex]); 2626 pcp->count += 1 << order; 2627 2628 batch = READ_ONCE(pcp->batch); 2629 /* 2630 * As high-order pages other than THPs stored on PCP can contribute 2631 * to fragmentation, limit the number stored when PCP is heavily 2632 * freeing without allocation. The remainder after bulk freeing 2633 * stops will be drained from vmstat refresh context. 2634 */ 2635 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2636 free_high = (pcp->free_count >= batch && 2637 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2638 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2639 pcp->count >= READ_ONCE(batch))); 2640 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2641 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2642 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2643 } 2644 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2645 pcp->free_count += (1 << order); 2646 2647 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 2648 /* 2649 * Do not attempt to take a zone lock. Let pcp->count get 2650 * over the high mark temporarily.
2651 */ 2652 return; 2653 } 2654 high = nr_pcp_high(pcp, zone, batch, free_high); 2655 if (pcp->count >= high) { 2656 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), 2657 pcp, pindex); 2658 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2659 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2660 ZONE_MOVABLE, 0)) 2661 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2662 } 2663 } 2664 2665 /* 2666 * Free a pcp page 2667 */ 2668 static void __free_frozen_pages(struct page *page, unsigned int order, 2669 fpi_t fpi_flags) 2670 { 2671 unsigned long __maybe_unused UP_flags; 2672 struct per_cpu_pages *pcp; 2673 struct zone *zone; 2674 unsigned long pfn = page_to_pfn(page); 2675 int migratetype; 2676 2677 if (!pcp_allowed_order(order)) { 2678 __free_pages_ok(page, order, fpi_flags); 2679 return; 2680 } 2681 2682 if (!free_pages_prepare(page, order)) 2683 return; 2684 2685 /* 2686 * We only track unmovable, reclaimable and movable on pcp lists. 2687 * Place ISOLATE pages on the isolated list because they are being 2688 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2689 * get those areas back if necessary. Otherwise, we may have to free 2690 * excessively into the page allocator 2691 */ 2692 zone = page_zone(page); 2693 migratetype = get_pfnblock_migratetype(page, pfn); 2694 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2695 if (unlikely(is_migrate_isolate(migratetype))) { 2696 free_one_page(zone, page, pfn, order, fpi_flags); 2697 return; 2698 } 2699 migratetype = MIGRATE_MOVABLE; 2700 } 2701 2702 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT) 2703 && (in_nmi() || in_hardirq()))) { 2704 add_page_to_zone_llist(zone, page, order); 2705 return; 2706 } 2707 pcp_trylock_prepare(UP_flags); 2708 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2709 if (pcp) { 2710 free_frozen_page_commit(zone, pcp, page, migratetype, order, fpi_flags); 2711 pcp_spin_unlock(pcp); 2712 } else { 2713 free_one_page(zone, page, pfn, order, fpi_flags); 2714 } 2715 pcp_trylock_finish(UP_flags); 2716 } 2717 2718 void free_frozen_pages(struct page *page, unsigned int order) 2719 { 2720 __free_frozen_pages(page, order, FPI_NONE); 2721 } 2722 2723 /* 2724 * Free a batch of folios 2725 */ 2726 void free_unref_folios(struct folio_batch *folios) 2727 { 2728 unsigned long __maybe_unused UP_flags; 2729 struct per_cpu_pages *pcp = NULL; 2730 struct zone *locked_zone = NULL; 2731 int i, j; 2732 2733 /* Prepare folios for freeing */ 2734 for (i = 0, j = 0; i < folios->nr; i++) { 2735 struct folio *folio = folios->folios[i]; 2736 unsigned long pfn = folio_pfn(folio); 2737 unsigned int order = folio_order(folio); 2738 2739 if (!free_pages_prepare(&folio->page, order)) 2740 continue; 2741 /* 2742 * Free orders not handled on the PCP directly to the 2743 * allocator. 
2744 */ 2745 if (!pcp_allowed_order(order)) { 2746 free_one_page(folio_zone(folio), &folio->page, 2747 pfn, order, FPI_NONE); 2748 continue; 2749 } 2750 folio->private = (void *)(unsigned long)order; 2751 if (j != i) 2752 folios->folios[j] = folio; 2753 j++; 2754 } 2755 folios->nr = j; 2756 2757 for (i = 0; i < folios->nr; i++) { 2758 struct folio *folio = folios->folios[i]; 2759 struct zone *zone = folio_zone(folio); 2760 unsigned long pfn = folio_pfn(folio); 2761 unsigned int order = (unsigned long)folio->private; 2762 int migratetype; 2763 2764 folio->private = NULL; 2765 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 2766 2767 /* Different zone requires a different pcp lock */ 2768 if (zone != locked_zone || 2769 is_migrate_isolate(migratetype)) { 2770 if (pcp) { 2771 pcp_spin_unlock(pcp); 2772 pcp_trylock_finish(UP_flags); 2773 locked_zone = NULL; 2774 pcp = NULL; 2775 } 2776 2777 /* 2778 * Free isolated pages directly to the 2779 * allocator, see comment in free_frozen_pages. 2780 */ 2781 if (is_migrate_isolate(migratetype)) { 2782 free_one_page(zone, &folio->page, pfn, 2783 order, FPI_NONE); 2784 continue; 2785 } 2786 2787 /* 2788 * trylock is necessary as folios may be getting freed 2789 * from IRQ or SoftIRQ context after an IO completion. 2790 */ 2791 pcp_trylock_prepare(UP_flags); 2792 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2793 if (unlikely(!pcp)) { 2794 pcp_trylock_finish(UP_flags); 2795 free_one_page(zone, &folio->page, pfn, 2796 order, FPI_NONE); 2797 continue; 2798 } 2799 locked_zone = zone; 2800 } 2801 2802 /* 2803 * Non-isolated types over MIGRATE_PCPTYPES get added 2804 * to the MIGRATE_MOVABLE pcp list. 2805 */ 2806 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2807 migratetype = MIGRATE_MOVABLE; 2808 2809 trace_mm_page_free_batched(&folio->page); 2810 free_frozen_page_commit(zone, pcp, &folio->page, migratetype, 2811 order, FPI_NONE); 2812 } 2813 2814 if (pcp) { 2815 pcp_spin_unlock(pcp); 2816 pcp_trylock_finish(UP_flags); 2817 } 2818 folio_batch_reinit(folios); 2819 } 2820 2821 /* 2822 * split_page takes a non-compound higher-order page, and splits it into 2823 * n (1<<order) sub-pages: page[0..n] 2824 * Each sub-page must be freed individually. 2825 * 2826 * Note: this is probably too low level an operation for use in drivers. 2827 * Please consult with lkml before using this in your driver. 2828 */ 2829 void split_page(struct page *page, unsigned int order) 2830 { 2831 int i; 2832 2833 VM_BUG_ON_PAGE(PageCompound(page), page); 2834 VM_BUG_ON_PAGE(!page_count(page), page); 2835 2836 for (i = 1; i < (1 << order); i++) 2837 set_page_refcounted(page + i); 2838 split_page_owner(page, order, 0); 2839 pgalloc_tag_split(page_folio(page), order, 0); 2840 split_page_memcg(page, order); 2841 } 2842 EXPORT_SYMBOL_GPL(split_page); 2843 2844 int __isolate_free_page(struct page *page, unsigned int order) 2845 { 2846 struct zone *zone = page_zone(page); 2847 int mt = get_pageblock_migratetype(page); 2848 2849 if (!is_migrate_isolate(mt)) { 2850 unsigned long watermark; 2851 /* 2852 * Obey watermarks as if the page was being allocated. We can 2853 * emulate a high-order watermark check with a raised order-0 2854 * watermark, because we already know our high-order page 2855 * exists. 
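 * E.g. (illustrative): isolating an order-9 page raises the mark to min_wmark + (1UL << 9), i.e. the zone must keep 512 extra free base pages beyond the min watermark for the order-0 check below to pass.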
2856 */ 2857 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2858 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2859 return 0; 2860 } 2861 2862 del_page_from_free_list(page, zone, order, mt); 2863 2864 /* 2865 * Set the pageblock's migratetype if the isolated page covers at 2866 * least half of a pageblock. 2867 */ 2868 if (order >= pageblock_order - 1) { 2869 struct page *endpage = page + (1 << order) - 1; 2870 for (; page < endpage; page += pageblock_nr_pages) { 2871 int mt = get_pageblock_migratetype(page); 2872 /* 2873 * Only change normal pageblocks (i.e., they can merge 2874 * with others) 2875 */ 2876 if (migratetype_is_mergeable(mt)) 2877 move_freepages_block(zone, page, mt, 2878 MIGRATE_MOVABLE); 2879 } 2880 } 2881 2882 return 1UL << order; 2883 } 2884 2885 /** 2886 * __putback_isolated_page - Return a now-isolated page back where we got it 2887 * @page: Page that was isolated 2888 * @order: Order of the isolated page 2889 * @mt: The page's pageblock's migratetype 2890 * 2891 * This function is meant to return a page pulled from the free lists via 2892 * __isolate_free_page back to the free list it was pulled from. 2893 */ 2894 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2895 { 2896 struct zone *zone = page_zone(page); 2897 2898 /* zone lock should be held when this function is called */ 2899 lockdep_assert_held(&zone->lock); 2900 2901 /* Return isolated page to tail of freelist. */ 2902 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2903 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2904 } 2905 2906 /* 2907 * Update NUMA hit/miss statistics 2908 */ 2909 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2910 long nr_account) 2911 { 2912 #ifdef CONFIG_NUMA 2913 enum numa_stat_item local_stat = NUMA_LOCAL; 2914 2915 /* Skip NUMA counter updates if NUMA stats are disabled */ 2916 if (!static_branch_likely(&vm_numa_stat_key)) 2917 return; 2918 2919 if (zone_to_nid(z) != numa_node_id()) 2920 local_stat = NUMA_OTHER; 2921 2922 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2923 __count_numa_events(z, NUMA_HIT, nr_account); 2924 else { 2925 __count_numa_events(z, NUMA_MISS, nr_account); 2926 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2927 } 2928 __count_numa_events(z, local_stat, nr_account); 2929 #endif 2930 } 2931 2932 static __always_inline 2933 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2934 unsigned int order, unsigned int alloc_flags, 2935 int migratetype) 2936 { 2937 struct page *page; 2938 unsigned long flags; 2939 2940 do { 2941 page = NULL; 2942 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 2943 if (!spin_trylock_irqsave(&zone->lock, flags)) 2944 return NULL; 2945 } else { 2946 spin_lock_irqsave(&zone->lock, flags); 2947 } 2948 if (alloc_flags & ALLOC_HIGHATOMIC) 2949 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2950 if (!page) { 2951 page = __rmqueue(zone, order, migratetype, alloc_flags); 2952 2953 /* 2954 * If the allocation fails, allow OOM handling and 2955 * order-0 (atomic) allocs access to HIGHATOMIC 2956 * reserves as failing now is worse than failing a 2957 * high-order atomic allocation in the future.
2958 */ 2959 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 2960 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2961 2962 if (!page) { 2963 spin_unlock_irqrestore(&zone->lock, flags); 2964 return NULL; 2965 } 2966 } 2967 spin_unlock_irqrestore(&zone->lock, flags); 2968 } while (check_new_pages(page, order)); 2969 2970 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2971 zone_statistics(preferred_zone, zone, 1); 2972 2973 return page; 2974 } 2975 2976 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 2977 { 2978 int high, base_batch, batch, max_nr_alloc; 2979 int high_max, high_min; 2980 2981 base_batch = READ_ONCE(pcp->batch); 2982 high_min = READ_ONCE(pcp->high_min); 2983 high_max = READ_ONCE(pcp->high_max); 2984 high = pcp->high = clamp(pcp->high, high_min, high_max); 2985 2986 /* Check for PCP disabled or boot pageset */ 2987 if (unlikely(high < base_batch)) 2988 return 1; 2989 2990 if (order) 2991 batch = base_batch; 2992 else 2993 batch = (base_batch << pcp->alloc_factor); 2994 2995 /* 2996 * With a larger pcp->high, we could avoid allocating from the 2997 * zone. 2998 */ 2999 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3000 high = pcp->high = min(high + batch, high_max); 3001 3002 if (!order) { 3003 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 3004 /* 3005 * Double the number of pages allocated each time a subsequent 3006 * order-0 allocation occurs without any freeing in between. 3007 */ 3008 if (batch <= max_nr_alloc && 3009 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 3010 pcp->alloc_factor++; 3011 batch = min(batch, max_nr_alloc); 3012 } 3013 3014 /* 3015 * Scale batch relative to order if batch implies free pages 3016 * can be stored on the PCP. Batch can be 1 for small zones or 3017 * for boot pagesets which should never store free pages as 3018 * the pages may belong to arbitrary zones. 3019 */ 3020 if (batch > 1) 3021 batch = max(batch >> order, 2); 3022 3023 return batch; 3024 } 3025 3026 /* Remove page from the per-cpu list, caller must protect the list */ 3027 static inline 3028 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3029 int migratetype, 3030 unsigned int alloc_flags, 3031 struct per_cpu_pages *pcp, 3032 struct list_head *list) 3033 { 3034 struct page *page; 3035 3036 do { 3037 if (list_empty(list)) { 3038 int batch = nr_pcp_alloc(pcp, zone, order); 3039 int alloced; 3040 3041 alloced = rmqueue_bulk(zone, order, 3042 batch, list, 3043 migratetype, alloc_flags); 3044 3045 pcp->count += alloced << order; 3046 if (unlikely(list_empty(list))) 3047 return NULL; 3048 } 3049 3050 page = list_first_entry(list, struct page, pcp_list); 3051 list_del(&page->pcp_list); 3052 pcp->count -= 1 << order; 3053 } while (check_new_pages(page, order)); 3054 3055 return page; 3056 } 3057 3058 /* Lock and remove page from the per-cpu list */ 3059 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3060 struct zone *zone, unsigned int order, 3061 int migratetype, unsigned int alloc_flags) 3062 { 3063 struct per_cpu_pages *pcp; 3064 struct list_head *list; 3065 struct page *page; 3066 unsigned long __maybe_unused UP_flags; 3067 3068 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy.
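In that case rmqueue_pcplist() returns NULL and rmqueue() falls back to rmqueue_buddy(), which takes zone->lock.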
*/ 3069 pcp_trylock_prepare(UP_flags); 3070 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3071 if (!pcp) { 3072 pcp_trylock_finish(UP_flags); 3073 return NULL; 3074 } 3075 3076 /* 3077 * On allocation, reduce the number of pages that are batch freed. 3078 * See nr_pcp_free() where free_count scales the batch for subsequent 3079 * frees. 3080 */ 3081 pcp->free_count >>= 1; 3082 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3083 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3084 pcp_spin_unlock(pcp); 3085 pcp_trylock_finish(UP_flags); 3086 if (page) { 3087 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3088 zone_statistics(preferred_zone, zone, 1); 3089 } 3090 return page; 3091 } 3092 3093 /* 3094 * Allocate a page from the given zone. 3095 * Use pcplists for THP or "cheap" high-order allocations. 3096 */ 3097 3098 /* 3099 * Do not instrument rmqueue() with KMSAN. This function may call 3100 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 3101 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3102 * may call rmqueue() again, which will result in a deadlock. 3103 */ 3104 __no_sanitize_memory 3105 static inline 3106 struct page *rmqueue(struct zone *preferred_zone, 3107 struct zone *zone, unsigned int order, 3108 gfp_t gfp_flags, unsigned int alloc_flags, 3109 int migratetype) 3110 { 3111 struct page *page; 3112 3113 if (likely(pcp_allowed_order(order))) { 3114 page = rmqueue_pcplist(preferred_zone, zone, order, 3115 migratetype, alloc_flags); 3116 if (likely(page)) 3117 goto out; 3118 } 3119 3120 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3121 migratetype); 3122 3123 out: 3124 /* Separate test+clear to avoid unnecessary atomics */ 3125 if ((alloc_flags & ALLOC_KSWAPD) && 3126 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3127 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3128 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3129 } 3130 3131 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3132 return page; 3133 } 3134 3135 /* 3136 * Reserve the pageblock(s) surrounding an allocation request for 3137 * exclusive use of high-order atomic allocations if there are no 3138 * empty page blocks that contain a page with a suitable order. 3139 */ 3140 static void reserve_highatomic_pageblock(struct page *page, int order, 3141 struct zone *zone) 3142 { 3143 int mt; 3144 unsigned long max_managed, flags; 3145 3146 /* 3147 * The amount reserved: the minimum is 1 pageblock, the maximum is 3148 * roughly 1% of a zone. But if 1% of a zone falls below a 3149 * pageblock size, then don't reserve any pageblocks. 3150 * The check is race-prone but harmless. 3151 */ 3152 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 3153 return; 3154 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 3155 if (zone->nr_reserved_highatomic >= max_managed) 3156 return; 3157 3158 spin_lock_irqsave(&zone->lock, flags); 3159 3160 /* Recheck the nr_reserved_highatomic limit under the lock */ 3161 if (zone->nr_reserved_highatomic >= max_managed) 3162 goto out_unlock; 3163 3164 /* Yoink!
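The limit has been rechecked under the lock, so take this pageblock for the highatomic reserve: retype it and, below pageblock_order, move its free pages to MIGRATE_HIGHATOMIC.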
*/ 3165 mt = get_pageblock_migratetype(page); 3166 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 3167 if (!migratetype_is_mergeable(mt)) 3168 goto out_unlock; 3169 3170 if (order < pageblock_order) { 3171 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 3172 goto out_unlock; 3173 zone->nr_reserved_highatomic += pageblock_nr_pages; 3174 } else { 3175 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 3176 zone->nr_reserved_highatomic += 1 << order; 3177 } 3178 3179 out_unlock: 3180 spin_unlock_irqrestore(&zone->lock, flags); 3181 } 3182 3183 /* 3184 * Used when an allocation is about to fail under memory pressure. This 3185 * potentially hurts the reliability of high-order allocations when under 3186 * intense memory pressure but failed atomic allocations should be easier 3187 * to recover from than an OOM. 3188 * 3189 * If @force is true, try to unreserve pageblocks even though highatomic 3190 * pageblock is exhausted. 3191 */ 3192 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 3193 bool force) 3194 { 3195 struct zonelist *zonelist = ac->zonelist; 3196 unsigned long flags; 3197 struct zoneref *z; 3198 struct zone *zone; 3199 struct page *page; 3200 int order; 3201 int ret; 3202 3203 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 3204 ac->nodemask) { 3205 /* 3206 * Preserve at least one pageblock unless memory pressure 3207 * is really high. 3208 */ 3209 if (!force && zone->nr_reserved_highatomic <= 3210 pageblock_nr_pages) 3211 continue; 3212 3213 spin_lock_irqsave(&zone->lock, flags); 3214 for (order = 0; order < NR_PAGE_ORDERS; order++) { 3215 struct free_area *area = &(zone->free_area[order]); 3216 unsigned long size; 3217 3218 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 3219 if (!page) 3220 continue; 3221 3222 size = max(pageblock_nr_pages, 1UL << order); 3223 /* 3224 * It should never happen but changes to 3225 * locking could inadvertently allow a per-cpu 3226 * drain to add pages to MIGRATE_HIGHATOMIC 3227 * while unreserving so be safe and watch for 3228 * underflows. 3229 */ 3230 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) 3231 size = zone->nr_reserved_highatomic; 3232 zone->nr_reserved_highatomic -= size; 3233 3234 /* 3235 * Convert to ac->migratetype and avoid the normal 3236 * pageblock stealing heuristics. Minimally, the caller 3237 * is doing the work and needs the pages. More 3238 * importantly, if the block was always converted to 3239 * MIGRATE_UNMOVABLE or another type then the number 3240 * of pageblocks that cannot be completely freed 3241 * may increase. 3242 */ 3243 if (order < pageblock_order) 3244 ret = move_freepages_block(zone, page, 3245 MIGRATE_HIGHATOMIC, 3246 ac->migratetype); 3247 else { 3248 move_to_free_list(page, zone, order, 3249 MIGRATE_HIGHATOMIC, 3250 ac->migratetype); 3251 change_pageblock_range(page, order, 3252 ac->migratetype); 3253 ret = 1; 3254 } 3255 /* 3256 * Reserving the block(s) already succeeded, 3257 * so this should not fail on zone boundaries. 
3258 */ 3259 WARN_ON_ONCE(ret == -1); 3260 if (ret > 0) { 3261 spin_unlock_irqrestore(&zone->lock, flags); 3262 return ret; 3263 } 3264 } 3265 spin_unlock_irqrestore(&zone->lock, flags); 3266 } 3267 3268 return false; 3269 } 3270 3271 static inline long __zone_watermark_unusable_free(struct zone *z, 3272 unsigned int order, unsigned int alloc_flags) 3273 { 3274 long unusable_free = (1 << order) - 1; 3275 3276 /* 3277 * If the caller does not have rights to reserves below the min 3278 * watermark then subtract the free pages reserved for highatomic. 3279 */ 3280 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3281 unusable_free += READ_ONCE(z->nr_free_highatomic); 3282 3283 #ifdef CONFIG_CMA 3284 /* If allocation can't use CMA areas don't use free CMA pages */ 3285 if (!(alloc_flags & ALLOC_CMA)) 3286 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3287 #endif 3288 3289 return unusable_free; 3290 } 3291 3292 /* 3293 * Return true if free base pages are above 'mark'. For high-order checks it 3294 * will return true if the order-0 watermark is reached and there is at least 3295 * one free page of a suitable size. Checking now avoids taking the zone lock 3296 * to check in the allocation paths if no pages are free. 3297 */ 3298 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3299 int highest_zoneidx, unsigned int alloc_flags, 3300 long free_pages) 3301 { 3302 long min = mark; 3303 int o; 3304 3305 /* free_pages may go negative - that's OK */ 3306 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3307 3308 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3309 /* 3310 * __GFP_HIGH allows access to 50% of the min reserve, as 3311 * does ALLOC_OOM further below. 3312 */ 3313 if (alloc_flags & ALLOC_MIN_RESERVE) { 3314 min -= min / 2; 3315 3316 /* 3317 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3318 * access more reserves than just __GFP_HIGH. Other 3319 * non-blocking allocation requests such as GFP_NOWAIT 3320 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3321 * access to the min reserve. 3322 */ 3323 if (alloc_flags & ALLOC_NON_BLOCK) 3324 min -= min / 4; 3325 } 3326 3327 /* 3328 * OOM victims can try even harder than the normal reserve 3329 * users on the grounds that it's definitely going to be in 3330 * the exit path shortly and free memory. Any allocation it 3331 * makes during the free path will be small and short-lived. 3332 */ 3333 if (alloc_flags & ALLOC_OOM) 3334 min -= min / 2; 3335 } 3336 3337 /* 3338 * Check watermarks for an order-0 allocation request. If these 3339 * are not met, then a high-order request also cannot go ahead 3340 * even if a suitable page happened to be free.
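 * * Worked example (illustrative numbers): with mark = 1024, ALLOC_MIN_RESERVE lowers min to 512, ALLOC_NON_BLOCK on top of that to 384, and ALLOC_OOM on top of both to 192; each class may dip progressively deeper into the reserve.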
3341 */ 3342 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3343 return false; 3344 3345 /* If this is an order-0 request then the watermark is fine */ 3346 if (!order) 3347 return true; 3348 3349 /* For a high-order request, check that at least one suitable page is free */ 3350 for (o = order; o < NR_PAGE_ORDERS; o++) { 3351 struct free_area *area = &z->free_area[o]; 3352 int mt; 3353 3354 if (!area->nr_free) 3355 continue; 3356 3357 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3358 if (!free_area_empty(area, mt)) 3359 return true; 3360 } 3361 3362 #ifdef CONFIG_CMA 3363 if ((alloc_flags & ALLOC_CMA) && 3364 !free_area_empty(area, MIGRATE_CMA)) { 3365 return true; 3366 } 3367 #endif 3368 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3369 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3370 return true; 3371 } 3372 } 3373 return false; 3374 } 3375 3376 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3377 int highest_zoneidx, unsigned int alloc_flags) 3378 { 3379 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3380 zone_page_state(z, NR_FREE_PAGES)); 3381 } 3382 3383 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3384 unsigned long mark, int highest_zoneidx, 3385 unsigned int alloc_flags, gfp_t gfp_mask) 3386 { 3387 long free_pages; 3388 3389 free_pages = zone_page_state(z, NR_FREE_PAGES); 3390 3391 /* 3392 * Fast check for order-0 only. If this fails then the reserves 3393 * need to be calculated. 3394 */ 3395 if (!order) { 3396 long usable_free; 3397 long reserved; 3398 3399 usable_free = free_pages; 3400 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3401 3402 /* reserved may overestimate high-atomic reserves. */ 3403 usable_free -= min(usable_free, reserved); 3404 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3405 return true; 3406 } 3407 3408 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3409 free_pages)) 3410 return true; 3411 3412 /* 3413 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3414 * when checking the min watermark. The min watermark is the 3415 * point where boosting is ignored so that kswapd is woken up 3416 * when below the low watermark.
3417 */ 3418 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3419 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3420 mark = z->_watermark[WMARK_MIN]; 3421 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3422 alloc_flags, free_pages); 3423 } 3424 3425 return false; 3426 } 3427 3428 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3429 unsigned long mark, int highest_zoneidx) 3430 { 3431 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3432 3433 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3434 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3435 3436 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3437 free_pages); 3438 } 3439 3440 #ifdef CONFIG_NUMA 3441 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3442 3443 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3444 { 3445 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3446 node_reclaim_distance; 3447 } 3448 #else /* CONFIG_NUMA */ 3449 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3450 { 3451 return true; 3452 } 3453 #endif /* CONFIG_NUMA */ 3454 3455 /* 3456 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3457 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3458 * premature use of a lower zone may cause lowmem pressure problems that 3459 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3460 * probably too small. It only makes sense to spread allocations to avoid 3461 * fragmentation between the Normal and DMA32 zones. 3462 */ 3463 static inline unsigned int 3464 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3465 { 3466 unsigned int alloc_flags; 3467 3468 /* 3469 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3470 * to save a branch. 3471 */ 3472 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3473 3474 if (defrag_mode) { 3475 alloc_flags |= ALLOC_NOFRAGMENT; 3476 return alloc_flags; 3477 } 3478 3479 #ifdef CONFIG_ZONE_DMA32 3480 if (!zone) 3481 return alloc_flags; 3482 3483 if (zone_idx(zone) != ZONE_NORMAL) 3484 return alloc_flags; 3485 3486 /* 3487 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3488 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3489 * on UMA that if Normal is populated then so is DMA32. 3490 */ 3491 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3492 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3493 return alloc_flags; 3494 3495 alloc_flags |= ALLOC_NOFRAGMENT; 3496 #endif /* CONFIG_ZONE_DMA32 */ 3497 return alloc_flags; 3498 } 3499 3500 /* Must be called after current_gfp_context() which can change gfp_mask */ 3501 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3502 unsigned int alloc_flags) 3503 { 3504 #ifdef CONFIG_CMA 3505 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3506 alloc_flags |= ALLOC_CMA; 3507 #endif 3508 return alloc_flags; 3509 } 3510 3511 /* 3512 * get_page_from_freelist goes through the zonelist trying to allocate 3513 * a page. 3514 */ 3515 static struct page * 3516 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3517 const struct alloc_context *ac) 3518 { 3519 struct zoneref *z; 3520 struct zone *zone; 3521 struct pglist_data *last_pgdat = NULL; 3522 bool last_pgdat_dirty_ok = false; 3523 bool no_fallback; 3524 3525 retry: 3526 /* 3527 * Scan zonelist, looking for a zone with enough free. 
3528 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 3529 */ 3530 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3531 z = ac->preferred_zoneref; 3532 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3533 ac->nodemask) { 3534 struct page *page; 3535 unsigned long mark; 3536 3537 if (cpusets_enabled() && 3538 (alloc_flags & ALLOC_CPUSET) && 3539 !__cpuset_zone_allowed(zone, gfp_mask)) 3540 continue; 3541 /* 3542 * When allocating a page cache page for writing, we 3543 * want to get it from a node that is within its dirty 3544 * limit, such that no single node holds more than its 3545 * proportional share of globally allowed dirty pages. 3546 * The dirty limits take into account the node's 3547 * lowmem reserves and high watermark so that kswapd 3548 * should be able to balance it without having to 3549 * write pages from its LRU list. 3550 * 3551 * XXX: For now, allow allocations to potentially 3552 * exceed the per-node dirty limit in the slowpath 3553 * (spread_dirty_pages unset) before going into reclaim, 3554 * which is important when on a NUMA setup the allowed 3555 * nodes are together not big enough to reach the 3556 * global limit. The proper fix for these situations 3557 * will require awareness of nodes in the 3558 * dirty-throttling and the flusher threads. 3559 */ 3560 if (ac->spread_dirty_pages) { 3561 if (last_pgdat != zone->zone_pgdat) { 3562 last_pgdat = zone->zone_pgdat; 3563 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3564 } 3565 3566 if (!last_pgdat_dirty_ok) 3567 continue; 3568 } 3569 3570 if (no_fallback && !defrag_mode && nr_online_nodes > 1 && 3571 zone != zonelist_zone(ac->preferred_zoneref)) { 3572 int local_nid; 3573 3574 /* 3575 * If moving to a remote node, retry but allow 3576 * fragmenting fallbacks. Locality is more important 3577 * than fragmentation avoidance. 3578 */ 3579 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3580 if (zone_to_nid(zone) != local_nid) { 3581 alloc_flags &= ~ALLOC_NOFRAGMENT; 3582 goto retry; 3583 } 3584 } 3585 3586 cond_accept_memory(zone, order); 3587 3588 /* 3589 * Detect whether the number of free pages is below the high 3590 * watermark. If so, we will decrease pcp->high and free 3591 * PCP pages in the free path to reduce the possibility of 3592 * premature page reclaim. Detection is done here to 3593 * avoid doing it in the hotter free path. 3594 */ 3595 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3596 goto check_alloc_wmark; 3597 3598 mark = high_wmark_pages(zone); 3599 if (zone_watermark_fast(zone, order, mark, 3600 ac->highest_zoneidx, alloc_flags, 3601 gfp_mask)) 3602 goto try_this_zone; 3603 else 3604 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3605 3606 check_alloc_wmark: 3607 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3608 if (!zone_watermark_fast(zone, order, mark, 3609 ac->highest_zoneidx, alloc_flags, 3610 gfp_mask)) { 3611 int ret; 3612 3613 if (cond_accept_memory(zone, order)) 3614 goto try_this_zone; 3615 3616 /* 3617 * Watermark failed for this zone, but see if we can 3618 * grow this zone if it contains deferred pages.
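 * (Deferred struct page init leaves much of a large zone's memory uninitialised at early boot; _deferred_grow_zone() initialises a further chunk on demand so the watermark check can be retried.)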
3619 */ 3620 if (deferred_pages_enabled()) { 3621 if (_deferred_grow_zone(zone, order)) 3622 goto try_this_zone; 3623 } 3624 /* Checked here to keep the fast path fast */ 3625 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3626 if (alloc_flags & ALLOC_NO_WATERMARKS) 3627 goto try_this_zone; 3628 3629 if (!node_reclaim_enabled() || 3630 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3631 continue; 3632 3633 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3634 switch (ret) { 3635 case NODE_RECLAIM_NOSCAN: 3636 /* did not scan */ 3637 continue; 3638 case NODE_RECLAIM_FULL: 3639 /* scanned but unreclaimable */ 3640 continue; 3641 default: 3642 /* did we reclaim enough */ 3643 if (zone_watermark_ok(zone, order, mark, 3644 ac->highest_zoneidx, alloc_flags)) 3645 goto try_this_zone; 3646 3647 continue; 3648 } 3649 } 3650 3651 try_this_zone: 3652 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3653 gfp_mask, alloc_flags, ac->migratetype); 3654 if (page) { 3655 prep_new_page(page, order, gfp_mask, alloc_flags); 3656 3657 /* 3658 * If this is a high-order atomic allocation then check 3659 * if the pageblock should be reserved for the future 3660 */ 3661 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3662 reserve_highatomic_pageblock(page, order, zone); 3663 3664 return page; 3665 } else { 3666 if (cond_accept_memory(zone, order)) 3667 goto try_this_zone; 3668 3669 /* Try again if zone has deferred pages */ 3670 if (deferred_pages_enabled()) { 3671 if (_deferred_grow_zone(zone, order)) 3672 goto try_this_zone; 3673 } 3674 } 3675 } 3676 3677 /* 3678 * It's possible on a UMA machine to get through all zones that are 3679 * fragmented. If avoiding fragmentation, reset and try again. 3680 */ 3681 if (no_fallback && !defrag_mode) { 3682 alloc_flags &= ~ALLOC_NOFRAGMENT; 3683 goto retry; 3684 } 3685 3686 return NULL; 3687 } 3688 3689 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3690 { 3691 unsigned int filter = SHOW_MEM_FILTER_NODES; 3692 3693 /* 3694 * This documents exceptions given to allocations in certain 3695 * contexts that are allowed to allocate outside current's set 3696 * of allowed nodes. 3697 */ 3698 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3699 if (tsk_is_oom_victim(current) || 3700 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3701 filter &= ~SHOW_MEM_FILTER_NODES; 3702 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3703 filter &= ~SHOW_MEM_FILTER_NODES; 3704 3705 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3706 } 3707 3708 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3709 { 3710 struct va_format vaf; 3711 va_list args; 3712 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3713 3714 if ((gfp_mask & __GFP_NOWARN) || 3715 !__ratelimit(&nopage_rs) || 3716 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3717 return; 3718 3719 va_start(args, fmt); 3720 vaf.fmt = fmt; 3721 vaf.va = &args; 3722 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3723 current->comm, &vaf, gfp_mask, &gfp_mask, 3724 nodemask_pr_args(nodemask)); 3725 va_end(args); 3726 3727 cpuset_print_current_mems_allowed(); 3728 pr_cont("\n"); 3729 dump_stack(); 3730 warn_alloc_show_mem(gfp_mask, nodemask); 3731 } 3732 3733 static inline struct page * 3734 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3735 unsigned int alloc_flags, 3736 const struct alloc_context *ac) 3737 { 3738 struct page *page; 3739 3740 page = get_page_from_freelist(gfp_mask, order, 3741 alloc_flags|ALLOC_CPUSET, ac); 3742 /* 3743 * fallback to ignore cpuset restriction if our nodes 3744 * are depleted 3745 */ 3746 if (!page) 3747 page = get_page_from_freelist(gfp_mask, order, 3748 alloc_flags, ac); 3749 return page; 3750 } 3751 3752 static inline struct page * 3753 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3754 const struct alloc_context *ac, unsigned long *did_some_progress) 3755 { 3756 struct oom_control oc = { 3757 .zonelist = ac->zonelist, 3758 .nodemask = ac->nodemask, 3759 .memcg = NULL, 3760 .gfp_mask = gfp_mask, 3761 .order = order, 3762 }; 3763 struct page *page; 3764 3765 *did_some_progress = 0; 3766 3767 /* 3768 * Acquire the oom lock. If that fails, somebody else is 3769 * making progress for us. 3770 */ 3771 if (!mutex_trylock(&oom_lock)) { 3772 *did_some_progress = 1; 3773 schedule_timeout_uninterruptible(1); 3774 return NULL; 3775 } 3776 3777 /* 3778 * Go through the zonelist yet one more time, keep very high watermark 3779 * here, this is only to catch a parallel oom killing, we must fail if 3780 * we're still under heavy pressure. But make sure that this reclaim 3781 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3782 * allocation which will never fail due to oom_lock already held. 3783 */ 3784 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3785 ~__GFP_DIRECT_RECLAIM, order, 3786 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3787 if (page) 3788 goto out; 3789 3790 /* Coredumps can quickly deplete all memory reserves */ 3791 if (current->flags & PF_DUMPCORE) 3792 goto out; 3793 /* The OOM killer will not help higher order allocs */ 3794 if (order > PAGE_ALLOC_COSTLY_ORDER) 3795 goto out; 3796 /* 3797 * We have already exhausted all our reclaim opportunities without any 3798 * success so it is time to admit defeat. We will skip the OOM killer 3799 * because it is very likely that the caller has a more reasonable 3800 * fallback than shooting a random task. 3801 * 3802 * The OOM killer may not free memory on a specific node. 3803 */ 3804 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3805 goto out; 3806 /* The OOM killer does not needlessly kill tasks for lowmem */ 3807 if (ac->highest_zoneidx < ZONE_NORMAL) 3808 goto out; 3809 if (pm_suspended_storage()) 3810 goto out; 3811 /* 3812 * XXX: GFP_NOFS allocations should rather fail than rely on 3813 * other request to make a forward progress. 3814 * We are in an unfortunate situation where out_of_memory cannot 3815 * do much for this context but let's try it to at least get 3816 * access to memory reserved if the current task is killed (see 3817 * out_of_memory). 
Once filesystems are ready to handle allocation
3818 * failures more gracefully we should just bail out here.
3819 */
3820
3821 /* Exhausted what can be done so it's blame time */
3822 if (out_of_memory(&oc) ||
3823 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3824 *did_some_progress = 1;
3825
3826 /*
3827 * Help non-failing allocations by giving them access to memory
3828 * reserves
3829 */
3830 if (gfp_mask & __GFP_NOFAIL)
3831 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3832 ALLOC_NO_WATERMARKS, ac);
3833 }
3834 out:
3835 mutex_unlock(&oom_lock);
3836 return page;
3837 }
3838
3839 /*
3840 * Maximum number of compaction retries with progress before the OOM
3841 * killer is considered the only way to move forward.
3842 */
3843 #define MAX_COMPACT_RETRIES 16
3844
3845 #ifdef CONFIG_COMPACTION
3846 /* Try memory compaction for high-order allocations before reclaim */
3847 static struct page *
3848 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3849 unsigned int alloc_flags, const struct alloc_context *ac,
3850 enum compact_priority prio, enum compact_result *compact_result)
3851 {
3852 struct page *page = NULL;
3853 unsigned long pflags;
3854 unsigned int noreclaim_flag;
3855
3856 if (!order)
3857 return NULL;
3858
3859 psi_memstall_enter(&pflags);
3860 delayacct_compact_start();
3861 noreclaim_flag = memalloc_noreclaim_save();
3862
3863 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3864 prio, &page);
3865
3866 memalloc_noreclaim_restore(noreclaim_flag);
3867 psi_memstall_leave(&pflags);
3868 delayacct_compact_end();
3869
3870 if (*compact_result == COMPACT_SKIPPED)
3871 return NULL;
3872 /*
3873 * At least in one zone compaction wasn't deferred or skipped, so let's
3874 * count a compaction stall
3875 */
3876 count_vm_event(COMPACTSTALL);
3877
3878 /* Prep a captured page if available */
3879 if (page)
3880 prep_new_page(page, order, gfp_mask, alloc_flags);
3881
3882 /* Try to get a page from the freelist if available */
3883 if (!page)
3884 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3885
3886 if (page) {
3887 struct zone *zone = page_zone(page);
3888
3889 zone->compact_blockskip_flush = false;
3890 compaction_defer_reset(zone, order, true);
3891 count_vm_event(COMPACTSUCCESS);
3892 return page;
3893 }
3894
3895 /*
3896 * It's bad if a compaction run occurs and fails. The most likely reason
3897 * is that pages exist, but not enough to satisfy watermarks.
3898 */
3899 count_vm_event(COMPACTFAIL);
3900
3901 cond_resched();
3902
3903 return NULL;
3904 }
3905
3906 static inline bool
3907 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3908 enum compact_result compact_result,
3909 enum compact_priority *compact_priority,
3910 int *compaction_retries)
3911 {
3912 int max_retries = MAX_COMPACT_RETRIES;
3913 int min_priority;
3914 bool ret = false;
3915 int retries = *compaction_retries;
3916 enum compact_priority priority = *compact_priority;
3917
3918 if (!order)
3919 return false;
3920
3921 if (fatal_signal_pending(current))
3922 return false;
3923
3924 /*
3925 * Compaction was skipped due to a lack of free order-0
3926 * migration targets. Continue if reclaim can help.
3927 */
3928 if (compact_result == COMPACT_SKIPPED) {
3929 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3930 goto out;
3931 }
3932
3933 /*
3934 * Compaction managed to coalesce some page blocks, but the
3935 * allocation failed presumably due to a race. Retry some.
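* For example, with MAX_COMPACT_RETRIES == 16 (defined above), a
* costly-order request gets at most 16/4 == 4 retries at this step
* before the compaction priority is raised instead.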
3936 */ 3937 if (compact_result == COMPACT_SUCCESS) { 3938 /* 3939 * !costly requests are much more important than 3940 * __GFP_RETRY_MAYFAIL costly ones because they are de 3941 * facto nofail and invoke OOM killer to move on while 3942 * costly can fail and users are ready to cope with 3943 * that. 1/4 retries is rather arbitrary but we would 3944 * need much more detailed feedback from compaction to 3945 * make a better decision. 3946 */ 3947 if (order > PAGE_ALLOC_COSTLY_ORDER) 3948 max_retries /= 4; 3949 3950 if (++(*compaction_retries) <= max_retries) { 3951 ret = true; 3952 goto out; 3953 } 3954 } 3955 3956 /* 3957 * Compaction failed. Retry with increasing priority. 3958 */ 3959 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3960 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3961 3962 if (*compact_priority > min_priority) { 3963 (*compact_priority)--; 3964 *compaction_retries = 0; 3965 ret = true; 3966 } 3967 out: 3968 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3969 return ret; 3970 } 3971 #else 3972 static inline struct page * 3973 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3974 unsigned int alloc_flags, const struct alloc_context *ac, 3975 enum compact_priority prio, enum compact_result *compact_result) 3976 { 3977 *compact_result = COMPACT_SKIPPED; 3978 return NULL; 3979 } 3980 3981 static inline bool 3982 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3983 enum compact_result compact_result, 3984 enum compact_priority *compact_priority, 3985 int *compaction_retries) 3986 { 3987 struct zone *zone; 3988 struct zoneref *z; 3989 3990 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3991 return false; 3992 3993 /* 3994 * There are setups with compaction disabled which would prefer to loop 3995 * inside the allocator rather than hit the oom killer prematurely. 3996 * Let's give them a good hope and keep retrying while the order-0 3997 * watermarks are OK. 
3998 */ 3999 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4000 ac->highest_zoneidx, ac->nodemask) { 4001 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4002 ac->highest_zoneidx, alloc_flags)) 4003 return true; 4004 } 4005 return false; 4006 } 4007 #endif /* CONFIG_COMPACTION */ 4008 4009 #ifdef CONFIG_LOCKDEP 4010 static struct lockdep_map __fs_reclaim_map = 4011 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4012 4013 static bool __need_reclaim(gfp_t gfp_mask) 4014 { 4015 /* no reclaim without waiting on it */ 4016 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4017 return false; 4018 4019 /* this guy won't enter reclaim */ 4020 if (current->flags & PF_MEMALLOC) 4021 return false; 4022 4023 if (gfp_mask & __GFP_NOLOCKDEP) 4024 return false; 4025 4026 return true; 4027 } 4028 4029 void __fs_reclaim_acquire(unsigned long ip) 4030 { 4031 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4032 } 4033 4034 void __fs_reclaim_release(unsigned long ip) 4035 { 4036 lock_release(&__fs_reclaim_map, ip); 4037 } 4038 4039 void fs_reclaim_acquire(gfp_t gfp_mask) 4040 { 4041 gfp_mask = current_gfp_context(gfp_mask); 4042 4043 if (__need_reclaim(gfp_mask)) { 4044 if (gfp_mask & __GFP_FS) 4045 __fs_reclaim_acquire(_RET_IP_); 4046 4047 #ifdef CONFIG_MMU_NOTIFIER 4048 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4049 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4050 #endif 4051 4052 } 4053 } 4054 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4055 4056 void fs_reclaim_release(gfp_t gfp_mask) 4057 { 4058 gfp_mask = current_gfp_context(gfp_mask); 4059 4060 if (__need_reclaim(gfp_mask)) { 4061 if (gfp_mask & __GFP_FS) 4062 __fs_reclaim_release(_RET_IP_); 4063 } 4064 } 4065 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4066 #endif 4067 4068 /* 4069 * Zonelists may change due to hotplug during allocation. Detect when zonelists 4070 * have been rebuilt so allocation retries. Reader side does not lock and 4071 * retries the allocation if zonelist changes. Writer side is protected by the 4072 * embedded spin_lock. 
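*
* Read-side sketch: this is the pattern __alloc_pages_slowpath() uses
* further down, shown here for orientation (not an additional API):
*
*	cookie = zonelist_iter_begin();
*	... allocation attempts fail ...
*	if (check_retry_zonelist(cookie))
*		goto restart;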
4073 */ 4074 static DEFINE_SEQLOCK(zonelist_update_seq); 4075 4076 static unsigned int zonelist_iter_begin(void) 4077 { 4078 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4079 return read_seqbegin(&zonelist_update_seq); 4080 4081 return 0; 4082 } 4083 4084 static unsigned int check_retry_zonelist(unsigned int seq) 4085 { 4086 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4087 return read_seqretry(&zonelist_update_seq, seq); 4088 4089 return seq; 4090 } 4091 4092 /* Perform direct synchronous page reclaim */ 4093 static unsigned long 4094 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4095 const struct alloc_context *ac) 4096 { 4097 unsigned int noreclaim_flag; 4098 unsigned long progress; 4099 4100 cond_resched(); 4101 4102 /* We now go into synchronous reclaim */ 4103 cpuset_memory_pressure_bump(); 4104 fs_reclaim_acquire(gfp_mask); 4105 noreclaim_flag = memalloc_noreclaim_save(); 4106 4107 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4108 ac->nodemask); 4109 4110 memalloc_noreclaim_restore(noreclaim_flag); 4111 fs_reclaim_release(gfp_mask); 4112 4113 cond_resched(); 4114 4115 return progress; 4116 } 4117 4118 /* The really slow allocator path where we enter direct reclaim */ 4119 static inline struct page * 4120 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4121 unsigned int alloc_flags, const struct alloc_context *ac, 4122 unsigned long *did_some_progress) 4123 { 4124 struct page *page = NULL; 4125 unsigned long pflags; 4126 bool drained = false; 4127 4128 psi_memstall_enter(&pflags); 4129 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4130 if (unlikely(!(*did_some_progress))) 4131 goto out; 4132 4133 retry: 4134 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4135 4136 /* 4137 * If an allocation failed after direct reclaim, it could be because 4138 * pages are pinned on the per-cpu lists or in high alloc reserves. 4139 * Shrink them and try again 4140 */ 4141 if (!page && !drained) { 4142 unreserve_highatomic_pageblock(ac, false); 4143 drain_all_pages(NULL); 4144 drained = true; 4145 goto retry; 4146 } 4147 out: 4148 psi_memstall_leave(&pflags); 4149 4150 return page; 4151 } 4152 4153 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4154 const struct alloc_context *ac) 4155 { 4156 struct zoneref *z; 4157 struct zone *zone; 4158 pg_data_t *last_pgdat = NULL; 4159 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4160 unsigned int reclaim_order; 4161 4162 if (defrag_mode) 4163 reclaim_order = max(order, pageblock_order); 4164 else 4165 reclaim_order = order; 4166 4167 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4168 ac->nodemask) { 4169 if (!managed_zone(zone)) 4170 continue; 4171 if (last_pgdat == zone->zone_pgdat) 4172 continue; 4173 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); 4174 last_pgdat = zone->zone_pgdat; 4175 } 4176 } 4177 4178 static inline unsigned int 4179 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4180 { 4181 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4182 4183 /* 4184 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4185 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4186 * to save two branches. 
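*
* Because the bit patterns are identical, the single mask-and-or below
* replaces what would otherwise be two tests (sketch):
*
*	if (gfp_mask & __GFP_HIGH)
*		alloc_flags |= ALLOC_MIN_RESERVE;
*	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
*		alloc_flags |= ALLOC_KSWAPD;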
4187 */ 4188 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4189 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4190 4191 /* 4192 * The caller may dip into page reserves a bit more if the caller 4193 * cannot run direct reclaim, or if the caller has realtime scheduling 4194 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4195 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 4196 */ 4197 alloc_flags |= (__force int) 4198 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4199 4200 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4201 /* 4202 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4203 * if it can't schedule. 4204 */ 4205 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4206 alloc_flags |= ALLOC_NON_BLOCK; 4207 4208 if (order > 0) 4209 alloc_flags |= ALLOC_HIGHATOMIC; 4210 } 4211 4212 /* 4213 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4214 * GFP_ATOMIC) rather than fail, see the comment for 4215 * cpuset_node_allowed(). 4216 */ 4217 if (alloc_flags & ALLOC_MIN_RESERVE) 4218 alloc_flags &= ~ALLOC_CPUSET; 4219 } else if (unlikely(rt_or_dl_task(current)) && in_task()) 4220 alloc_flags |= ALLOC_MIN_RESERVE; 4221 4222 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4223 4224 if (defrag_mode) 4225 alloc_flags |= ALLOC_NOFRAGMENT; 4226 4227 return alloc_flags; 4228 } 4229 4230 static bool oom_reserves_allowed(struct task_struct *tsk) 4231 { 4232 if (!tsk_is_oom_victim(tsk)) 4233 return false; 4234 4235 /* 4236 * !MMU doesn't have oom reaper so give access to memory reserves 4237 * only to the thread with TIF_MEMDIE set 4238 */ 4239 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4240 return false; 4241 4242 return true; 4243 } 4244 4245 /* 4246 * Distinguish requests which really need access to full memory 4247 * reserves from oom victims which can live with a portion of it 4248 */ 4249 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4250 { 4251 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4252 return 0; 4253 if (gfp_mask & __GFP_MEMALLOC) 4254 return ALLOC_NO_WATERMARKS; 4255 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4256 return ALLOC_NO_WATERMARKS; 4257 if (!in_interrupt()) { 4258 if (current->flags & PF_MEMALLOC) 4259 return ALLOC_NO_WATERMARKS; 4260 else if (oom_reserves_allowed(current)) 4261 return ALLOC_OOM; 4262 } 4263 4264 return 0; 4265 } 4266 4267 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4268 { 4269 return !!__gfp_pfmemalloc_flags(gfp_mask); 4270 } 4271 4272 /* 4273 * Checks whether it makes sense to retry the reclaim to make a forward progress 4274 * for the given allocation request. 4275 * 4276 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4277 * without success, or when we couldn't even meet the watermark if we 4278 * reclaimed all remaining pages on the LRU lists. 4279 * 4280 * Returns true if a retry is viable or false to enter the oom path. 
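* (MAX_RECLAIM_RETRIES is defined in mm/internal.h; 16 at the time of
* writing.)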
4281 */
4282 static inline bool
4283 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4284 struct alloc_context *ac, int alloc_flags,
4285 bool did_some_progress, int *no_progress_loops)
4286 {
4287 struct zone *zone;
4288 struct zoneref *z;
4289 bool ret = false;
4290
4291 /*
4292 * Costly allocations might have made progress but this doesn't mean
4293 * their order will become available due to high fragmentation so
4294 * always increment the no progress counter for them
4295 */
4296 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4297 *no_progress_loops = 0;
4298 else
4299 (*no_progress_loops)++;
4300
4301 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4302 goto out;
4303
4304
4305 /*
4306 * Keep reclaiming pages while there is a chance this will lead
4307 * somewhere. If none of the target zones can satisfy our allocation
4308 * request even if all reclaimable pages are considered then we are
4309 * screwed and have to go OOM.
4310 */
4311 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4312 ac->highest_zoneidx, ac->nodemask) {
4313 unsigned long available;
4314 unsigned long reclaimable;
4315 unsigned long min_wmark = min_wmark_pages(zone);
4316 bool wmark;
4317
4318 if (cpusets_enabled() &&
4319 (alloc_flags & ALLOC_CPUSET) &&
4320 !__cpuset_zone_allowed(zone, gfp_mask))
4321 continue;
4322
4323 available = reclaimable = zone_reclaimable_pages(zone);
4324 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4325
4326 /*
4327 * Would the allocation succeed if we reclaimed all
4328 * reclaimable pages?
4329 */
4330 wmark = __zone_watermark_ok(zone, order, min_wmark,
4331 ac->highest_zoneidx, alloc_flags, available);
4332 trace_reclaim_retry_zone(z, order, reclaimable,
4333 available, min_wmark, *no_progress_loops, wmark);
4334 if (wmark) {
4335 ret = true;
4336 break;
4337 }
4338 }
4339
4340 /*
4341 * Memory allocation/reclaim might be called from a WQ context and the
4342 * current implementation of the WQ concurrency control doesn't
4343 * recognize that a particular WQ is congested if the worker thread is
4344 * looping without ever sleeping. Therefore we have to do a short sleep
4345 * here rather than calling cond_resched().
4346 */
4347 if (current->flags & PF_WQ_WORKER)
4348 schedule_timeout_uninterruptible(1);
4349 else
4350 cond_resched();
4351 out:
4352 /* Before OOM, exhaust highatomic_reserve */
4353 if (!ret)
4354 return unreserve_highatomic_pageblock(ac, true);
4355
4356 return ret;
4357 }
4358
4359 static inline bool
4360 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4361 {
4362 /*
4363 * It's possible that cpuset's mems_allowed and the nodemask from
4364 * mempolicy don't intersect. This should normally be dealt with by
4365 * policy_nodemask(), but it's possible to race with a cpuset update in
4366 * such a way that the check therein was true, and then it became false
4367 * before we got our cpuset_mems_cookie here.
4368 * This assumes that for all allocations, ac->nodemask can come only
4369 * from MPOL_BIND mempolicy (whose documented semantics are to be ignored
4370 * when it does not intersect with the cpuset restrictions) or the
4371 * caller can deal with a violated nodemask.
4372 */
4373 if (cpusets_enabled() && ac->nodemask &&
4374 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4375 ac->nodemask = NULL;
4376 return true;
4377 }
4378
4379 /*
4380 * When updating a task's mems_allowed or mempolicy nodemask, it is
4381 * possible to race with parallel threads in such a way that our
4382 * allocation can fail while the mask is being updated. If we are about
4383 * to fail, check if the cpuset changed during allocation and if so,
4384 * retry.
4385 */
4386 if (read_mems_allowed_retry(cpuset_mems_cookie))
4387 return true;
4388
4389 return false;
4390 }
4391
4392 static inline struct page *
4393 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4394 struct alloc_context *ac)
4395 {
4396 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4397 bool can_compact = gfp_compaction_allowed(gfp_mask);
4398 bool nofail = gfp_mask & __GFP_NOFAIL;
4399 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4400 struct page *page = NULL;
4401 unsigned int alloc_flags;
4402 unsigned long did_some_progress;
4403 enum compact_priority compact_priority;
4404 enum compact_result compact_result;
4405 int compaction_retries;
4406 int no_progress_loops;
4407 unsigned int cpuset_mems_cookie;
4408 unsigned int zonelist_iter_cookie;
4409 int reserve_flags;
4410
4411 if (unlikely(nofail)) {
4412 /*
4413 * We most definitely don't want callers attempting to
4414 * allocate greater than order-1 page units with __GFP_NOFAIL.
4415 */
4416 WARN_ON_ONCE(order > 1);
4417 /*
4418 * Also, we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
4419 * otherwise we may end up in a lockup.
4420 */
4421 WARN_ON_ONCE(!can_direct_reclaim);
4422 /*
4423 * A PF_MEMALLOC request from this context is rather bizarre
4424 * because we cannot reclaim anything and can only loop waiting
4425 * for somebody else to do the work for us.
4426 */
4427 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4428 }
4429
4430 restart:
4431 compaction_retries = 0;
4432 no_progress_loops = 0;
4433 compact_result = COMPACT_SKIPPED;
4434 compact_priority = DEF_COMPACT_PRIORITY;
4435 cpuset_mems_cookie = read_mems_allowed_begin();
4436 zonelist_iter_cookie = zonelist_iter_begin();
4437
4438 /*
4439 * The fast path uses conservative alloc_flags to succeed only until
4440 * kswapd needs to be woken up, and to avoid the cost of setting up
4441 * alloc_flags precisely. So we do that now.
4442 */
4443 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4444
4445 /*
4446 * We need to recalculate the starting point for the zonelist iterator
4447 * because we might have used a different nodemask in the fast path, or
4448 * there was a cpuset modification and we are retrying - otherwise we
4449 * could end up iterating over non-eligible zones endlessly.
4450 */
4451 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4452 ac->highest_zoneidx, ac->nodemask);
4453 if (!zonelist_zone(ac->preferred_zoneref))
4454 goto nopage;
4455
4456 /*
4457 * Check for insane configurations where the cpuset doesn't contain
4458 * any suitable zone to satisfy the request - e.g. non-movable
4459 * GFP_HIGHUSER allocations from MOVABLE nodes only.
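* (A node whose memory is all ZONE_MOVABLE can never satisfy such a
* request, no matter how much of that memory is free.)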
4460 */ 4461 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4462 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4463 ac->highest_zoneidx, 4464 &cpuset_current_mems_allowed); 4465 if (!zonelist_zone(z)) 4466 goto nopage; 4467 } 4468 4469 if (alloc_flags & ALLOC_KSWAPD) 4470 wake_all_kswapds(order, gfp_mask, ac); 4471 4472 /* 4473 * The adjusted alloc_flags might result in immediate success, so try 4474 * that first 4475 */ 4476 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4477 if (page) 4478 goto got_pg; 4479 4480 /* 4481 * For costly allocations, try direct compaction first, as it's likely 4482 * that we have enough base pages and don't need to reclaim. For non- 4483 * movable high-order allocations, do that as well, as compaction will 4484 * try prevent permanent fragmentation by migrating from blocks of the 4485 * same migratetype. 4486 * Don't try this for allocations that are allowed to ignore 4487 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4488 */ 4489 if (can_direct_reclaim && can_compact && 4490 (costly_order || 4491 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4492 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4493 page = __alloc_pages_direct_compact(gfp_mask, order, 4494 alloc_flags, ac, 4495 INIT_COMPACT_PRIORITY, 4496 &compact_result); 4497 if (page) 4498 goto got_pg; 4499 4500 /* 4501 * Checks for costly allocations with __GFP_NORETRY, which 4502 * includes some THP page fault allocations 4503 */ 4504 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4505 /* 4506 * If allocating entire pageblock(s) and compaction 4507 * failed because all zones are below low watermarks 4508 * or is prohibited because it recently failed at this 4509 * order, fail immediately unless the allocator has 4510 * requested compaction and reclaim retry. 4511 * 4512 * Reclaim is 4513 * - potentially very expensive because zones are far 4514 * below their low watermarks or this is part of very 4515 * bursty high order allocations, 4516 * - not guaranteed to help because isolate_freepages() 4517 * may not iterate over freed pages as part of its 4518 * linear scan, and 4519 * - unlikely to make entire pageblocks free on its 4520 * own. 4521 */ 4522 if (compact_result == COMPACT_SKIPPED || 4523 compact_result == COMPACT_DEFERRED) 4524 goto nopage; 4525 4526 /* 4527 * Looks like reclaim/compaction is worth trying, but 4528 * sync compaction could be very expensive, so keep 4529 * using async compaction. 4530 */ 4531 compact_priority = INIT_COMPACT_PRIORITY; 4532 } 4533 } 4534 4535 retry: 4536 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4537 if (alloc_flags & ALLOC_KSWAPD) 4538 wake_all_kswapds(order, gfp_mask, ac); 4539 4540 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4541 if (reserve_flags) 4542 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4543 (alloc_flags & ALLOC_KSWAPD); 4544 4545 /* 4546 * Reset the nodemask and zonelist iterators if memory policies can be 4547 * ignored. These allocations are high priority and system rather than 4548 * user oriented. 
4549 */
4550 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4551 ac->nodemask = NULL;
4552 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4553 ac->highest_zoneidx, ac->nodemask);
4554 }
4555
4556 /* Attempt with potentially adjusted zonelist and alloc_flags */
4557 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4558 if (page)
4559 goto got_pg;
4560
4561 /* Caller is not willing to reclaim, we can't balance anything */
4562 if (!can_direct_reclaim)
4563 goto nopage;
4564
4565 /* Avoid recursion of direct reclaim */
4566 if (current->flags & PF_MEMALLOC)
4567 goto nopage;
4568
4569 /* Try direct reclaim and then allocating */
4570 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4571 &did_some_progress);
4572 if (page)
4573 goto got_pg;
4574
4575 /* Try direct compaction and then allocating */
4576 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4577 compact_priority, &compact_result);
4578 if (page)
4579 goto got_pg;
4580
4581 /* Do not loop if specifically requested */
4582 if (gfp_mask & __GFP_NORETRY)
4583 goto nopage;
4584
4585 /*
4586 * Do not retry costly high order allocations unless they are
4587 * __GFP_RETRY_MAYFAIL and we can compact
4588 */
4589 if (costly_order && (!can_compact ||
4590 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4591 goto nopage;
4592
4593 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4594 did_some_progress > 0, &no_progress_loops))
4595 goto retry;
4596
4597 /*
4598 * It doesn't make any sense to retry compaction if order-0
4599 * reclaim is not able to make any progress, because the current
4600 * implementation of compaction depends on a sufficient amount
4601 * of free memory (see __compaction_suitable)
4602 */
4603 if (did_some_progress > 0 && can_compact &&
4604 should_compact_retry(ac, order, alloc_flags,
4605 compact_result, &compact_priority,
4606 &compaction_retries))
4607 goto retry;
4608
4609 /* Reclaim/compaction failed to prevent the fallback */
4610 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
4611 alloc_flags &= ~ALLOC_NOFRAGMENT;
4612 goto retry;
4613 }
4614
4615 /*
4616 * Deal with possible cpuset update races or zonelist updates to avoid
4617 * an unnecessary OOM kill.
4618 */
4619 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4620 check_retry_zonelist(zonelist_iter_cookie))
4621 goto restart;
4622
4623 /* Reclaim has failed us, start killing things */
4624 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4625 if (page)
4626 goto got_pg;
4627
4628 /* Avoid allocations with no watermarks from looping endlessly */
4629 if (tsk_is_oom_victim(current) &&
4630 (alloc_flags & ALLOC_OOM ||
4631 (gfp_mask & __GFP_NOMEMALLOC)))
4632 goto nopage;
4633
4634 /* Retry as long as the OOM killer is making progress */
4635 if (did_some_progress) {
4636 no_progress_loops = 0;
4637 goto retry;
4638 }
4639
4640 nopage:
4641 /*
4642 * Deal with possible cpuset update races or zonelist updates to avoid
4643 * an unnecessary OOM kill.
4644 */
4645 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4646 check_retry_zonelist(zonelist_iter_cookie))
4647 goto restart;
4648
4649 /*
4650 * Make sure that a __GFP_NOFAIL request doesn't leak out, and make sure
4651 * we always retry
4652 */
4653 if (unlikely(nofail)) {
4654 /*
4655 * Lacking direct reclaim we can't do anything to reclaim memory,
4656 * so we disregard these unreasonable nofail requests and still
4657 * return NULL
4658 */
4659 if (!can_direct_reclaim)
4660 goto fail;
4661
4662 /*
4663 * Help non-failing allocations by giving some access to memory
4664 * reserves normally used for high priority non-blocking
4665 * allocations but do not use ALLOC_NO_WATERMARKS because this
4666 * could deplete whole memory reserves which would just make
4667 * the situation worse.
4668 */
4669 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4670 if (page)
4671 goto got_pg;
4672
4673 cond_resched();
4674 goto retry;
4675 }
4676 fail:
4677 warn_alloc(gfp_mask, ac->nodemask,
4678 "page allocation failure: order:%u", order);
4679 got_pg:
4680 return page;
4681 }
4682
4683 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4684 int preferred_nid, nodemask_t *nodemask,
4685 struct alloc_context *ac, gfp_t *alloc_gfp,
4686 unsigned int *alloc_flags)
4687 {
4688 ac->highest_zoneidx = gfp_zone(gfp_mask);
4689 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4690 ac->nodemask = nodemask;
4691 ac->migratetype = gfp_migratetype(gfp_mask);
4692
4693 if (cpusets_enabled()) {
4694 *alloc_gfp |= __GFP_HARDWALL;
4695 /*
4696 * When we are in interrupt context, the cpuset of the
4697 * current task is irrelevant. It means that any node is OK.
4698 */
4699 if (in_task() && !ac->nodemask)
4700 ac->nodemask = &cpuset_current_mems_allowed;
4701 else
4702 *alloc_flags |= ALLOC_CPUSET;
4703 }
4704
4705 might_alloc(gfp_mask);
4706
4707 /*
4708 * Don't invoke should_fail logic, since it may call
4709 * get_random_u32() and printk() which need to spin_lock.
4710 */
4711 if (!(*alloc_flags & ALLOC_TRYLOCK) &&
4712 should_fail_alloc_page(gfp_mask, order))
4713 return false;
4714
4715 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4716
4717 /* Dirty zone balancing only done in the fast path */
4718 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4719
4720 /*
4721 * The preferred zone is used for statistics but crucially it is
4722 * also used as the starting point for the zonelist iterator. It
4723 * may get reset for allocations that ignore memory policies.
4724 */
4725 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4726 ac->highest_zoneidx, ac->nodemask);
4727
4728 return true;
4729 }
4730
4731 /*
4732 * alloc_pages_bulk_noprof - Allocate a number of order-0 pages to an array
4733 * @gfp: GFP flags for the allocation
4734 * @preferred_nid: The preferred NUMA node ID to allocate from
4735 * @nodemask: Set of nodes to allocate from, may be NULL
4736 * @nr_pages: The number of pages desired in the array
4737 * @page_array: Array to store the pages
4738 *
4739 * This is a batched version of the page allocator that attempts to
4740 * allocate nr_pages quickly. Pages are added to the page_array.
4741 *
4742 * Note that only NULL elements are populated with pages and nr_pages
4743 * is the maximum number of pages that will be stored in the array.
4744 *
4745 * Returns the number of pages in the array.
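*
* Usage sketch (illustrative only; assumes the alloc_pages_bulk() wrapper
* from gfp.h, and the caller must tolerate a short count):
*
*	struct page *pages[16] = { NULL };
*	unsigned long nr;
*
*	nr = alloc_pages_bulk(GFP_KERNEL, ARRAY_SIZE(pages), pages);
*
* On return, pages[0] .. pages[nr - 1] are valid order-0 pages; nr may be
* anything from 0 to 16.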
4746 */
4747 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
4748 nodemask_t *nodemask, int nr_pages,
4749 struct page **page_array)
4750 {
4751 struct page *page;
4752 unsigned long __maybe_unused UP_flags;
4753 struct zone *zone;
4754 struct zoneref *z;
4755 struct per_cpu_pages *pcp;
4756 struct list_head *pcp_list;
4757 struct alloc_context ac;
4758 gfp_t alloc_gfp;
4759 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4760 int nr_populated = 0, nr_account = 0;
4761
4762 /*
4763 * Skip populated array elements to determine if any pages need
4764 * to be allocated before disabling IRQs.
4765 */
4766 while (nr_populated < nr_pages && page_array[nr_populated])
4767 nr_populated++;
4768
4769 /* No pages requested? */
4770 if (unlikely(nr_pages <= 0))
4771 goto out;
4772
4773 /* Already populated array? */
4774 if (unlikely(nr_pages - nr_populated == 0))
4775 goto out;
4776
4777 /* Bulk allocator does not support memcg accounting. */
4778 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4779 goto failed;
4780
4781 /* Use the single page allocator for one page. */
4782 if (nr_pages - nr_populated == 1)
4783 goto failed;
4784
4785 #ifdef CONFIG_PAGE_OWNER
4786 /*
4787 * PAGE_OWNER may recurse into the allocator to allocate space to
4788 * save the stack with pagesets.lock held. Releasing/reacquiring
4789 * removes much of the performance benefit of bulk allocation so
4790 * force the caller to allocate one page at a time, as that performs
4791 * about as well as adding the complexity to the bulk allocator would.
4792 */
4793 if (static_branch_unlikely(&page_owner_inited))
4794 goto failed;
4795 #endif
4796
4797 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
4798 gfp &= gfp_allowed_mask;
4799 alloc_gfp = gfp;
4800 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4801 goto out;
4802 gfp = alloc_gfp;
4803
4804 /* Find an allowed local zone that meets the low watermark. */
4805 z = ac.preferred_zoneref;
4806 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
4807 unsigned long mark;
4808
4809 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4810 !__cpuset_zone_allowed(zone, gfp)) {
4811 continue;
4812 }
4813
4814 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
4815 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
4816 goto failed;
4817 }
4818
4819 cond_accept_memory(zone, 0);
4820 retry_this_zone:
4821 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4822 if (zone_watermark_fast(zone, 0, mark,
4823 zonelist_zone_idx(ac.preferred_zoneref),
4824 alloc_flags, gfp)) {
4825 break;
4826 }
4827
4828 if (cond_accept_memory(zone, 0))
4829 goto retry_this_zone;
4830
4831 /* Try again if zone has deferred pages */
4832 if (deferred_pages_enabled()) {
4833 if (_deferred_grow_zone(zone, 0))
4834 goto retry_this_zone;
4835 }
4836 }
4837
4838 /*
4839 * If there are no allowed local zones that meet the watermarks then
4840 * try to allocate a single page and reclaim if necessary.
4841 */
4842 if (unlikely(!zone))
4843 goto failed;
4844
4845 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy.
*/ 4846 pcp_trylock_prepare(UP_flags); 4847 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4848 if (!pcp) 4849 goto failed_irq; 4850 4851 /* Attempt the batch allocation */ 4852 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4853 while (nr_populated < nr_pages) { 4854 4855 /* Skip existing pages */ 4856 if (page_array[nr_populated]) { 4857 nr_populated++; 4858 continue; 4859 } 4860 4861 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4862 pcp, pcp_list); 4863 if (unlikely(!page)) { 4864 /* Try and allocate at least one page */ 4865 if (!nr_account) { 4866 pcp_spin_unlock(pcp); 4867 goto failed_irq; 4868 } 4869 break; 4870 } 4871 nr_account++; 4872 4873 prep_new_page(page, 0, gfp, 0); 4874 set_page_refcounted(page); 4875 page_array[nr_populated++] = page; 4876 } 4877 4878 pcp_spin_unlock(pcp); 4879 pcp_trylock_finish(UP_flags); 4880 4881 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4882 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 4883 4884 out: 4885 return nr_populated; 4886 4887 failed_irq: 4888 pcp_trylock_finish(UP_flags); 4889 4890 failed: 4891 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 4892 if (page) 4893 page_array[nr_populated++] = page; 4894 goto out; 4895 } 4896 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 4897 4898 /* 4899 * This is the 'heart' of the zoned buddy allocator. 4900 */ 4901 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, 4902 int preferred_nid, nodemask_t *nodemask) 4903 { 4904 struct page *page; 4905 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4906 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4907 struct alloc_context ac = { }; 4908 4909 /* 4910 * There are several places where we assume that the order value is sane 4911 * so bail out early if the request is out of bound. 4912 */ 4913 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 4914 return NULL; 4915 4916 gfp &= gfp_allowed_mask; 4917 /* 4918 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4919 * resp. GFP_NOIO which has to be inherited for all allocation requests 4920 * from a particular context which has been marked by 4921 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4922 * movable zones are not used during allocation. 4923 */ 4924 gfp = current_gfp_context(gfp); 4925 alloc_gfp = gfp; 4926 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4927 &alloc_gfp, &alloc_flags)) 4928 return NULL; 4929 4930 /* 4931 * Forbid the first pass from falling back to types that fragment 4932 * memory until all local zones are considered. 4933 */ 4934 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 4935 4936 /* First allocation attempt */ 4937 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4938 if (likely(page)) 4939 goto out; 4940 4941 alloc_gfp = gfp; 4942 ac.spread_dirty_pages = false; 4943 4944 /* 4945 * Restore the original nodemask if it was potentially replaced with 4946 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
4947 */ 4948 ac.nodemask = nodemask; 4949 4950 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4951 4952 out: 4953 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4954 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4955 free_frozen_pages(page, order); 4956 page = NULL; 4957 } 4958 4959 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4960 kmsan_alloc_page(page, order, alloc_gfp); 4961 4962 return page; 4963 } 4964 EXPORT_SYMBOL(__alloc_frozen_pages_noprof); 4965 4966 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 4967 int preferred_nid, nodemask_t *nodemask) 4968 { 4969 struct page *page; 4970 4971 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); 4972 if (page) 4973 set_page_refcounted(page); 4974 return page; 4975 } 4976 EXPORT_SYMBOL(__alloc_pages_noprof); 4977 4978 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 4979 nodemask_t *nodemask) 4980 { 4981 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 4982 preferred_nid, nodemask); 4983 return page_rmappable_folio(page); 4984 } 4985 EXPORT_SYMBOL(__folio_alloc_noprof); 4986 4987 /* 4988 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4989 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4990 * you need to access high mem. 4991 */ 4992 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 4993 { 4994 struct page *page; 4995 4996 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 4997 if (!page) 4998 return 0; 4999 return (unsigned long) page_address(page); 5000 } 5001 EXPORT_SYMBOL(get_free_pages_noprof); 5002 5003 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 5004 { 5005 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 5006 } 5007 EXPORT_SYMBOL(get_zeroed_page_noprof); 5008 5009 /** 5010 * ___free_pages - Free pages allocated with alloc_pages(). 5011 * @page: The page pointer returned from alloc_pages(). 5012 * @order: The order of the allocation. 5013 * @fpi_flags: Free Page Internal flags. 5014 * 5015 * This function can free multi-page allocations that are not compound 5016 * pages. It does not check that the @order passed in matches that of 5017 * the allocation, so it is easy to leak memory. Freeing more memory 5018 * than was allocated will probably emit a warning. 5019 * 5020 * If the last reference to this page is speculative, it will be released 5021 * by put_page() which only frees the first page of a non-compound 5022 * allocation. To prevent the remaining pages from being leaked, we free 5023 * the subsequent pages here. If you want to use the page's reference 5024 * count to decide when to free the allocation, you should allocate a 5025 * compound page, and use put_page() instead of __free_pages(). 5026 * 5027 * Context: May be called in interrupt context or while holding a normal 5028 * spinlock, but not in NMI context or while holding a raw spinlock. 
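*
* Typical pairing, as a sketch (the order passed to __free_pages() must
* match the allocation):
*
*	page = alloc_pages(GFP_KERNEL, 2);
*	if (!page)
*		return -ENOMEM;
*	...
*	__free_pages(page, 2);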
5029 */
5030 static void ___free_pages(struct page *page, unsigned int order,
5031 fpi_t fpi_flags)
5032 {
5033 /* get PageHead before we drop reference */
5034 int head = PageHead(page);
5035
5036 if (put_page_testzero(page))
5037 __free_frozen_pages(page, order, fpi_flags);
5038 else if (!head) {
5039 pgalloc_tag_sub_pages(page, (1 << order) - 1);
5040 while (order-- > 0)
5041 __free_frozen_pages(page + (1 << order), order,
5042 fpi_flags);
5043 }
5044 }
5045 void __free_pages(struct page *page, unsigned int order)
5046 {
5047 ___free_pages(page, order, FPI_NONE);
5048 }
5049 EXPORT_SYMBOL(__free_pages);
5050
5051 /*
5052 * Can be called while holding a raw_spin_lock or from IRQ and NMI for any
5053 * page type (not only those that came from try_alloc_pages)
5054 */
5055 void free_pages_nolock(struct page *page, unsigned int order)
5056 {
5057 ___free_pages(page, order, FPI_TRYLOCK);
5058 }
5059
5060 void free_pages(unsigned long addr, unsigned int order)
5061 {
5062 if (addr != 0) {
5063 VM_BUG_ON(!virt_addr_valid((void *)addr));
5064 __free_pages(virt_to_page((void *)addr), order);
5065 }
5066 }
5067
5068 EXPORT_SYMBOL(free_pages);
5069
5070 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5071 size_t size)
5072 {
5073 if (addr) {
5074 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5075 struct page *page = virt_to_page((void *)addr);
5076 struct page *last = page + nr;
5077
5078 split_page_owner(page, order, 0);
5079 pgalloc_tag_split(page_folio(page), order, 0);
5080 split_page_memcg(page, order);
5081 while (page < --last)
5082 set_page_refcounted(last);
5083
5084 last = page + (1UL << order);
5085 for (page += nr; page < last; page++)
5086 __free_pages_ok(page, 0, FPI_TO_TAIL);
5087 }
5088 return (void *)addr;
5089 }
5090
5091 /**
5092 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5093 * @size: the number of bytes to allocate
5094 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5095 *
5096 * This function is similar to alloc_pages(), except that it allocates the
5097 * minimum number of pages to satisfy the request. alloc_pages() can only
5098 * allocate memory in power-of-two pages.
5099 *
5100 * This function is also limited by MAX_PAGE_ORDER.
5101 *
5102 * Memory allocated by this function must be released by free_pages_exact().
5103 *
5104 * Return: pointer to the allocated area or %NULL in case of error.
5105 */
5106 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
5107 {
5108 unsigned int order = get_order(size);
5109 unsigned long addr;
5110
5111 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5112 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5113
5114 addr = get_free_pages_noprof(gfp_mask, order);
5115 return make_alloc_exact(addr, order, size);
5116 }
5117 EXPORT_SYMBOL(alloc_pages_exact_noprof);
5118
5119 /**
5120 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5121 * pages on a node.
5122 * @nid: the preferred node ID where memory should be allocated
5123 * @size: the number of bytes to allocate
5124 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5125 *
5126 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5127 * back.
5128 *
5129 * Return: pointer to the allocated area or %NULL in case of error.
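*
* Sketch for the _exact family (assuming 4K pages): a 10KB request holds
* on to three pages, where alloc_pages(..., get_order(10240)) would hold
* four:
*
*	buf = alloc_pages_exact(10240, GFP_KERNEL);
*	...
*	free_pages_exact(buf, 10240);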
5130 */ 5131 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5132 { 5133 unsigned int order = get_order(size); 5134 struct page *p; 5135 5136 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5137 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5138 5139 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5140 if (!p) 5141 return NULL; 5142 return make_alloc_exact((unsigned long)page_address(p), order, size); 5143 } 5144 5145 /** 5146 * free_pages_exact - release memory allocated via alloc_pages_exact() 5147 * @virt: the value returned by alloc_pages_exact. 5148 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5149 * 5150 * Release the memory allocated by a previous call to alloc_pages_exact. 5151 */ 5152 void free_pages_exact(void *virt, size_t size) 5153 { 5154 unsigned long addr = (unsigned long)virt; 5155 unsigned long end = addr + PAGE_ALIGN(size); 5156 5157 while (addr < end) { 5158 free_page(addr); 5159 addr += PAGE_SIZE; 5160 } 5161 } 5162 EXPORT_SYMBOL(free_pages_exact); 5163 5164 /** 5165 * nr_free_zone_pages - count number of pages beyond high watermark 5166 * @offset: The zone index of the highest zone 5167 * 5168 * nr_free_zone_pages() counts the number of pages which are beyond the 5169 * high watermark within all zones at or below a given zone index. For each 5170 * zone, the number of pages is calculated as: 5171 * 5172 * nr_free_zone_pages = managed_pages - high_pages 5173 * 5174 * Return: number of pages beyond high watermark. 5175 */ 5176 static unsigned long nr_free_zone_pages(int offset) 5177 { 5178 struct zoneref *z; 5179 struct zone *zone; 5180 5181 /* Just pick one node, since fallback list is circular */ 5182 unsigned long sum = 0; 5183 5184 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5185 5186 for_each_zone_zonelist(zone, z, zonelist, offset) { 5187 unsigned long size = zone_managed_pages(zone); 5188 unsigned long high = high_wmark_pages(zone); 5189 if (size > high) 5190 sum += size - high; 5191 } 5192 5193 return sum; 5194 } 5195 5196 /** 5197 * nr_free_buffer_pages - count number of pages beyond high watermark 5198 * 5199 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5200 * watermark within ZONE_DMA and ZONE_NORMAL. 5201 * 5202 * Return: number of pages beyond high watermark within ZONE_DMA and 5203 * ZONE_NORMAL. 5204 */ 5205 unsigned long nr_free_buffer_pages(void) 5206 { 5207 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5208 } 5209 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5210 5211 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5212 { 5213 zoneref->zone = zone; 5214 zoneref->zone_idx = zone_idx(zone); 5215 } 5216 5217 /* 5218 * Builds allocation fallback zone lists. 5219 * 5220 * Add all populated zones of a node to the zonelist. 5221 */ 5222 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5223 { 5224 struct zone *zone; 5225 enum zone_type zone_type = MAX_NR_ZONES; 5226 int nr_zones = 0; 5227 5228 do { 5229 zone_type--; 5230 zone = pgdat->node_zones + zone_type; 5231 if (populated_zone(zone)) { 5232 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5233 check_highest_zone(zone_type); 5234 } 5235 } while (zone_type); 5236 5237 return nr_zones; 5238 } 5239 5240 #ifdef CONFIG_NUMA 5241 5242 static int __parse_numa_zonelist_order(char *s) 5243 { 5244 /* 5245 * We used to support different zonelists modes but they turned 5246 * out to be just not useful. 
Let's keep the warning in place
5247 * if somebody still uses the command line parameter so that we do
5248 * not fail it silently
5249 */
5250 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5251 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
5252 return -EINVAL;
5253 }
5254 return 0;
5255 }
5256
5257 static char numa_zonelist_order[] = "Node";
5258 #define NUMA_ZONELIST_ORDER_LEN 16
5259 /*
5260 * sysctl handler for numa_zonelist_order
5261 */
5262 static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
5263 void *buffer, size_t *length, loff_t *ppos)
5264 {
5265 if (write)
5266 return __parse_numa_zonelist_order(buffer);
5267 return proc_dostring(table, write, buffer, length, ppos);
5268 }
5269
5270 static int node_load[MAX_NUMNODES];
5271
5272 /**
5273 * find_next_best_node - find the next node that should appear in a given node's fallback list
5274 * @node: node whose fallback list we're appending
5275 * @used_node_mask: nodemask_t of already used nodes
5276 *
5277 * We use a number of factors to determine which is the next node that should
5278 * appear on a given node's fallback list. The node should not have appeared
5279 * already in @node's fallback list, and it should be the next closest node
5280 * according to the distance array (which contains arbitrary distance values
5281 * from each node to each node in the system), and should also prefer nodes
5282 * with no CPUs, since presumably they'll have very little allocation pressure
5283 * on them otherwise.
5284 *
5285 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5286 */
5287 int find_next_best_node(int node, nodemask_t *used_node_mask)
5288 {
5289 int n, val;
5290 int min_val = INT_MAX;
5291 int best_node = NUMA_NO_NODE;
5292
5293 /*
5294 * Use the local node if we haven't already, but for memoryless local
5295 * node, we should skip it and fall back to other nodes.
5296 */
5297 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
5298 node_set(node, *used_node_mask);
5299 return node;
5300 }
5301
5302 for_each_node_state(n, N_MEMORY) {
5303
5304 /* Don't want a node to appear more than once */
5305 if (node_isset(n, *used_node_mask))
5306 continue;
5307
5308 /* Use the distance array to find the distance */
5309 val = node_distance(node, n);
5310
5311 /* Penalize nodes under us ("prefer the next node") */
5312 val += (n < node);
5313
5314 /* Give preference to headless and unused nodes */
5315 if (!cpumask_empty(cpumask_of_node(n)))
5316 val += PENALTY_FOR_NODE_WITH_CPUS;
5317
5318 /* Slight preference for less loaded node */
5319 val *= MAX_NUMNODES;
5320 val += node_load[n];
5321
5322 if (val < min_val) {
5323 min_val = val;
5324 best_node = n;
5325 }
5326 }
5327
5328 if (best_node >= 0)
5329 node_set(best_node, *used_node_mask);
5330
5331 return best_node;
5332 }
5333
5334
5335 /*
5336 * Build zonelists ordered by node and zones within node.
5337 * This results in maximum locality--normal zone overflows into the local
5338 * DMA zone, if any--but risks exhausting the DMA zone.
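*
* For example (sketch): with node 0 carrying ZONE_DMA and ZONE_NORMAL
* and node 1 carrying only ZONE_NORMAL, node 0's fallback list comes
* out as:
*
*	NORMAL(node 0) -> DMA(node 0) -> NORMAL(node 1)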
5339 */ 5340 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5341 unsigned nr_nodes) 5342 { 5343 struct zoneref *zonerefs; 5344 int i; 5345 5346 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5347 5348 for (i = 0; i < nr_nodes; i++) { 5349 int nr_zones; 5350 5351 pg_data_t *node = NODE_DATA(node_order[i]); 5352 5353 nr_zones = build_zonerefs_node(node, zonerefs); 5354 zonerefs += nr_zones; 5355 } 5356 zonerefs->zone = NULL; 5357 zonerefs->zone_idx = 0; 5358 } 5359 5360 /* 5361 * Build __GFP_THISNODE zonelists 5362 */ 5363 static void build_thisnode_zonelists(pg_data_t *pgdat) 5364 { 5365 struct zoneref *zonerefs; 5366 int nr_zones; 5367 5368 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5369 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5370 zonerefs += nr_zones; 5371 zonerefs->zone = NULL; 5372 zonerefs->zone_idx = 0; 5373 } 5374 5375 static void build_zonelists(pg_data_t *pgdat) 5376 { 5377 static int node_order[MAX_NUMNODES]; 5378 int node, nr_nodes = 0; 5379 nodemask_t used_mask = NODE_MASK_NONE; 5380 int local_node, prev_node; 5381 5382 /* NUMA-aware ordering of nodes */ 5383 local_node = pgdat->node_id; 5384 prev_node = local_node; 5385 5386 memset(node_order, 0, sizeof(node_order)); 5387 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5388 /* 5389 * We don't want to pressure a particular node. 5390 * So adding penalty to the first node in same 5391 * distance group to make it round-robin. 5392 */ 5393 if (node_distance(local_node, node) != 5394 node_distance(local_node, prev_node)) 5395 node_load[node] += 1; 5396 5397 node_order[nr_nodes++] = node; 5398 prev_node = node; 5399 } 5400 5401 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5402 build_thisnode_zonelists(pgdat); 5403 pr_info("Fallback order for Node %d: ", local_node); 5404 for (node = 0; node < nr_nodes; node++) 5405 pr_cont("%d ", node_order[node]); 5406 pr_cont("\n"); 5407 } 5408 5409 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5410 /* 5411 * Return node id of node used for "local" allocations. 5412 * I.e., first node id of first zone in arg node's generic zonelist. 5413 * Used for initializing percpu 'numa_mem', which is used primarily 5414 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5415 */ 5416 int local_memory_node(int node) 5417 { 5418 struct zoneref *z; 5419 5420 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5421 gfp_zone(GFP_KERNEL), 5422 NULL); 5423 return zonelist_node_idx(z); 5424 } 5425 #endif 5426 5427 static void setup_min_unmapped_ratio(void); 5428 static void setup_min_slab_ratio(void); 5429 #else /* CONFIG_NUMA */ 5430 5431 static void build_zonelists(pg_data_t *pgdat) 5432 { 5433 struct zoneref *zonerefs; 5434 int nr_zones; 5435 5436 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5437 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5438 zonerefs += nr_zones; 5439 5440 zonerefs->zone = NULL; 5441 zonerefs->zone_idx = 0; 5442 } 5443 5444 #endif /* CONFIG_NUMA */ 5445 5446 /* 5447 * Boot pageset table. One per cpu which is going to be used for all 5448 * zones and all nodes. The parameters will be set in such a way 5449 * that an item put on a list will immediately be handed over to 5450 * the buddy list. This is safe since pageset manipulation is done 5451 * with interrupts disabled. 5452 * 5453 * The boot_pagesets must be kept even after bootup is complete for 5454 * unused processors and/or zones. 
They do play a role for bootstrapping 5455 * hotplugged processors. 5456 * 5457 * zoneinfo_show() and maybe other functions do 5458 * not check if the processor is online before following the pageset pointer. 5459 * Other parts of the kernel may not check if the zone is available. 5460 */ 5461 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5462 /* These effectively disable the pcplists in the boot pageset completely */ 5463 #define BOOT_PAGESET_HIGH 0 5464 #define BOOT_PAGESET_BATCH 1 5465 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5466 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5467 5468 static void __build_all_zonelists(void *data) 5469 { 5470 int nid; 5471 int __maybe_unused cpu; 5472 pg_data_t *self = data; 5473 unsigned long flags; 5474 5475 /* 5476 * The zonelist_update_seq must be acquired with irqsave because the 5477 * reader can be invoked from IRQ with GFP_ATOMIC. 5478 */ 5479 write_seqlock_irqsave(&zonelist_update_seq, flags); 5480 /* 5481 * Also disable synchronous printk() to prevent any printk() from 5482 * trying to hold port->lock, for 5483 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5484 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5485 */ 5486 printk_deferred_enter(); 5487 5488 #ifdef CONFIG_NUMA 5489 memset(node_load, 0, sizeof(node_load)); 5490 #endif 5491 5492 /* 5493 * This node is hotadded and no memory is yet present. So just 5494 * building zonelists is fine - no need to touch other nodes. 5495 */ 5496 if (self && !node_online(self->node_id)) { 5497 build_zonelists(self); 5498 } else { 5499 /* 5500 * All possible nodes have pgdat preallocated 5501 * in free_area_init 5502 */ 5503 for_each_node(nid) { 5504 pg_data_t *pgdat = NODE_DATA(nid); 5505 5506 build_zonelists(pgdat); 5507 } 5508 5509 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5510 /* 5511 * We now know the "local memory node" for each node-- 5512 * i.e., the node of the first zone in the generic zonelist. 5513 * Set up numa_mem percpu variable for on-line cpus. During 5514 * boot, only the boot cpu should be on-line; we'll init the 5515 * secondary cpus' numa_mem as they come on-line. During 5516 * node/memory hotplug, we'll fixup all on-line cpus. 5517 */ 5518 for_each_online_cpu(cpu) 5519 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5520 #endif 5521 } 5522 5523 printk_deferred_exit(); 5524 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5525 } 5526 5527 static noinline void __init 5528 build_all_zonelists_init(void) 5529 { 5530 int cpu; 5531 5532 __build_all_zonelists(NULL); 5533 5534 /* 5535 * Initialize the boot_pagesets that are going to be used 5536 * for bootstrapping processors. The real pagesets for 5537 * each zone will be allocated later when the per cpu 5538 * allocator is available. 5539 * 5540 * boot_pagesets are used also for bootstrapping offline 5541 * cpus if the system is already booted because the pagesets 5542 * are needed to initialize allocators on a specific cpu too. 5543 * F.e. the percpu allocator needs the page allocator which 5544 * needs the percpu allocator in order to allocate its pagesets 5545 * (a chicken-egg dilemma). 5546 */ 5547 for_each_possible_cpu(cpu) 5548 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5549 5550 mminit_verify_zonelist(); 5551 cpuset_init_current_mems_allowed(); 5552 } 5553 5554 /* 5555 * unless system_state == SYSTEM_BOOTING. 
 *
 * __ref due to call of __init annotated helper build_all_zonelists_init
 * [protected by SYSTEM_BOOTING].
 */
void __ref build_all_zonelists(pg_data_t *pgdat)
{
	unsigned long vm_total_pages;

	if (system_state == SYSTEM_BOOTING) {
		build_all_zonelists_init();
	} else {
		__build_all_zonelists(pgdat);
		/* cpuset refresh routine should be here */
	}
	/* Get the number of free pages beyond high watermark in all zones. */
	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
		nr_online_nodes,
		str_off_on(page_group_by_mobility_disabled),
		vm_total_pages);
#ifdef CONFIG_NUMA
	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}

static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The number of pages to batch allocate is either ~0.1%
	 * of the zone or 1MB, whichever is smaller. The batch
	 * size strikes a balance between allocation latency
	 * and zone lock contention.
	 */
	batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks. As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}

static int percpu_pagelist_high_fraction;
static int zone_highsize(struct zone *zone, int batch, int cpu_online,
			 int high_fraction)
{
#ifdef CONFIG_MMU
	int high;
	int nr_split_cpus;
	unsigned long total_pages;

	if (!high_fraction) {
		/*
		 * By default, the high value of the pcp is based on the zone
		 * low watermark so that if the pcp lists are full then
		 * background reclaim will not be started prematurely.
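		 *
		 * As a hypothetical illustration: with a zone low watermark
		 * of 2560 pages and 4 online CPUs local to the zone, the
		 * per-CPU high computed below would be 2560 / 4 = 640 pages,
		 * subject to the batch * 4 floor applied at the end of this
		 * function.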
5655 */ 5656 total_pages = low_wmark_pages(zone); 5657 } else { 5658 /* 5659 * If percpu_pagelist_high_fraction is configured, the high 5660 * value is based on a fraction of the managed pages in the 5661 * zone. 5662 */ 5663 total_pages = zone_managed_pages(zone) / high_fraction; 5664 } 5665 5666 /* 5667 * Split the high value across all online CPUs local to the zone. Note 5668 * that early in boot that CPUs may not be online yet and that during 5669 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5670 * onlined. For memory nodes that have no CPUs, split the high value 5671 * across all online CPUs to mitigate the risk that reclaim is triggered 5672 * prematurely due to pages stored on pcp lists. 5673 */ 5674 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5675 if (!nr_split_cpus) 5676 nr_split_cpus = num_online_cpus(); 5677 high = total_pages / nr_split_cpus; 5678 5679 /* 5680 * Ensure high is at least batch*4. The multiple is based on the 5681 * historical relationship between high and batch. 5682 */ 5683 high = max(high, batch << 2); 5684 5685 return high; 5686 #else 5687 return 0; 5688 #endif 5689 } 5690 5691 /* 5692 * pcp->high and pcp->batch values are related and generally batch is lower 5693 * than high. They are also related to pcp->count such that count is lower 5694 * than high, and as soon as it reaches high, the pcplist is flushed. 5695 * 5696 * However, guaranteeing these relations at all times would require e.g. write 5697 * barriers here but also careful usage of read barriers at the read side, and 5698 * thus be prone to error and bad for performance. Thus the update only prevents 5699 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5700 * should ensure they can cope with those fields changing asynchronously, and 5701 * fully trust only the pcp->count field on the local CPU with interrupts 5702 * disabled. 5703 * 5704 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5705 * outside of boot time (or some other assurance that no concurrent updaters 5706 * exist). 5707 */ 5708 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5709 unsigned long high_max, unsigned long batch) 5710 { 5711 WRITE_ONCE(pcp->batch, batch); 5712 WRITE_ONCE(pcp->high_min, high_min); 5713 WRITE_ONCE(pcp->high_max, high_max); 5714 } 5715 5716 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5717 { 5718 int pindex; 5719 5720 memset(pcp, 0, sizeof(*pcp)); 5721 memset(pzstats, 0, sizeof(*pzstats)); 5722 5723 spin_lock_init(&pcp->lock); 5724 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5725 INIT_LIST_HEAD(&pcp->lists[pindex]); 5726 5727 /* 5728 * Set batch and high values safe for a boot pageset. A true percpu 5729 * pageset's initialization will update them subsequently. Here we don't 5730 * need to be as careful as pageset_update() as nobody can access the 5731 * pageset yet. 
	 */
	pcp->high_min = BOOT_PAGESET_HIGH;
	pcp->high_max = BOOT_PAGESET_HIGH;
	pcp->batch = BOOT_PAGESET_BATCH;
	pcp->free_count = 0;
}

static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
					      unsigned long high_max, unsigned long batch)
{
	struct per_cpu_pages *pcp;
	int cpu;

	for_each_possible_cpu(cpu) {
		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
		pageset_update(pcp, high_min, high_max, batch);
	}
}

/*
 * Calculate and set new high and batch values for all per-cpu pagesets of a
 * zone based on the zone's size.
 */
static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
{
	int new_high_min, new_high_max, new_batch;

	new_batch = max(1, zone_batchsize(zone));
	if (percpu_pagelist_high_fraction) {
		new_high_min = zone_highsize(zone, new_batch, cpu_online,
					     percpu_pagelist_high_fraction);
		/*
		 * PCP high is tuned manually, disable auto-tuning via
		 * setting high_min and high_max to the manual value.
		 */
		new_high_max = new_high_min;
	} else {
		new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
		new_high_max = zone_highsize(zone, new_batch, cpu_online,
					     MIN_PERCPU_PAGELIST_HIGH_FRACTION);
	}

	if (zone->pageset_high_min == new_high_min &&
	    zone->pageset_high_max == new_high_max &&
	    zone->pageset_batch == new_batch)
		return;

	zone->pageset_high_min = new_high_min;
	zone->pageset_high_max = new_high_max;
	zone->pageset_batch = new_batch;

	__zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
					  new_batch);
}

void __meminit setup_zone_pageset(struct zone *zone)
{
	int cpu;

	/* Size may be 0 on !SMP && !NUMA */
	if (sizeof(struct per_cpu_zonestat) > 0)
		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);

	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
	for_each_possible_cpu(cpu) {
		struct per_cpu_pages *pcp;
		struct per_cpu_zonestat *pzstats;

		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		per_cpu_pages_init(pcp, pzstats);
	}

	zone_set_pageset_high_and_batch(zone, 0);
}

/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
static void zone_pcp_update(struct zone *zone, int cpu_online)
{
	mutex_lock(&pcp_batch_high_lock);
	zone_set_pageset_high_and_batch(zone, cpu_online);
	mutex_unlock(&pcp_batch_high_lock);
}

static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
{
	struct per_cpu_pages *pcp;
	struct cpu_cacheinfo *cci;

	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
	cci = get_cpu_cacheinfo(cpu);
	/*
	 * If the CPU's data cache slice is large enough, "pcp->batch" pages
	 * can be preserved on the PCP before the PCP is drained during a run
	 * of consecutive high-order page freeing with no intervening
	 * allocation. This can reduce zone lock contention without hurting
	 * the sharing of cache-hot pages.
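	 *
	 * A hypothetical example: with 4KiB pages and a 2MiB per-CPU data
	 * cache slice, the slice covers 512 pages, so the check below sets
	 * PCPF_FREE_HIGH_BATCH whenever 512 > 3 * pcp->batch, i.e. for any
	 * batch up to 170 pages.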
	 */
	spin_lock(&pcp->lock);
	if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
		pcp->flags |= PCPF_FREE_HIGH_BATCH;
	else
		pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
	spin_unlock(&pcp->lock);
}

void setup_pcp_cacheinfo(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zone_pcp_update_cacheinfo(zone, cpu);
}

/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int __maybe_unused cpu;

	for_each_populated_zone(zone)
		setup_zone_pageset(zone);

#ifdef CONFIG_NUMA
	/*
	 * Unpopulated zones continue using the boot pagesets.
	 * The numa stats for these pagesets need to be reset.
	 * Otherwise, they will end up skewing the stats of
	 * the nodes these zones are associated with.
	 */
	for_each_possible_cpu(cpu) {
		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
		memset(pzstats->vm_numa_event, 0,
		       sizeof(pzstats->vm_numa_event));
	}
#endif

	for_each_online_pgdat(pgdat)
		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
}

__meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * The per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->per_cpu_pageset = &boot_pageset;
	zone->per_cpu_zonestats = &boot_zonestats;
	zone->pageset_high_min = BOOT_PAGESET_HIGH;
	zone->pageset_high_max = BOOT_PAGESET_HIGH;
	zone->pageset_batch = BOOT_PAGESET_BATCH;

	if (populated_zone(zone))
		pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
			 zone->present_pages, zone_batchsize(zone));
}

static void setup_per_zone_lowmem_reserve(void);

void adjust_managed_page_count(struct page *page, long count)
{
	atomic_long_add(count, &page_zone(page)->managed_pages);
	totalram_pages_add(count);
	setup_per_zone_lowmem_reserve();
}
EXPORT_SYMBOL(adjust_managed_page_count);

unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		struct page *page = virt_to_page(pos);
		void *direct_map_addr;

		/*
		 * 'direct_map_addr' might be different from 'pos'
		 * because some architectures' virt_to_page()
		 * implementations work with aliases. Getting the direct
		 * map address ensures that we get a _writeable_
		 * alias for the memset().
		 */
		direct_map_addr = page_address(page);
		/*
		 * Perform a kasan-unchecked memset() since this memory
		 * has not been initialized.
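		 *
		 * Note that callers pass a poison value in [0, 0xFF] to
		 * scrub the page, or a negative value (e.g. -1) to skip
		 * the memset() entirely, as the unsigned comparison below
		 * rejects it.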
		 */
		direct_map_addr = kasan_reset_tag(direct_map_addr);
		if ((unsigned int)poison <= 0xFF)
			memset(direct_map_addr, poison, PAGE_SIZE);

		free_reserved_page(page);
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK\n", s, K(pages));

	return pages;
}

void free_reserved_page(struct page *page)
{
	clear_page_tag_ref(page);
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL(free_reserved_page);

static int page_alloc_cpu_dead(unsigned int cpu)
{
	struct zone *zone;

	lru_add_drain_cpu(cpu);
	mlock_drain_remote(cpu);
	drain_pages(cpu);

	/*
	 * Spill the event counters of the dead processor
	 * into the current processor's event counters.
	 * This artificially elevates the count of the current
	 * processor.
	 */
	vm_events_fold_cpu(cpu);

	/*
	 * Zero the differential counters of the dead processor
	 * so that the vm statistics are consistent.
	 *
	 * This is only okay since the processor is dead and cannot
	 * race with what we are doing.
	 */
	cpu_vm_stats_fold(cpu);

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 0);

	return 0;
}

static int page_alloc_cpu_online(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 1);
	return 0;
}

void __init page_alloc_init_cpuhp(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
					"mm/page_alloc:pcp",
					page_alloc_cpu_online,
					page_alloc_cpu_dead);
	WARN_ON(ret < 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {

		pgdat->totalreserve_pages = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;
			unsigned long managed_pages = zone_managed_pages(zone);

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* We treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > managed_pages)
				max = managed_pages;

			pgdat->totalreserve_pages += max;

			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
	trace_mm_calculate_totalreserve_pages(totalreserve_pages);
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 *	has correct pages-reserved values, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
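 *
 *	As a worked example with hypothetical sizes: with
 *	sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256 and 262144 pages
 *	(1 GiB with 4KiB pages) managed by the zones above ZONE_DMA,
 *	ZONE_DMA would keep 262144 / 256 = 1024 pages in reserve against
 *	allocations that could have used the higher zones instead.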
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
			struct zone *zone = &pgdat->node_zones[i];
			int ratio = sysctl_lowmem_reserve_ratio[i];
			bool clear = !ratio || !zone_managed_pages(zone);
			unsigned long managed_pages = 0;

			for (j = i + 1; j < MAX_NR_ZONES; j++) {
				struct zone *upper_zone = &pgdat->node_zones[j];

				managed_pages += zone_managed_pages(upper_zone);

				if (clear)
					zone->lowmem_reserve[j] = 0;
				else
					zone->lowmem_reserve[j] = managed_pages / ratio;
				trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
								       zone->lowmem_reserve[j]);
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
	for_each_zone(zone) {
		if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
			lowmem_pages += zone_managed_pages(zone);
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone_managed_pages(zone);
		tmp = div64_ul(tmp, lowmem_pages);
		if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need pages from highmem and movable zones, so cap
			 * pages_min to a small value here.
			 *
			 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem and movable zones.
			 */
			unsigned long min_pages;

			min_pages = zone_managed_pages(zone) / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermark distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone_managed_pages(zone),
				      watermark_scale_factor, 10000));

		zone->watermark_boost = 0;
		zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
		trace_mm_setup_per_zone_wmarks(zone);

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
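 *
 * Rough worked example with hypothetical numbers: with 4KiB pages and
 * min_free_kbytes = 4096, pages_min in __setup_per_zone_wmarks() is 1024.
 * A lowmem zone holding all of lowmem then gets WMARK_MIN = 1024, and with
 * the default watermark_scale_factor of 10 the low, high and promo marks
 * each sit max(1024 / 4, managed_pages / 1000) pages above the previous one.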
 */
void setup_per_zone_wmarks(void)
{
	struct zone *zone;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);

	/*
	 * The watermark sizes have changed, so update the pcpu batch
	 * and high limits, or the limits may be inappropriate.
	 */
	for_each_zone(zone)
		zone_pcp_update(zone, 0);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (256MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
void calculate_min_free_kbytes(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes)
		min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
	else
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
			new_min_free_kbytes, user_min_free_kbytes);
}

int __meminit init_per_zone_wmark_min(void)
{
	calculate_min_free_kbytes();
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	khugepaged_min_free_kbytes_update();

	return 0;
}
postcore_initcall(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around
 * proc_dointvec_minmax() so that we can record user_min_free_kbytes and
 * recompute the per-zone watermarks whenever min_free_kbytes changes.
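 *
 * E.g. "sysctl -w vm.min_free_kbytes=65536" lands here with write != 0,
 * records the new value in user_min_free_kbytes and recomputes all per-zone
 * watermarks via setup_per_zone_wmarks().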
 */
static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}

#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
							 sysctl_min_unmapped_ratio) / 100;
}

static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_unmapped_ratio();

	return 0;
}

static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
						     sysctl_min_slab_ratio) / 100;
}

static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_slab_ratio();

	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio only makes sense as a
 * function of the boot-time zone sizes.
 */
static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
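 *
 * For example, writing 8 (the smallest accepted fraction) to
 * /proc/sys/vm/percpu_pagelist_high_fraction caps each zone's pcp->high at
 * roughly 1/8th of the zone's managed pages, split across the local CPUs,
 * while writing 0 restores the default watermark-based sizing.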
 */
static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_high_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_high_fraction &&
	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
		goto out;

	for_each_populated_zone(zone)
		zone_set_pageset_high_and_batch(zone, 0);
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}

static const struct ctl_table page_alloc_sysctl_table[] = {
	{
		.procname	= "min_free_kbytes",
		.data		= &min_free_kbytes,
		.maxlen		= sizeof(min_free_kbytes),
		.mode		= 0644,
		.proc_handler	= min_free_kbytes_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "watermark_boost_factor",
		.data		= &watermark_boost_factor,
		.maxlen		= sizeof(watermark_boost_factor),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "watermark_scale_factor",
		.data		= &watermark_scale_factor,
		.maxlen		= sizeof(watermark_scale_factor),
		.mode		= 0644,
		.proc_handler	= watermark_scale_factor_sysctl_handler,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_THREE_THOUSAND,
	},
	{
		.procname	= "defrag_mode",
		.data		= &defrag_mode,
		.maxlen		= sizeof(defrag_mode),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "percpu_pagelist_high_fraction",
		.data		= &percpu_pagelist_high_fraction,
		.maxlen		= sizeof(percpu_pagelist_high_fraction),
		.mode		= 0644,
		.proc_handler	= percpu_pagelist_high_fraction_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "lowmem_reserve_ratio",
		.data		= &sysctl_lowmem_reserve_ratio,
		.maxlen		= sizeof(sysctl_lowmem_reserve_ratio),
		.mode		= 0644,
		.proc_handler	= lowmem_reserve_ratio_sysctl_handler,
	},
#ifdef CONFIG_NUMA
	{
		.procname	= "numa_zonelist_order",
		.data		= &numa_zonelist_order,
		.maxlen		= NUMA_ZONELIST_ORDER_LEN,
		.mode		= 0644,
		.proc_handler	= numa_zonelist_order_handler,
	},
	{
		.procname	= "min_unmapped_ratio",
		.data		= &sysctl_min_unmapped_ratio,
		.maxlen		= sizeof(sysctl_min_unmapped_ratio),
		.mode		= 0644,
		.proc_handler	= sysctl_min_unmapped_ratio_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
	{
		.procname	= "min_slab_ratio",
		.data		= &sysctl_min_slab_ratio,
		.maxlen		= sizeof(sysctl_min_slab_ratio),
		.mode		= 0644,
		.proc_handler	= sysctl_min_slab_ratio_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
#endif
};

void __init page_alloc_sysctl_init(void)
{
	register_sysctl_init("vm", page_alloc_sysctl_table);
}

#ifdef CONFIG_CONTIG_ALLOC
/* Usage: See admin-guide/dynamic-debug-howto.rst */
static void alloc_contig_dump_pages(struct list_head *page_list)
{
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");

	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
		struct page *page;

		dump_stack();
		list_for_each_entry(page, page_list, lru)
			dump_page(page, "migration failure");
	}
}

/*
 * [start, end) must belong to a single zone.
 * @migratetype: migratetype used to filter the type of migration in the
 * trace_mm_alloc_contig_migrate_range_info tracepoint.
 */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end,
					int migratetype)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = cc->gfp_mask,
		.reason = MR_CONTIG_RANGE,
	};
	struct page *page;
	unsigned long total_mapped = 0;
	unsigned long total_migrated = 0;
	unsigned long total_reclaimed = 0;

	lru_cache_disable();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			ret = isolate_migratepages_range(cc, pfn, end);
			if (ret && ret != -EAGAIN)
				break;
			pfn = cc->migrate_pfn;
			tries = 0;
		} else if (++tries == 5) {
			ret = -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							     &cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		if (trace_mm_alloc_contig_migrate_range_info_enabled()) {
			total_reclaimed += nr_reclaimed;
			list_for_each_entry(page, &cc->migratepages, lru) {
				struct folio *folio = page_folio(page);

				total_mapped += folio_mapped(folio) *
						folio_nr_pages(folio);
			}
		}

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);

		if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret)
			total_migrated += cc->nr_migratepages;

		/*
		 * On -ENOMEM, migrate_pages() bails out right away. It is
		 * pointless to retry over this error, so do the same here.
		 */
		if (ret == -ENOMEM)
			break;
	}

	lru_cache_enable();
	if (ret < 0) {
		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
			alloc_contig_dump_pages(&cc->migratepages);
		putback_movable_pages(&cc->migratepages);
	}

	trace_mm_alloc_contig_migrate_range_info(start, end, migratetype,
						 total_migrated,
						 total_reclaimed,
						 total_mapped);
	return (ret < 0) ? ret : 0;
}

static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
{
	int order;

	for (order = 0; order < NR_PAGE_ORDERS; order++) {
		struct page *page, *next;
		int nr_pages = 1 << order;

		list_for_each_entry_safe(page, next, &list[order], lru) {
			int i;

			post_alloc_hook(page, order, gfp_mask);
			set_page_refcounted(page);
			if (!order)
				continue;

			split_page(page, order);

			/*
			 * Add all subpages to the order-0 head, in sequence.
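			 * E.g. a split order-2 page contributes its four
			 * order-0 subpages to list[0] in ascending pfn
			 * order.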
			 */
			list_del(&page->lru);
			for (i = 0; i < nr_pages; i++)
				list_add_tail(&page[i].lru, &list[0]);
		}
	}
}

static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
{
	const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
	const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
				  __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
	const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	/*
	 * We are given the range to allocate; node, mobility and placement
	 * hints are irrelevant at this point. We'll simply ignore them.
	 */
	gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
		      __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);

	/*
	 * We only support most reclaim flags (but not NOFAIL/NORETRY), and
	 * selected action flags.
	 */
	if (gfp_mask & ~(reclaim_mask | action_mask))
		return -EINVAL;

	/*
	 * Flags to control page compaction/migration/reclaim, to free up our
	 * page range. Migratable pages are movable, __GFP_MOVABLE is implied
	 * for them.
	 *
	 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that
	 * to not degrade callers.
	 */
	*gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
		       __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask. Node/zone/placement hints are ignored; only some
 *		action and reclaim modifiers are supported. Reclaim modifiers
 *		control allocation behavior during compaction/migration/reclaim.
 *
 * The PFN range does not have to be pageblock aligned. The PFN range must
 * belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range. Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code. On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range_noprof(unsigned long start, unsigned long end,
			      unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	gfp_mask = current_gfp_context(gfp_mask);
	if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
		return -EINVAL;

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE. Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, start_isolate_page_range() has special handling for this.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (i.e.
	 * pages that we are interested in). This will put all the pages in
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system. This way
	 * the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(start, end, migratetype, 0);
	if (ret)
		goto done;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the
	 * problem. So, just fall through. test_pages_isolated() has a
	 * tracepoint which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated. So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
	if (ret && ret != -EBUSY)
		goto done;

	/*
	 * When in-use hugetlb pages are migrated, they may simply be released
	 * back into the free hugepage pool instead of being returned to the
	 * buddy system. After the migration of in-use huge pages is completed,
	 * we will invoke replace_free_hugepage_folios() to ensure that these
	 * hugepages are properly released to the buddy system.
	 */
	ret = replace_free_hugepage_folios(start, end);
	if (ret)
		goto done;

	/*
	 * Pages from [start, end) are within pageblock_nr_pages
	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages that
	 * the page allocator holds, i.e. they can be part of higher-order
	 * pages. Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */
	outer_start = find_large_buddy(start);

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/*
	 * Grab isolated pages from freelists.
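	 * A zero return from isolate_freepages_range() below means that some
	 * page in [outer_start, end) could not be isolated after all.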
	 */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	if (!(gfp_mask & __GFP_COMP)) {
		split_free_pages(cc.freepages, gfp_mask);

		/* Free head and tail (if any) */
		if (start != outer_start)
			free_contig_range(outer_start, start - outer_start);
		if (end != outer_end)
			free_contig_range(end, outer_end - end);
	} else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
		struct page *head = pfn_to_page(start);
		int order = ilog2(end - start);

		check_new_pages(head, order);
		prep_new_page(head, order, gfp_mask, 0);
		set_page_refcounted(head);
	} else {
		ret = -EINVAL;
		WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
		     start, end, outer_start, outer_end);
	}
done:
	undo_isolate_page_range(start, end, migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range_noprof);

static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE,
					 gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (PageHuge(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}

/**
 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask. Node/zone/placement hints limit the search; only some
 *		action and reclaim modifiers are supported. Reclaim modifiers
 *		control allocation behavior during compaction/migration/reclaim.
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which cannot be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two, then the allocated range is also guaranteed to be aligned to
 * the same nr_pages (e.g. a 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
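 *
 * Minimal usage sketch (illustrative only, error handling elided), grabbing
 * 256 contiguous pages and releasing them again:
 *
 *	page = alloc_contig_pages(256, GFP_KERNEL, numa_node_id(), NULL);
 *	if (page)
 *		free_contig_range(page_to_pfn(page), 256);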
 */
struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
				       int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							   gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long count = 0;
	struct folio *folio = pfn_folio(pfn);

	if (folio_test_large(folio)) {
		int expected = folio_nr_pages(folio);

		if (nr_pages == expected)
			folio_put(folio);
		else
			WARN(true, "PFN %lu: nr_pages %lu != expected %d\n",
			     pfn, nr_pages, expected);
		return;
	}

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%lu pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);

/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on pcplist will either finish before the drain and the page
 * will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
					  zone->pageset_high_max, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}

void zone_pcp_reset(struct zone *zone)
{
	int cpu;
	struct per_cpu_zonestat *pzstats;

	if (zone->per_cpu_pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
			drain_zonestat(zone, pzstats);
		}
		free_percpu(zone->per_cpu_pageset);
		zone->per_cpu_pageset = &boot_pageset;
		if (zone->per_cpu_zonestats != &boot_zonestats) {
			free_percpu(zone->per_cpu_zonestats);
			zone->per_cpu_zonestats = &boot_zonestats;
		}
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone; the range must not
 * contain holes, must span full sections, and must be isolated before
 * calling this function.
 *
 * Returns the number of managed (non-PageOffline()) pages in the range: the
 * number of pages for which memory offlining code must adjust managed page
 * counters using adjust_managed_page_count().
 */
unsigned long __offline_isolated_pages(unsigned long start_pfn,
				       unsigned long end_pfn)
{
	unsigned long already_offline = 0, flags;
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			already_offline++;
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return end_pfn - start_pfn - already_offline;
}
#endif

/*
 * This function returns a stable result only if called under zone lock.
 */
bool is_free_buddy_page(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned int order;

	for (order = 0; order < NR_PAGE_ORDERS; order++) {
		const struct page *head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(head) &&
		    buddy_order_unsafe(head) >= order)
			break;
	}

	return order <= MAX_PAGE_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);

#ifdef CONFIG_MEMORY_FAILURE
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype,
				    bool tail)
{
	__add_to_free_list(page, zone, order, migratetype, tail);
	account_freepages(zone, 1 << order, migratetype);
}

/*
 * Break down a higher-order page into sub-pages, and keep our target out
 * of the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			current_buddy = page;
			page = page + size;
		} else {
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high))
			continue;

		add_to_free_list(current_buddy, zone, high, migratetype, false);
		set_buddy_order(current_buddy, high);
	}
}

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
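 *
 * E.g. if the page sits in a free order-3 block, the block is deleted from
 * the free list and break_down_buddy_pages() returns the other seven pages
 * to the order-2, order-1 and order-0 free lists, leaving only the target
 * page out of the allocator.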
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < NR_PAGE_ORDERS; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order,
						migratetype);
			break_down_buddy_pages(zone, page_head, page, 0,
					       page_order, migratetype);
			SetPageHWPoisonTakenOff(page);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}

/*
 * Cancel takeoff done by take_page_off_buddy().
 */
bool put_page_back_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	if (put_page_testzero(page)) {
		unsigned long pfn = page_to_pfn(page);
		int migratetype = get_pfnblock_migratetype(page, pfn);

		ClearPageHWPoisonTakenOff(page);
		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
		if (TestClearPageHWPoison(page))
			ret = true;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret;
}
#endif

#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat) {
		struct zone *zone = &pgdat->node_zones[ZONE_DMA];

		if (managed_zone(zone))
			return true;
	}
	return false;
}
#endif /* CONFIG_ZONE_DMA */

#ifdef CONFIG_UNACCEPTED_MEMORY

/* Counts number of zones with unaccepted pages. */
static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);

static bool lazy_accept = true;

static int __init accept_memory_parse(char *p)
{
	if (!strcmp(p, "lazy")) {
		lazy_accept = true;
		return 0;
	} else if (!strcmp(p, "eager")) {
		lazy_accept = false;
		return 0;
	} else {
		return -EINVAL;
	}
}
early_param("accept_memory", accept_memory_parse);

static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
	phys_addr_t start = page_to_phys(page);

	return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
}

static void __accept_page(struct zone *zone, unsigned long *flags,
			  struct page *page)
{
	bool last;

	list_del(&page->lru);
	last = list_empty(&zone->unaccepted_pages);

	account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
	__ClearPageUnaccepted(page);
	spin_unlock_irqrestore(&zone->lock, *flags);

	accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);

	__free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);

	if (last)
		static_branch_dec(&zones_with_unaccepted_pages);
}

void accept_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (!PageUnaccepted(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return;
	}

	/* Unlocks zone->lock */
	__accept_page(zone, &flags, page);
}

static bool try_to_accept_memory_one(struct zone *zone)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&zone->lock, flags);
	page = list_first_entry_or_null(&zone->unaccepted_pages,
					struct page, lru);
	if (!page) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return false;
	}

	/* Unlocks zone->lock */
	__accept_page(zone, &flags, page);

	return true;
}

static inline bool has_unaccepted_memory(void)
{
	return static_branch_unlikely(&zones_with_unaccepted_pages);
}

static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
	long to_accept, wmark;
	bool ret = false;

	if (!has_unaccepted_memory())
		return false;

	if (list_empty(&zone->unaccepted_pages))
		return false;

	wmark = promo_wmark_pages(zone);

	/*
	 * Watermarks have not been initialized yet.
	 *
	 * Accept one MAX_ORDER page to ensure progress.
	 */
	if (!wmark)
		return try_to_accept_memory_one(zone);

	/*
	 * How much do we need to accept to get to the promo watermark?
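	 * Everything counted as free but not yet accepted is treated as
	 * unusable here: with, say, wmark = 10000 and 12000 free pages of
	 * which 500 are unusable for this request and 3000 are unaccepted,
	 * to_accept works out to 10000 - (12000 - 500 - 3000) = 1500 pages,
	 * accepted below in MAX_ORDER_NR_PAGES chunks.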
	 */
	to_accept = wmark -
		    (zone_page_state(zone, NR_FREE_PAGES) -
		    __zone_watermark_unusable_free(zone, order, 0) -
		    zone_page_state(zone, NR_UNACCEPTED));

	while (to_accept > 0) {
		if (!try_to_accept_memory_one(zone))
			break;
		ret = true;
		to_accept -= MAX_ORDER_NR_PAGES;
	}

	return ret;
}

static bool __free_unaccepted(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool first = false;

	if (!lazy_accept)
		return false;

	spin_lock_irqsave(&zone->lock, flags);
	first = list_empty(&zone->unaccepted_pages);
	list_add_tail(&page->lru, &zone->unaccepted_pages);
	account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
	__SetPageUnaccepted(page);
	spin_unlock_irqrestore(&zone->lock, flags);

	if (first)
		static_branch_inc(&zones_with_unaccepted_pages);

	return true;
}

#else

static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
	return false;
}

static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
	return false;
}

static bool __free_unaccepted(struct page *page)
{
	BUILD_BUG();
	return false;
}

#endif /* CONFIG_UNACCEPTED_MEMORY */

/**
 * try_alloc_pages - opportunistic reentrant allocation from any context
 * @nid: node to allocate from
 * @order: allocation order size
 *
 * Allocates pages of a given order from the given node. This is safe to
 * call from any context (from atomic, NMI, and also reentrant
 * allocator -> tracepoint -> try_alloc_pages_noprof).
 * Allocation is best effort and expected to fail easily, so nobody should
 * rely on its success. Failures are not reported via warn_alloc().
 * See the always-fail conditions below.
 *
 * Return: allocated page or NULL on failure.
 */
struct page *try_alloc_pages_noprof(int nid, unsigned int order)
{
	/*
	 * Do not specify __GFP_DIRECT_RECLAIM, since direct reclaim is not
	 * allowed. Do not specify __GFP_KSWAPD_RECLAIM either, since waking
	 * up kswapd is not safe in arbitrary context.
	 *
	 * These two are the conditions for gfpflags_allow_spinning() being
	 * true.
	 *
	 * Specify __GFP_NOWARN since failing try_alloc_pages() is not a
	 * reason to warn, and warning would trigger printk(), which is
	 * unsafe from various contexts. We cannot use
	 * printk_deferred_enter() to mitigate, since the running context is
	 * unknown.
	 *
	 * Specify __GFP_ZERO to make sure that the call to kmsan_alloc_page()
	 * below is safe in any context. Also, zeroing the page is mandatory
	 * for BPF use cases.
	 *
	 * Though __GFP_NOMEMALLOC is not checked in the code path below,
	 * specify it here to highlight that try_alloc_pages()
	 * doesn't want to deplete reserves.
	 */
	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC
			  | __GFP_ACCOUNT;
	unsigned int alloc_flags = ALLOC_TRYLOCK;
	struct alloc_context ac = { };
	struct page *page;

	/*
	 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
	 * unsafe in NMI.
	 * If spin_trylock() is called from hard IRQ, the current task may be
	 * waiting for one rt_spin_lock, but rt_spin_trylock() will mark the
	 * task as the owner of another rt_spin_lock, which will confuse PI
	 * logic, so return immediately if called from hard IRQ or NMI.
	 *
	 * Note, irqs_disabled() case is ok. This function can be called
	 * from a raw_spin_lock_irqsave region.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
		return NULL;
	if (!pcp_allowed_order(order))
		return NULL;

#ifdef CONFIG_UNACCEPTED_MEMORY
	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
	if (has_unaccepted_memory())
		return NULL;
#endif
	/* Bailout, since _deferred_grow_zone() needs to take a lock */
	if (deferred_pages_enabled())
		return NULL;

	if (nid == NUMA_NO_NODE)
		nid = numa_node_id();

	prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
			    &alloc_gfp, &alloc_flags);

	/*
	 * Best effort allocation from percpu free list.
	 * If it's empty, attempt to spin_trylock zone->lock.
	 */
	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);

	/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */

	if (page)
		set_page_refcounted(page);

	if (memcg_kmem_online() && page &&
	    unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
		free_pages_nolock(page, order);
		page = NULL;
	}
	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
	kmsan_alloc_page(page, order, alloc_gfp);
	return page;
}