1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/kasan.h> 26 #include <linux/kmsan.h> 27 #include <linux/module.h> 28 #include <linux/suspend.h> 29 #include <linux/ratelimit.h> 30 #include <linux/oom.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/pagevec.h> 36 #include <linux/memory_hotplug.h> 37 #include <linux/nodemask.h> 38 #include <linux/vmstat.h> 39 #include <linux/fault-inject.h> 40 #include <linux/compaction.h> 41 #include <trace/events/kmem.h> 42 #include <trace/events/oom.h> 43 #include <linux/prefetch.h> 44 #include <linux/mm_inline.h> 45 #include <linux/mmu_notifier.h> 46 #include <linux/migrate.h> 47 #include <linux/sched/mm.h> 48 #include <linux/page_owner.h> 49 #include <linux/page_table_check.h> 50 #include <linux/memcontrol.h> 51 #include <linux/ftrace.h> 52 #include <linux/lockdep.h> 53 #include <linux/psi.h> 54 #include <linux/khugepaged.h> 55 #include <linux/delayacct.h> 56 #include <linux/cacheinfo.h> 57 #include <linux/pgalloc_tag.h> 58 #include <asm/div64.h> 59 #include "internal.h" 60 #include "shuffle.h" 61 #include "page_reporting.h" 62 63 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 64 typedef int __bitwise fpi_t; 65 66 /* No special request */ 67 #define FPI_NONE ((__force fpi_t)0) 68 69 /* 70 * Skip free page reporting notification for the (possibly merged) page. 71 * This does not hinder free page reporting from grabbing the page, 72 * reporting it and marking it "reported" - it only skips notifying 73 * the free page reporting infrastructure about a newly freed page. For 74 * example, used when temporarily pulling a page from a freelist and 75 * putting it back unmodified. 76 */ 77 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 78 79 /* 80 * Place the (possibly merged) page to the tail of the freelist. Will ignore 81 * page shuffling (relevant code - e.g., memory onlining - is expected to 82 * shuffle the whole zone). 83 * 84 * Note: No code should rely on this flag for correctness - it's purely 85 * to allow for optimizations when handing back either fresh pages 86 * (memory onlining) or untouched pages (page isolation, free page 87 * reporting). 88 */ 89 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 90 91 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 92 static DEFINE_MUTEX(pcp_batch_high_lock); 93 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 94 95 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 96 /* 97 * On SMP, spin_trylock is sufficient protection. 98 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 
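 *
 * As a rough illustration of how these helpers are meant to be combined
 * (a sketch only; the variable and field names here are assumptions), a
 * caller brackets the trylock with the prepare/finish pair so that the UP
 * build gets IRQ protection:
 *
 *	unsigned long UP_flags;
 *	struct per_cpu_pages *pcp;
 *
 *	pcp_trylock_prepare(UP_flags);
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (pcp) {
 *		// work on pcp->lists and pcp->count
 *		pcp_spin_unlock(pcp);
 *	}
 *	pcp_trylock_finish(UP_flags);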
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up and lock a per-cpu variable with an embedded
 * spinlock. The return value should be passed to the matching unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_trylock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock(&_ret->member)) {				\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)						\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
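 *
 * These masks are normally consumed through the nodemask.h helpers rather
 * than indexed directly; a minimal sketch (node id chosen arbitrarily):
 *
 *	int nid;
 *
 *	for_each_node_state(nid, N_MEMORY)
 *		pr_info("node %d has memory\n", nid);
 *
 *	if (node_state(0, N_ONLINE))
 *		pr_info("node 0 is online\n");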
193 */ 194 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 195 [N_POSSIBLE] = NODE_MASK_ALL, 196 [N_ONLINE] = { { [0] = 1UL } }, 197 #ifndef CONFIG_NUMA 198 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 199 #ifdef CONFIG_HIGHMEM 200 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 201 #endif 202 [N_MEMORY] = { { [0] = 1UL } }, 203 [N_CPU] = { { [0] = 1UL } }, 204 #endif /* NUMA */ 205 }; 206 EXPORT_SYMBOL(node_states); 207 208 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 209 210 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 211 unsigned int pageblock_order __read_mostly; 212 #endif 213 214 static void __free_pages_ok(struct page *page, unsigned int order, 215 fpi_t fpi_flags); 216 217 /* 218 * results with 256, 32 in the lowmem_reserve sysctl: 219 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 220 * 1G machine -> (16M dma, 784M normal, 224M high) 221 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 222 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 223 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 224 * 225 * TBD: should special case ZONE_DMA32 machines here - in those we normally 226 * don't need any ZONE_NORMAL reservation 227 */ 228 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 229 #ifdef CONFIG_ZONE_DMA 230 [ZONE_DMA] = 256, 231 #endif 232 #ifdef CONFIG_ZONE_DMA32 233 [ZONE_DMA32] = 256, 234 #endif 235 [ZONE_NORMAL] = 32, 236 #ifdef CONFIG_HIGHMEM 237 [ZONE_HIGHMEM] = 0, 238 #endif 239 [ZONE_MOVABLE] = 0, 240 }; 241 242 char * const zone_names[MAX_NR_ZONES] = { 243 #ifdef CONFIG_ZONE_DMA 244 "DMA", 245 #endif 246 #ifdef CONFIG_ZONE_DMA32 247 "DMA32", 248 #endif 249 "Normal", 250 #ifdef CONFIG_HIGHMEM 251 "HighMem", 252 #endif 253 "Movable", 254 #ifdef CONFIG_ZONE_DEVICE 255 "Device", 256 #endif 257 }; 258 259 const char * const migratetype_names[MIGRATE_TYPES] = { 260 "Unmovable", 261 "Movable", 262 "Reclaimable", 263 "HighAtomic", 264 #ifdef CONFIG_CMA 265 "CMA", 266 #endif 267 #ifdef CONFIG_MEMORY_ISOLATION 268 "Isolate", 269 #endif 270 }; 271 272 int min_free_kbytes = 1024; 273 int user_min_free_kbytes = -1; 274 static int watermark_boost_factor __read_mostly = 15000; 275 static int watermark_scale_factor = 10; 276 277 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 278 int movable_zone; 279 EXPORT_SYMBOL(movable_zone); 280 281 #if MAX_NUMNODES > 1 282 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 283 unsigned int nr_online_nodes __read_mostly = 1; 284 EXPORT_SYMBOL(nr_node_ids); 285 EXPORT_SYMBOL(nr_online_nodes); 286 #endif 287 288 static bool page_contains_unaccepted(struct page *page, unsigned int order); 289 static bool cond_accept_memory(struct zone *zone, unsigned int order); 290 static bool __free_unaccepted(struct page *page); 291 292 int page_group_by_mobility_disabled __read_mostly; 293 294 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 295 /* 296 * During boot we initialize deferred pages on-demand, as needed, but once 297 * page_alloc_init_late() has finished, the deferred pages are all initialized, 298 * and we can permanently disable that path. 299 */ 300 DEFINE_STATIC_KEY_TRUE(deferred_pages); 301 302 static inline bool deferred_pages_enabled(void) 303 { 304 return static_branch_unlikely(&deferred_pages); 305 } 306 307 /* 308 * deferred_grow_zone() is __init, but it is called from 309 * get_page_from_freelist() during early boot until deferred_pages permanently 310 * disables this call. 
This is why we have refdata wrapper to avoid warning, 311 * and to ensure that the function body gets unloaded. 312 */ 313 static bool __ref 314 _deferred_grow_zone(struct zone *zone, unsigned int order) 315 { 316 return deferred_grow_zone(zone, order); 317 } 318 #else 319 static inline bool deferred_pages_enabled(void) 320 { 321 return false; 322 } 323 324 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) 325 { 326 return false; 327 } 328 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 329 330 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 331 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 332 unsigned long pfn) 333 { 334 #ifdef CONFIG_SPARSEMEM 335 return section_to_usemap(__pfn_to_section(pfn)); 336 #else 337 return page_zone(page)->pageblock_flags; 338 #endif /* CONFIG_SPARSEMEM */ 339 } 340 341 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 342 { 343 #ifdef CONFIG_SPARSEMEM 344 pfn &= (PAGES_PER_SECTION-1); 345 #else 346 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 347 #endif /* CONFIG_SPARSEMEM */ 348 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 349 } 350 351 /** 352 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages 353 * @page: The page within the block of interest 354 * @pfn: The target page frame number 355 * @mask: mask of bits that the caller is interested in 356 * 357 * Return: pageblock_bits flags 358 */ 359 unsigned long get_pfnblock_flags_mask(const struct page *page, 360 unsigned long pfn, unsigned long mask) 361 { 362 unsigned long *bitmap; 363 unsigned long bitidx, word_bitidx; 364 unsigned long word; 365 366 bitmap = get_pageblock_bitmap(page, pfn); 367 bitidx = pfn_to_bitidx(page, pfn); 368 word_bitidx = bitidx / BITS_PER_LONG; 369 bitidx &= (BITS_PER_LONG-1); 370 /* 371 * This races, without locks, with set_pfnblock_flags_mask(). Ensure 372 * a consistent read of the memory array, so that results, even though 373 * racy, are not corrupted. 
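 *
 * As a worked example of the arithmetic below (assuming pageblock_order
 * is 9 and BITS_PER_LONG is 64), a section- or zone-relative pfn of
 * 0x12345 resolves as:
 *
 *	bitidx      = (0x12345 >> 9) * NR_PAGEBLOCK_BITS = 145 * 4 = 580
 *	word_bitidx = 580 / 64 = 9
 *	bitidx     &= 63, i.e. 4
 *	flags       = (READ_ONCE(bitmap[9]) >> 4) & mask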
374 */ 375 word = READ_ONCE(bitmap[word_bitidx]); 376 return (word >> bitidx) & mask; 377 } 378 379 static __always_inline int get_pfnblock_migratetype(const struct page *page, 380 unsigned long pfn) 381 { 382 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 383 } 384 385 /** 386 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 387 * @page: The page within the block of interest 388 * @flags: The flags to set 389 * @pfn: The target page frame number 390 * @mask: mask of bits that the caller is interested in 391 */ 392 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 393 unsigned long pfn, 394 unsigned long mask) 395 { 396 unsigned long *bitmap; 397 unsigned long bitidx, word_bitidx; 398 unsigned long word; 399 400 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 401 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 402 403 bitmap = get_pageblock_bitmap(page, pfn); 404 bitidx = pfn_to_bitidx(page, pfn); 405 word_bitidx = bitidx / BITS_PER_LONG; 406 bitidx &= (BITS_PER_LONG-1); 407 408 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 409 410 mask <<= bitidx; 411 flags <<= bitidx; 412 413 word = READ_ONCE(bitmap[word_bitidx]); 414 do { 415 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); 416 } 417 418 void set_pageblock_migratetype(struct page *page, int migratetype) 419 { 420 if (unlikely(page_group_by_mobility_disabled && 421 migratetype < MIGRATE_PCPTYPES)) 422 migratetype = MIGRATE_UNMOVABLE; 423 424 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 425 page_to_pfn(page), MIGRATETYPE_MASK); 426 } 427 428 #ifdef CONFIG_DEBUG_VM 429 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 430 { 431 int ret; 432 unsigned seq; 433 unsigned long pfn = page_to_pfn(page); 434 unsigned long sp, start_pfn; 435 436 do { 437 seq = zone_span_seqbegin(zone); 438 start_pfn = zone->zone_start_pfn; 439 sp = zone->spanned_pages; 440 ret = !zone_spans_pfn(zone, pfn); 441 } while (zone_span_seqretry(zone, seq)); 442 443 if (ret) 444 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 445 pfn, zone_to_nid(zone), zone->name, 446 start_pfn, start_pfn + sp); 447 448 return ret; 449 } 450 451 /* 452 * Temporary debugging check for pages not lying within a given zone. 453 */ 454 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 455 { 456 if (page_outside_zone_boundaries(zone, page)) 457 return true; 458 if (zone != page_zone(page)) 459 return true; 460 461 return false; 462 } 463 #else 464 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 465 { 466 return false; 467 } 468 #endif 469 470 static void bad_page(struct page *page, const char *reason) 471 { 472 static unsigned long resume; 473 static unsigned long nr_shown; 474 static unsigned long nr_unshown; 475 476 /* 477 * Allow a burst of 60 reports, then keep quiet for that minute; 478 * or allow a steady drip of one report per second. 
479 */ 480 if (nr_shown == 60) { 481 if (time_before(jiffies, resume)) { 482 nr_unshown++; 483 goto out; 484 } 485 if (nr_unshown) { 486 pr_alert( 487 "BUG: Bad page state: %lu messages suppressed\n", 488 nr_unshown); 489 nr_unshown = 0; 490 } 491 nr_shown = 0; 492 } 493 if (nr_shown++ == 0) 494 resume = jiffies + 60 * HZ; 495 496 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 497 current->comm, page_to_pfn(page)); 498 dump_page(page, reason); 499 500 print_modules(); 501 dump_stack(); 502 out: 503 /* Leave bad fields for debug, except PageBuddy could make trouble */ 504 if (PageBuddy(page)) 505 __ClearPageBuddy(page); 506 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 507 } 508 509 static inline unsigned int order_to_pindex(int migratetype, int order) 510 { 511 bool __maybe_unused movable; 512 513 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 514 if (order > PAGE_ALLOC_COSTLY_ORDER) { 515 VM_BUG_ON(order != HPAGE_PMD_ORDER); 516 517 movable = migratetype == MIGRATE_MOVABLE; 518 519 return NR_LOWORDER_PCP_LISTS + movable; 520 } 521 #else 522 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 523 #endif 524 525 return (MIGRATE_PCPTYPES * order) + migratetype; 526 } 527 528 static inline int pindex_to_order(unsigned int pindex) 529 { 530 int order = pindex / MIGRATE_PCPTYPES; 531 532 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 533 if (pindex >= NR_LOWORDER_PCP_LISTS) 534 order = HPAGE_PMD_ORDER; 535 #else 536 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 537 #endif 538 539 return order; 540 } 541 542 static inline bool pcp_allowed_order(unsigned int order) 543 { 544 if (order <= PAGE_ALLOC_COSTLY_ORDER) 545 return true; 546 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 547 if (order == HPAGE_PMD_ORDER) 548 return true; 549 #endif 550 return false; 551 } 552 553 /* 554 * Higher-order pages are called "compound pages". They are structured thusly: 555 * 556 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 557 * 558 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 559 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 560 * 561 * The first tail page's ->compound_order holds the order of allocation. 562 * This usage means that zero-order pages may not be compound. 563 */ 564 565 void prep_compound_page(struct page *page, unsigned int order) 566 { 567 int i; 568 int nr_pages = 1 << order; 569 570 __SetPageHead(page); 571 for (i = 1; i < nr_pages; i++) 572 prep_compound_tail(page, i); 573 574 prep_compound_head(page, order); 575 } 576 577 static inline void set_buddy_order(struct page *page, unsigned int order) 578 { 579 set_page_private(page, order); 580 __SetPageBuddy(page); 581 } 582 583 #ifdef CONFIG_COMPACTION 584 static inline struct capture_control *task_capc(struct zone *zone) 585 { 586 struct capture_control *capc = current->capture_control; 587 588 return unlikely(capc) && 589 !(current->flags & PF_KTHREAD) && 590 !capc->page && 591 capc->cc->zone == zone ? capc : NULL; 592 } 593 594 static inline bool 595 compaction_capture(struct capture_control *capc, struct page *page, 596 int order, int migratetype) 597 { 598 if (!capc || order != capc->cc->order) 599 return false; 600 601 /* Do not accidentally pollute CMA or isolated regions*/ 602 if (is_migrate_cma(migratetype) || 603 is_migrate_isolate(migratetype)) 604 return false; 605 606 /* 607 * Do not let lower order allocations pollute a movable pageblock 608 * unless compaction is also requesting movable pages. 
609 * This might let an unmovable request use a reclaimable pageblock 610 * and vice-versa but no more than normal fallback logic which can 611 * have trouble finding a high-order free page. 612 */ 613 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 614 capc->cc->migratetype != MIGRATE_MOVABLE) 615 return false; 616 617 capc->page = page; 618 return true; 619 } 620 621 #else 622 static inline struct capture_control *task_capc(struct zone *zone) 623 { 624 return NULL; 625 } 626 627 static inline bool 628 compaction_capture(struct capture_control *capc, struct page *page, 629 int order, int migratetype) 630 { 631 return false; 632 } 633 #endif /* CONFIG_COMPACTION */ 634 635 static inline void account_freepages(struct zone *zone, int nr_pages, 636 int migratetype) 637 { 638 lockdep_assert_held(&zone->lock); 639 640 if (is_migrate_isolate(migratetype)) 641 return; 642 643 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 644 645 if (is_migrate_cma(migratetype)) 646 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 647 else if (is_migrate_highatomic(migratetype)) 648 WRITE_ONCE(zone->nr_free_highatomic, 649 zone->nr_free_highatomic + nr_pages); 650 } 651 652 /* Used for pages not on another list */ 653 static inline void __add_to_free_list(struct page *page, struct zone *zone, 654 unsigned int order, int migratetype, 655 bool tail) 656 { 657 struct free_area *area = &zone->free_area[order]; 658 659 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 660 "page type is %lu, passed migratetype is %d (nr=%d)\n", 661 get_pageblock_migratetype(page), migratetype, 1 << order); 662 663 if (tail) 664 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 665 else 666 list_add(&page->buddy_list, &area->free_list[migratetype]); 667 area->nr_free++; 668 } 669 670 /* 671 * Used for pages which are on another list. Move the pages to the tail 672 * of the list - so the moved pages won't immediately be considered for 673 * allocation again (e.g., optimization for memory onlining). 
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int old_mt, int new_mt)
{
	struct free_area *area = &zone->free_area[order];

	/* Free page moving can fail, so it happens before the type update */
	VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), old_mt, 1 << order);

	list_move_tail(&page->buddy_list, &area->free_list[new_mt]);

	account_freepages(zone, -(1 << order), old_mt);
	account_freepages(zone, 1 << order, new_mt);
}

static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
					     unsigned int order, int migratetype)
{
	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), migratetype, 1 << order);

	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order, int migratetype)
{
	__del_page_from_free_list(page, zone, order, migratetype);
	account_freepages(zone, -(1 << order), migratetype);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, buddy_list);
}

/*
 * If this is less than the 2nd largest possible page, check if the buddy
 * of the next-higher order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. If that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as part of a higher-order page two levels up.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_PAGE_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
				   NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * runs of free pages of length (1 << order) and marked with PageBuddy.
 * A page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.
That is, if we allocate a small block, and both were 764 * free, the remainder of the region must be split into blocks. 765 * If a block is freed, and its buddy is also free, then this 766 * triggers coalescing into a block of larger size. 767 * 768 * -- nyc 769 */ 770 771 static inline void __free_one_page(struct page *page, 772 unsigned long pfn, 773 struct zone *zone, unsigned int order, 774 int migratetype, fpi_t fpi_flags) 775 { 776 struct capture_control *capc = task_capc(zone); 777 unsigned long buddy_pfn = 0; 778 unsigned long combined_pfn; 779 struct page *buddy; 780 bool to_tail; 781 782 VM_BUG_ON(!zone_is_initialized(zone)); 783 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 784 785 VM_BUG_ON(migratetype == -1); 786 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 787 VM_BUG_ON_PAGE(bad_range(zone, page), page); 788 789 account_freepages(zone, 1 << order, migratetype); 790 791 while (order < MAX_PAGE_ORDER) { 792 int buddy_mt = migratetype; 793 794 if (compaction_capture(capc, page, order, migratetype)) { 795 account_freepages(zone, -(1 << order), migratetype); 796 return; 797 } 798 799 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 800 if (!buddy) 801 goto done_merging; 802 803 if (unlikely(order >= pageblock_order)) { 804 /* 805 * We want to prevent merge between freepages on pageblock 806 * without fallbacks and normal pageblock. Without this, 807 * pageblock isolation could cause incorrect freepage or CMA 808 * accounting or HIGHATOMIC accounting. 809 */ 810 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 811 812 if (migratetype != buddy_mt && 813 (!migratetype_is_mergeable(migratetype) || 814 !migratetype_is_mergeable(buddy_mt))) 815 goto done_merging; 816 } 817 818 /* 819 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 820 * merge with it and move up one order. 821 */ 822 if (page_is_guard(buddy)) 823 clear_page_guard(zone, buddy, order); 824 else 825 __del_page_from_free_list(buddy, zone, order, buddy_mt); 826 827 if (unlikely(buddy_mt != migratetype)) { 828 /* 829 * Match buddy type. This ensures that an 830 * expand() down the line puts the sub-blocks 831 * on the right freelists. 832 */ 833 set_pageblock_migratetype(buddy, migratetype); 834 } 835 836 combined_pfn = buddy_pfn & pfn; 837 page = page + (combined_pfn - pfn); 838 pfn = combined_pfn; 839 order++; 840 } 841 842 done_merging: 843 set_buddy_order(page, order); 844 845 if (fpi_flags & FPI_TO_TAIL) 846 to_tail = true; 847 else if (is_shuffle_order(order)) 848 to_tail = shuffle_pick_tail(); 849 else 850 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 851 852 __add_to_free_list(page, zone, order, migratetype, to_tail); 853 854 /* Notify page reporting subsystem of freed page */ 855 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 856 page_reporting_notify_free(order); 857 } 858 859 /* 860 * A bad page could be due to a number of fields. Instead of multiple branches, 861 * try and check multiple fields with one check. The caller must do a detailed 862 * check if necessary. 
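 *
 * The idea, reduced to a sketch (the names a, b, c are invented purely
 * for illustration):
 *
 *	// instead of: if (a) return false; if (b) return false; ...
 *	if (unlikely(a | b | c))
 *		return false;
 *
 * page_bad_reason() then re-examines the fields one by one only when a
 * detailed report is actually needed.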
863 */ 864 static inline bool page_expected_state(struct page *page, 865 unsigned long check_flags) 866 { 867 if (unlikely(atomic_read(&page->_mapcount) != -1)) 868 return false; 869 870 if (unlikely((unsigned long)page->mapping | 871 page_ref_count(page) | 872 #ifdef CONFIG_MEMCG 873 page->memcg_data | 874 #endif 875 #ifdef CONFIG_PAGE_POOL 876 ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) | 877 #endif 878 (page->flags & check_flags))) 879 return false; 880 881 return true; 882 } 883 884 static const char *page_bad_reason(struct page *page, unsigned long flags) 885 { 886 const char *bad_reason = NULL; 887 888 if (unlikely(atomic_read(&page->_mapcount) != -1)) 889 bad_reason = "nonzero mapcount"; 890 if (unlikely(page->mapping != NULL)) 891 bad_reason = "non-NULL mapping"; 892 if (unlikely(page_ref_count(page) != 0)) 893 bad_reason = "nonzero _refcount"; 894 if (unlikely(page->flags & flags)) { 895 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 896 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 897 else 898 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 899 } 900 #ifdef CONFIG_MEMCG 901 if (unlikely(page->memcg_data)) 902 bad_reason = "page still charged to cgroup"; 903 #endif 904 #ifdef CONFIG_PAGE_POOL 905 if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE)) 906 bad_reason = "page_pool leak"; 907 #endif 908 return bad_reason; 909 } 910 911 static void free_page_is_bad_report(struct page *page) 912 { 913 bad_page(page, 914 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 915 } 916 917 static inline bool free_page_is_bad(struct page *page) 918 { 919 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 920 return false; 921 922 /* Something has gone sideways, find it */ 923 free_page_is_bad_report(page); 924 return true; 925 } 926 927 static inline bool is_check_pages_enabled(void) 928 { 929 return static_branch_unlikely(&check_pages_enabled); 930 } 931 932 static int free_tail_page_prepare(struct page *head_page, struct page *page) 933 { 934 struct folio *folio = (struct folio *)head_page; 935 int ret = 1; 936 937 /* 938 * We rely page->lru.next never has bit 0 set, unless the page 939 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
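 *
 * For illustration of the encoding relied upon here (a simplified sketch,
 * not the helpers the kernel really uses, which are set_compound_head()
 * and compound_head()):
 *
 *	tail->compound_head = (unsigned long)head | 1;	// bit 0: PageTail
 *	head = (struct page *)(tail->compound_head - 1);
 *
 * LIST_POISON1 therefore must not have bit 0 set, or a poisoned ->lru
 * could be mistaken for a tail-page pointer.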
940 */ 941 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 942 943 if (!is_check_pages_enabled()) { 944 ret = 0; 945 goto out; 946 } 947 switch (page - head_page) { 948 case 1: 949 /* the first tail page: these may be in place of ->mapping */ 950 if (unlikely(folio_entire_mapcount(folio))) { 951 bad_page(page, "nonzero entire_mapcount"); 952 goto out; 953 } 954 if (unlikely(folio_large_mapcount(folio))) { 955 bad_page(page, "nonzero large_mapcount"); 956 goto out; 957 } 958 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { 959 bad_page(page, "nonzero nr_pages_mapped"); 960 goto out; 961 } 962 if (unlikely(atomic_read(&folio->_pincount))) { 963 bad_page(page, "nonzero pincount"); 964 goto out; 965 } 966 break; 967 case 2: 968 /* the second tail page: deferred_list overlaps ->mapping */ 969 if (unlikely(!list_empty(&folio->_deferred_list))) { 970 bad_page(page, "on deferred list"); 971 goto out; 972 } 973 break; 974 default: 975 if (page->mapping != TAIL_MAPPING) { 976 bad_page(page, "corrupted mapping in tail page"); 977 goto out; 978 } 979 break; 980 } 981 if (unlikely(!PageTail(page))) { 982 bad_page(page, "PageTail not set"); 983 goto out; 984 } 985 if (unlikely(compound_head(page) != head_page)) { 986 bad_page(page, "compound_head not consistent"); 987 goto out; 988 } 989 ret = 0; 990 out: 991 page->mapping = NULL; 992 clear_compound_head(page); 993 return ret; 994 } 995 996 /* 997 * Skip KASAN memory poisoning when either: 998 * 999 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1000 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1001 * using page tags instead (see below). 1002 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1003 * that error detection is disabled for accesses via the page address. 1004 * 1005 * Pages will have match-all tags in the following circumstances: 1006 * 1007 * 1. Pages are being initialized for the first time, including during deferred 1008 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1009 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1010 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1011 * 3. The allocation was excluded from being checked due to sampling, 1012 * see the call to kasan_unpoison_pages. 1013 * 1014 * Poisoning pages during deferred memory init will greatly lengthen the 1015 * process and cause problem in large memory systems as the deferred pages 1016 * initialization is done with interrupt disabled. 1017 * 1018 * Assuming that there will be no reference to those newly initialized 1019 * pages before they are ever allocated, this should have no effect on 1020 * KASAN memory tracking as the poison will be properly inserted at page 1021 * allocation time. The only corner case is when pages are allocated by 1022 * on-demand allocation and then freed again before the deferred pages 1023 * initialization is done, but this is not likely to happen. 1024 */ 1025 static inline bool should_skip_kasan_poison(struct page *page) 1026 { 1027 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1028 return deferred_pages_enabled(); 1029 1030 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1031 } 1032 1033 static void kernel_init_pages(struct page *page, int numpages) 1034 { 1035 int i; 1036 1037 /* s390's use of memset() could override KASAN redzones. 
*/ 1038 kasan_disable_current(); 1039 for (i = 0; i < numpages; i++) 1040 clear_highpage_kasan_tagged(page + i); 1041 kasan_enable_current(); 1042 } 1043 1044 __always_inline bool free_pages_prepare(struct page *page, 1045 unsigned int order) 1046 { 1047 int bad = 0; 1048 bool skip_kasan_poison = should_skip_kasan_poison(page); 1049 bool init = want_init_on_free(); 1050 bool compound = PageCompound(page); 1051 struct folio *folio = page_folio(page); 1052 1053 VM_BUG_ON_PAGE(PageTail(page), page); 1054 1055 trace_mm_page_free(page, order); 1056 kmsan_free_page(page, order); 1057 1058 if (memcg_kmem_online() && PageMemcgKmem(page)) 1059 __memcg_kmem_uncharge_page(page, order); 1060 1061 /* 1062 * In rare cases, when truncation or holepunching raced with 1063 * munlock after VM_LOCKED was cleared, Mlocked may still be 1064 * found set here. This does not indicate a problem, unless 1065 * "unevictable_pgs_cleared" appears worryingly large. 1066 */ 1067 if (unlikely(folio_test_mlocked(folio))) { 1068 long nr_pages = folio_nr_pages(folio); 1069 1070 __folio_clear_mlocked(folio); 1071 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1072 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1073 } 1074 1075 if (unlikely(PageHWPoison(page)) && !order) { 1076 /* Do not let hwpoison pages hit pcplists/buddy */ 1077 reset_page_owner(page, order); 1078 page_table_check_free(page, order); 1079 pgalloc_tag_sub(page, 1 << order); 1080 1081 /* 1082 * The page is isolated and accounted for. 1083 * Mark the codetag as empty to avoid accounting error 1084 * when the page is freed by unpoison_memory(). 1085 */ 1086 clear_page_tag_ref(page); 1087 return false; 1088 } 1089 1090 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1091 1092 /* 1093 * Check tail pages before head page information is cleared to 1094 * avoid checking PageCompound for order-0 pages. 1095 */ 1096 if (unlikely(order)) { 1097 int i; 1098 1099 if (compound) 1100 page[1].flags &= ~PAGE_FLAGS_SECOND; 1101 for (i = 1; i < (1 << order); i++) { 1102 if (compound) 1103 bad += free_tail_page_prepare(page, page + i); 1104 if (is_check_pages_enabled()) { 1105 if (free_page_is_bad(page + i)) { 1106 bad++; 1107 continue; 1108 } 1109 } 1110 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1111 } 1112 } 1113 if (PageMappingFlags(page)) { 1114 if (PageAnon(page)) 1115 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1116 page->mapping = NULL; 1117 } 1118 if (is_check_pages_enabled()) { 1119 if (free_page_is_bad(page)) 1120 bad++; 1121 if (bad) 1122 return false; 1123 } 1124 1125 page_cpupid_reset_last(page); 1126 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1127 reset_page_owner(page, order); 1128 page_table_check_free(page, order); 1129 pgalloc_tag_sub(page, 1 << order); 1130 1131 if (!PageHighMem(page)) { 1132 debug_check_no_locks_freed(page_address(page), 1133 PAGE_SIZE << order); 1134 debug_check_no_obj_freed(page_address(page), 1135 PAGE_SIZE << order); 1136 } 1137 1138 kernel_poison_pages(page, 1 << order); 1139 1140 /* 1141 * As memory initialization might be integrated into KASAN, 1142 * KASAN poisoning and memory initialization code must be 1143 * kept together to avoid discrepancies in behavior. 1144 * 1145 * With hardware tag-based KASAN, memory tags must be set before the 1146 * page becomes unavailable via debug_pagealloc or arch_free_page. 1147 */ 1148 if (!skip_kasan_poison) { 1149 kasan_poison_pages(page, order, init); 1150 1151 /* Memory is already initialized if KASAN did it internally. 
 */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

/*
 * Free a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
			       struct per_cpu_pages *pcp,
			       int pindex)
{
	unsigned long flags;
	unsigned int order;
	struct page *page;

	/*
	 * Ensure a sane count is passed; otherwise we could get stuck in the
	 * while (list_empty(list)) loop below.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > NR_PCP_LISTS - 1)
				pindex = 0;
			list = &pcp->lists[pindex];
		} while (list_empty(list));

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			unsigned long pfn;
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			pfn = page_to_pfn(page);
			mt = get_pfnblock_migratetype(page, pfn);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}

/* Split a multi-block free page into its individual pageblocks. */
static void split_large_buddy(struct zone *zone, struct page *page,
			      unsigned long pfn, int order, fpi_t fpi)
{
	unsigned long end = pfn + (1 << order);

	VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
	/* Caller removed page from freelist, buddy info cleared!
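 *
 * Worked example (a pageblock_order of 9 is assumed): splitting an
 * order-10 buddy at pfn 0x400 calls __free_one_page() twice, for
 * pfn 0x400 and pfn 0x600, each capped at pageblock_order and using the
 * migratetype read from its own pageblock, so per-block migratetypes
 * stay accurate when a buddy spans several blocks.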
*/ 1236 VM_WARN_ON_ONCE(PageBuddy(page)); 1237 1238 if (order > pageblock_order) 1239 order = pageblock_order; 1240 1241 while (pfn != end) { 1242 int mt = get_pfnblock_migratetype(page, pfn); 1243 1244 __free_one_page(page, pfn, zone, order, mt, fpi); 1245 pfn += 1 << order; 1246 page = pfn_to_page(pfn); 1247 } 1248 } 1249 1250 static void free_one_page(struct zone *zone, struct page *page, 1251 unsigned long pfn, unsigned int order, 1252 fpi_t fpi_flags) 1253 { 1254 unsigned long flags; 1255 1256 spin_lock_irqsave(&zone->lock, flags); 1257 split_large_buddy(zone, page, pfn, order, fpi_flags); 1258 spin_unlock_irqrestore(&zone->lock, flags); 1259 1260 __count_vm_events(PGFREE, 1 << order); 1261 } 1262 1263 static void __free_pages_ok(struct page *page, unsigned int order, 1264 fpi_t fpi_flags) 1265 { 1266 unsigned long pfn = page_to_pfn(page); 1267 struct zone *zone = page_zone(page); 1268 1269 if (free_pages_prepare(page, order)) 1270 free_one_page(zone, page, pfn, order, fpi_flags); 1271 } 1272 1273 void __meminit __free_pages_core(struct page *page, unsigned int order, 1274 enum meminit_context context) 1275 { 1276 unsigned int nr_pages = 1 << order; 1277 struct page *p = page; 1278 unsigned int loop; 1279 1280 /* 1281 * When initializing the memmap, __init_single_page() sets the refcount 1282 * of all pages to 1 ("allocated"/"not free"). We have to set the 1283 * refcount of all involved pages to 0. 1284 * 1285 * Note that hotplugged memory pages are initialized to PageOffline(). 1286 * Pages freed from memblock might be marked as reserved. 1287 */ 1288 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1289 unlikely(context == MEMINIT_HOTPLUG)) { 1290 for (loop = 0; loop < nr_pages; loop++, p++) { 1291 VM_WARN_ON_ONCE(PageReserved(p)); 1292 __ClearPageOffline(p); 1293 set_page_count(p, 0); 1294 } 1295 1296 /* 1297 * Freeing the page with debug_pagealloc enabled will try to 1298 * unmap it; some archs don't like double-unmappings, so 1299 * map it first. 1300 */ 1301 debug_pagealloc_map_pages(page, nr_pages); 1302 adjust_managed_page_count(page, nr_pages); 1303 } else { 1304 for (loop = 0; loop < nr_pages; loop++, p++) { 1305 __ClearPageReserved(p); 1306 set_page_count(p, 0); 1307 } 1308 1309 /* memblock adjusts totalram_pages() manually. */ 1310 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1311 } 1312 1313 if (page_contains_unaccepted(page, order)) { 1314 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1315 return; 1316 1317 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1318 } 1319 1320 /* 1321 * Bypass PCP and place fresh pages right to the tail, primarily 1322 * relevant for memory onlining. 1323 */ 1324 __free_pages_ok(page, order, FPI_TO_TAIL); 1325 } 1326 1327 /* 1328 * Check that the whole (or subset of) a pageblock given by the interval of 1329 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1330 * with the migration of free compaction scanner. 1331 * 1332 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1333 * 1334 * It's possible on some configurations to have a setup like node0 node1 node0 1335 * i.e. it's possible that all pages within a zones range of pages do not 1336 * belong to a single zone. We assume that a border between node0 and node1 1337 * can occur within a single pageblock, but not a node0 node1 node0 1338 * interleaving within a single pageblock. 
It is therefore sufficient to check 1339 * the first and last page of a pageblock and avoid checking each individual 1340 * page in a pageblock. 1341 * 1342 * Note: the function may return non-NULL struct page even for a page block 1343 * which contains a memory hole (i.e. there is no physical memory for a subset 1344 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which 1345 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1346 * even though the start pfn is online and valid. This should be safe most of 1347 * the time because struct pages are still initialized via init_unavailable_range() 1348 * and pfn walkers shouldn't touch any physical memory range for which they do 1349 * not recognize any specific metadata in struct pages. 1350 */ 1351 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1352 unsigned long end_pfn, struct zone *zone) 1353 { 1354 struct page *start_page; 1355 struct page *end_page; 1356 1357 /* end_pfn is one past the range we are checking */ 1358 end_pfn--; 1359 1360 if (!pfn_valid(end_pfn)) 1361 return NULL; 1362 1363 start_page = pfn_to_online_page(start_pfn); 1364 if (!start_page) 1365 return NULL; 1366 1367 if (page_zone(start_page) != zone) 1368 return NULL; 1369 1370 end_page = pfn_to_page(end_pfn); 1371 1372 /* This gives a shorter code than deriving page_zone(end_page) */ 1373 if (page_zone_id(start_page) != page_zone_id(end_page)) 1374 return NULL; 1375 1376 return start_page; 1377 } 1378 1379 /* 1380 * The order of subdivision here is critical for the IO subsystem. 1381 * Please do not alter this order without good reasons and regression 1382 * testing. Specifically, as large blocks of memory are subdivided, 1383 * the order in which smaller blocks are delivered depends on the order 1384 * they're subdivided in this function. This is the primary factor 1385 * influencing the order in which pages are delivered to the IO 1386 * subsystem according to empirical testing, and this is also justified 1387 * by considering the behavior of a buddy system containing a single 1388 * large block of memory acted on by a series of small allocations. 1389 * This behavior is a critical factor in sglist merging's success. 1390 * 1391 * -- nyc 1392 */ 1393 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1394 int high, int migratetype) 1395 { 1396 unsigned int size = 1 << high; 1397 unsigned int nr_added = 0; 1398 1399 while (high > low) { 1400 high--; 1401 size >>= 1; 1402 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1403 1404 /* 1405 * Mark as guard pages (or page), that will allow to 1406 * merge back to allocator when buddy will be freed. 
1407 * Corresponding page table entries will not be touched, 1408 * pages will stay not present in virtual address space 1409 */ 1410 if (set_page_guard(zone, &page[size], high)) 1411 continue; 1412 1413 __add_to_free_list(&page[size], zone, high, migratetype, false); 1414 set_buddy_order(&page[size], high); 1415 nr_added += size; 1416 } 1417 1418 return nr_added; 1419 } 1420 1421 static __always_inline void page_del_and_expand(struct zone *zone, 1422 struct page *page, int low, 1423 int high, int migratetype) 1424 { 1425 int nr_pages = 1 << high; 1426 1427 __del_page_from_free_list(page, zone, high, migratetype); 1428 nr_pages -= expand(zone, page, low, high, migratetype); 1429 account_freepages(zone, -nr_pages, migratetype); 1430 } 1431 1432 static void check_new_page_bad(struct page *page) 1433 { 1434 if (unlikely(page->flags & __PG_HWPOISON)) { 1435 /* Don't complain about hwpoisoned pages */ 1436 if (PageBuddy(page)) 1437 __ClearPageBuddy(page); 1438 return; 1439 } 1440 1441 bad_page(page, 1442 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1443 } 1444 1445 /* 1446 * This page is about to be returned from the page allocator 1447 */ 1448 static bool check_new_page(struct page *page) 1449 { 1450 if (likely(page_expected_state(page, 1451 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1452 return false; 1453 1454 check_new_page_bad(page); 1455 return true; 1456 } 1457 1458 static inline bool check_new_pages(struct page *page, unsigned int order) 1459 { 1460 if (is_check_pages_enabled()) { 1461 for (int i = 0; i < (1 << order); i++) { 1462 struct page *p = page + i; 1463 1464 if (check_new_page(p)) 1465 return true; 1466 } 1467 } 1468 1469 return false; 1470 } 1471 1472 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1473 { 1474 /* Don't skip if a software KASAN mode is enabled. */ 1475 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1476 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1477 return false; 1478 1479 /* Skip, if hardware tag-based KASAN is not enabled. */ 1480 if (!kasan_hw_tags_enabled()) 1481 return true; 1482 1483 /* 1484 * With hardware tag-based KASAN enabled, skip if this has been 1485 * requested via __GFP_SKIP_KASAN. 1486 */ 1487 return flags & __GFP_SKIP_KASAN; 1488 } 1489 1490 static inline bool should_skip_init(gfp_t flags) 1491 { 1492 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1493 if (!kasan_hw_tags_enabled()) 1494 return false; 1495 1496 /* For hardware tag-based KASAN, skip if requested. */ 1497 return (flags & __GFP_SKIP_ZERO); 1498 } 1499 1500 inline void post_alloc_hook(struct page *page, unsigned int order, 1501 gfp_t gfp_flags) 1502 { 1503 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1504 !should_skip_init(gfp_flags); 1505 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1506 int i; 1507 1508 set_page_private(page, 0); 1509 set_page_refcounted(page); 1510 1511 arch_alloc_page(page, order); 1512 debug_pagealloc_map_pages(page, 1 << order); 1513 1514 /* 1515 * Page unpoisoning must happen before memory initialization. 1516 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1517 * allocations and the page unpoisoning code will complain. 1518 */ 1519 kernel_unpoison_pages(page, 1 << order); 1520 1521 /* 1522 * As memory initialization might be integrated into KASAN, 1523 * KASAN unpoisoning and memory initializion code must be 1524 * kept together to avoid discrepancies in behavior. 
1525 */ 1526 1527 /* 1528 * If memory tags should be zeroed 1529 * (which happens only when memory should be initialized as well). 1530 */ 1531 if (zero_tags) { 1532 /* Initialize both memory and memory tags. */ 1533 for (i = 0; i != 1 << order; ++i) 1534 tag_clear_highpage(page + i); 1535 1536 /* Take note that memory was initialized by the loop above. */ 1537 init = false; 1538 } 1539 if (!should_skip_kasan_unpoison(gfp_flags) && 1540 kasan_unpoison_pages(page, order, init)) { 1541 /* Take note that memory was initialized by KASAN. */ 1542 if (kasan_has_integrated_init()) 1543 init = false; 1544 } else { 1545 /* 1546 * If memory tags have not been set by KASAN, reset the page 1547 * tags to ensure page_address() dereferencing does not fault. 1548 */ 1549 for (i = 0; i != 1 << order; ++i) 1550 page_kasan_tag_reset(page + i); 1551 } 1552 /* If memory is still not initialized, initialize it now. */ 1553 if (init) 1554 kernel_init_pages(page, 1 << order); 1555 1556 set_page_owner(page, order, gfp_flags); 1557 page_table_check_alloc(page, order); 1558 pgalloc_tag_add(page, current, 1 << order); 1559 } 1560 1561 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1562 unsigned int alloc_flags) 1563 { 1564 post_alloc_hook(page, order, gfp_flags); 1565 1566 if (order && (gfp_flags & __GFP_COMP)) 1567 prep_compound_page(page, order); 1568 1569 /* 1570 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1571 * allocate the page. The expectation is that the caller is taking 1572 * steps that will free more memory. The caller should avoid the page 1573 * being used for !PFMEMALLOC purposes. 1574 */ 1575 if (alloc_flags & ALLOC_NO_WATERMARKS) 1576 set_page_pfmemalloc(page); 1577 else 1578 clear_page_pfmemalloc(page); 1579 } 1580 1581 /* 1582 * Go through the free lists for the given migratetype and remove 1583 * the smallest available page from the freelists 1584 */ 1585 static __always_inline 1586 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1587 int migratetype) 1588 { 1589 unsigned int current_order; 1590 struct free_area *area; 1591 struct page *page; 1592 1593 /* Find a page of the appropriate size in the preferred list */ 1594 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1595 area = &(zone->free_area[current_order]); 1596 page = get_page_from_free_area(area, migratetype); 1597 if (!page) 1598 continue; 1599 1600 page_del_and_expand(zone, page, order, current_order, 1601 migratetype); 1602 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1603 pcp_allowed_order(order) && 1604 migratetype < MIGRATE_PCPTYPES); 1605 return page; 1606 } 1607 1608 return NULL; 1609 } 1610 1611 1612 /* 1613 * This array describes the order lists are fallen back to when 1614 * the free lists for the desirable migrate type are depleted 1615 * 1616 * The other migratetypes do not have fallbacks. 
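 *
 * Illustrative walk-through: a MIGRATE_UNMOVABLE request whose own list
 * is empty is retried by find_suitable_fallback() below in array order,
 * i.e. MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE:
 *
 *	for (i = 0; i < MIGRATE_PCPTYPES - 1; i++) {
 *		fallback_mt = fallbacks[MIGRATE_UNMOVABLE][i];
 *		// i == 0 -> MIGRATE_RECLAIMABLE, i == 1 -> MIGRATE_MOVABLE
 *	}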
1617 */ 1618 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1619 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1620 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1621 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1622 }; 1623 1624 #ifdef CONFIG_CMA 1625 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1626 unsigned int order) 1627 { 1628 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1629 } 1630 #else 1631 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1632 unsigned int order) { return NULL; } 1633 #endif 1634 1635 /* 1636 * Change the type of a block and move all its free pages to that 1637 * type's freelist. 1638 */ 1639 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1640 int old_mt, int new_mt) 1641 { 1642 struct page *page; 1643 unsigned long pfn, end_pfn; 1644 unsigned int order; 1645 int pages_moved = 0; 1646 1647 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1648 end_pfn = pageblock_end_pfn(start_pfn); 1649 1650 for (pfn = start_pfn; pfn < end_pfn;) { 1651 page = pfn_to_page(pfn); 1652 if (!PageBuddy(page)) { 1653 pfn++; 1654 continue; 1655 } 1656 1657 /* Make sure we are not inadvertently changing nodes */ 1658 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1659 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1660 1661 order = buddy_order(page); 1662 1663 move_to_free_list(page, zone, order, old_mt, new_mt); 1664 1665 pfn += 1 << order; 1666 pages_moved += 1 << order; 1667 } 1668 1669 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 1670 1671 return pages_moved; 1672 } 1673 1674 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 1675 unsigned long *start_pfn, 1676 int *num_free, int *num_movable) 1677 { 1678 unsigned long pfn, start, end; 1679 1680 pfn = page_to_pfn(page); 1681 start = pageblock_start_pfn(pfn); 1682 end = pageblock_end_pfn(pfn); 1683 1684 /* 1685 * The caller only has the lock for @zone, don't touch ranges 1686 * that straddle into other zones. While we could move part of 1687 * the range that's inside the zone, this call is usually 1688 * accompanied by other operations such as migratetype updates 1689 * which also should be locked. 1690 */ 1691 if (!zone_spans_pfn(zone, start)) 1692 return false; 1693 if (!zone_spans_pfn(zone, end - 1)) 1694 return false; 1695 1696 *start_pfn = start; 1697 1698 if (num_free) { 1699 *num_free = 0; 1700 *num_movable = 0; 1701 for (pfn = start; pfn < end;) { 1702 page = pfn_to_page(pfn); 1703 if (PageBuddy(page)) { 1704 int nr = 1 << buddy_order(page); 1705 1706 *num_free += nr; 1707 pfn += nr; 1708 continue; 1709 } 1710 /* 1711 * We assume that pages that could be isolated for 1712 * migration are movable. But we don't actually try 1713 * isolating, as that would be expensive. 
1714 */ 1715 if (PageLRU(page) || __PageMovable(page)) 1716 (*num_movable)++; 1717 pfn++; 1718 } 1719 } 1720 1721 return true; 1722 } 1723 1724 static int move_freepages_block(struct zone *zone, struct page *page, 1725 int old_mt, int new_mt) 1726 { 1727 unsigned long start_pfn; 1728 1729 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1730 return -1; 1731 1732 return __move_freepages_block(zone, start_pfn, old_mt, new_mt); 1733 } 1734 1735 #ifdef CONFIG_MEMORY_ISOLATION 1736 /* Look for a buddy that straddles start_pfn */ 1737 static unsigned long find_large_buddy(unsigned long start_pfn) 1738 { 1739 int order = 0; 1740 struct page *page; 1741 unsigned long pfn = start_pfn; 1742 1743 while (!PageBuddy(page = pfn_to_page(pfn))) { 1744 /* Nothing found */ 1745 if (++order > MAX_PAGE_ORDER) 1746 return start_pfn; 1747 pfn &= ~0UL << order; 1748 } 1749 1750 /* 1751 * Found a preceding buddy, but does it straddle? 1752 */ 1753 if (pfn + (1 << buddy_order(page)) > start_pfn) 1754 return pfn; 1755 1756 /* Nothing found */ 1757 return start_pfn; 1758 } 1759 1760 /** 1761 * move_freepages_block_isolate - move free pages in block for page isolation 1762 * @zone: the zone 1763 * @page: the pageblock page 1764 * @migratetype: migratetype to set on the pageblock 1765 * 1766 * This is similar to move_freepages_block(), but handles the special 1767 * case encountered in page isolation, where the block of interest 1768 * might be part of a larger buddy spanning multiple pageblocks. 1769 * 1770 * Unlike the regular page allocator path, which moves pages while 1771 * stealing buddies off the freelist, page isolation is interested in 1772 * arbitrary pfn ranges that may have overlapping buddies on both ends. 1773 * 1774 * This function handles that. Straddling buddies are split into 1775 * individual pageblocks. Only the block of interest is moved. 1776 * 1777 * Returns %true if pages could be moved, %false otherwise. 
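 *
 * A hedged sketch of the expected call site (which lives in the page
 * isolation code rather than in this file):
 *
 *	if (!move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE))
 *		goto failed;	// label assumed for illustration
 *
 * with a mirror call passing the original migratetype when isolation is
 * undone.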
1778 */ 1779 bool move_freepages_block_isolate(struct zone *zone, struct page *page, 1780 int migratetype) 1781 { 1782 unsigned long start_pfn, pfn; 1783 1784 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1785 return false; 1786 1787 /* No splits needed if buddies can't span multiple blocks */ 1788 if (pageblock_order == MAX_PAGE_ORDER) 1789 goto move; 1790 1791 /* We're a tail block in a larger buddy */ 1792 pfn = find_large_buddy(start_pfn); 1793 if (pfn != start_pfn) { 1794 struct page *buddy = pfn_to_page(pfn); 1795 int order = buddy_order(buddy); 1796 1797 del_page_from_free_list(buddy, zone, order, 1798 get_pfnblock_migratetype(buddy, pfn)); 1799 set_pageblock_migratetype(page, migratetype); 1800 split_large_buddy(zone, buddy, pfn, order, FPI_NONE); 1801 return true; 1802 } 1803 1804 /* We're the starting block of a larger buddy */ 1805 if (PageBuddy(page) && buddy_order(page) > pageblock_order) { 1806 int order = buddy_order(page); 1807 1808 del_page_from_free_list(page, zone, order, 1809 get_pfnblock_migratetype(page, pfn)); 1810 set_pageblock_migratetype(page, migratetype); 1811 split_large_buddy(zone, page, pfn, order, FPI_NONE); 1812 return true; 1813 } 1814 move: 1815 __move_freepages_block(zone, start_pfn, 1816 get_pfnblock_migratetype(page, start_pfn), 1817 migratetype); 1818 return true; 1819 } 1820 #endif /* CONFIG_MEMORY_ISOLATION */ 1821 1822 static void change_pageblock_range(struct page *pageblock_page, 1823 int start_order, int migratetype) 1824 { 1825 int nr_pageblocks = 1 << (start_order - pageblock_order); 1826 1827 while (nr_pageblocks--) { 1828 set_pageblock_migratetype(pageblock_page, migratetype); 1829 pageblock_page += pageblock_nr_pages; 1830 } 1831 } 1832 1833 /* 1834 * When we are falling back to another migratetype during allocation, try to 1835 * steal extra free pages from the same pageblocks to satisfy further 1836 * allocations, instead of polluting multiple pageblocks. 1837 * 1838 * If we are stealing a relatively large buddy page, it is likely there will 1839 * be more free pages in the pageblock, so try to steal them all. For 1840 * reclaimable and unmovable allocations, we steal regardless of page size, 1841 * as fragmentation caused by those allocations polluting movable pageblocks 1842 * is worse than movable allocations stealing from unmovable and reclaimable 1843 * pageblocks. 1844 */ 1845 static bool can_steal_fallback(unsigned int order, int start_mt) 1846 { 1847 /* 1848 * Leaving this order check is intended, although there is 1849 * relaxed order check in next check. The reason is that 1850 * we can actually steal whole pageblock if this condition met, 1851 * but, below check doesn't guarantee it and that is just heuristic 1852 * so could be changed anytime. 1853 */ 1854 if (order >= pageblock_order) 1855 return true; 1856 1857 if (order >= pageblock_order / 2 || 1858 start_mt == MIGRATE_RECLAIMABLE || 1859 start_mt == MIGRATE_UNMOVABLE || 1860 page_group_by_mobility_disabled) 1861 return true; 1862 1863 return false; 1864 } 1865 1866 static inline bool boost_watermark(struct zone *zone) 1867 { 1868 unsigned long max_boost; 1869 1870 if (!watermark_boost_factor) 1871 return false; 1872 /* 1873 * Don't bother in zones that are unlikely to produce results. 1874 * On small machines, including kdump capture kernels running 1875 * in a small area, boosting the watermark can cause an out of 1876 * memory situation immediately. 
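 *
 * Worked example with the defaults defined above: watermark_boost_factor
 * is 15000, so max_boost works out to 150% of the high watermark,
 *
 *	max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 15000, 10000);
 *
 * and each boost below adds pageblock_nr_pages, clamped to that ceiling.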
1877 */ 1878 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1879 return false; 1880 1881 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 1882 watermark_boost_factor, 10000); 1883 1884 /* 1885 * high watermark may be uninitialised if fragmentation occurs 1886 * very early in boot so do not boost. We do not fall 1887 * through and boost by pageblock_nr_pages as failing 1888 * allocations that early means that reclaim is not going 1889 * to help and it may even be impossible to reclaim the 1890 * boosted watermark resulting in a hang. 1891 */ 1892 if (!max_boost) 1893 return false; 1894 1895 max_boost = max(pageblock_nr_pages, max_boost); 1896 1897 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 1898 max_boost); 1899 1900 return true; 1901 } 1902 1903 /* 1904 * This function implements actual steal behaviour. If order is large enough, we 1905 * can claim the whole pageblock for the requested migratetype. If not, we check 1906 * the pageblock for constituent pages; if at least half of the pages are free 1907 * or compatible, we can still claim the whole block, so pages freed in the 1908 * future will be put on the correct free list. Otherwise, we isolate exactly 1909 * the order we need from the fallback block and leave its migratetype alone. 1910 */ 1911 static struct page * 1912 steal_suitable_fallback(struct zone *zone, struct page *page, 1913 int current_order, int order, int start_type, 1914 unsigned int alloc_flags, bool whole_block) 1915 { 1916 int free_pages, movable_pages, alike_pages; 1917 unsigned long start_pfn; 1918 int block_type; 1919 1920 block_type = get_pageblock_migratetype(page); 1921 1922 /* 1923 * This can happen due to races and we want to prevent broken 1924 * highatomic accounting. 1925 */ 1926 if (is_migrate_highatomic(block_type)) 1927 goto single_page; 1928 1929 /* Take ownership for orders >= pageblock_order */ 1930 if (current_order >= pageblock_order) { 1931 unsigned int nr_added; 1932 1933 del_page_from_free_list(page, zone, current_order, block_type); 1934 change_pageblock_range(page, current_order, start_type); 1935 nr_added = expand(zone, page, order, current_order, start_type); 1936 account_freepages(zone, nr_added, start_type); 1937 return page; 1938 } 1939 1940 /* 1941 * Boost watermarks to increase reclaim pressure to reduce the 1942 * likelihood of future fallbacks. Wake kswapd now as the node 1943 * may be balanced overall and kswapd will not wake naturally. 1944 */ 1945 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 1946 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 1947 1948 /* We are not allowed to try stealing from the whole block */ 1949 if (!whole_block) 1950 goto single_page; 1951 1952 /* moving whole block can fail due to zone boundary conditions */ 1953 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 1954 &movable_pages)) 1955 goto single_page; 1956 1957 /* 1958 * Determine how many pages are compatible with our allocation. 1959 * For movable allocation, it's the number of movable pages which 1960 * we just obtained. For other types it's a bit more tricky. 1961 */ 1962 if (start_type == MIGRATE_MOVABLE) { 1963 alike_pages = movable_pages; 1964 } else { 1965 /* 1966 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 1967 * to MOVABLE pageblock, consider all non-movable pages as 1968 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 1969 * vice versa, be conservative since we can't distinguish the 1970 * exact migratetype of non-movable pages. 
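 * In the conservative case alike_pages is zero, so the block below is
 * only claimed when at least half of it is already free.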
1971 */ 1972 if (block_type == MIGRATE_MOVABLE) 1973 alike_pages = pageblock_nr_pages 1974 - (free_pages + movable_pages); 1975 else 1976 alike_pages = 0; 1977 } 1978 /* 1979 * If a sufficient number of pages in the block are either free or of 1980 * compatible migratability as our allocation, claim the whole block. 1981 */ 1982 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 1983 page_group_by_mobility_disabled) { 1984 __move_freepages_block(zone, start_pfn, block_type, start_type); 1985 return __rmqueue_smallest(zone, order, start_type); 1986 } 1987 1988 single_page: 1989 page_del_and_expand(zone, page, order, current_order, block_type); 1990 return page; 1991 } 1992 1993 /* 1994 * Check whether there is a suitable fallback freepage with requested order. 1995 * If only_stealable is true, this function returns fallback_mt only if 1996 * we can steal other freepages all together. This would help to reduce 1997 * fragmentation due to mixed migratetype pages in one pageblock. 1998 */ 1999 int find_suitable_fallback(struct free_area *area, unsigned int order, 2000 int migratetype, bool only_stealable, bool *can_steal) 2001 { 2002 int i; 2003 int fallback_mt; 2004 2005 if (area->nr_free == 0) 2006 return -1; 2007 2008 *can_steal = false; 2009 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 2010 fallback_mt = fallbacks[migratetype][i]; 2011 if (free_area_empty(area, fallback_mt)) 2012 continue; 2013 2014 if (can_steal_fallback(order, migratetype)) 2015 *can_steal = true; 2016 2017 if (!only_stealable) 2018 return fallback_mt; 2019 2020 if (*can_steal) 2021 return fallback_mt; 2022 } 2023 2024 return -1; 2025 } 2026 2027 /* 2028 * Reserve the pageblock(s) surrounding an allocation request for 2029 * exclusive use of high-order atomic allocations if there are no 2030 * empty page blocks that contain a page with a suitable order 2031 */ 2032 static void reserve_highatomic_pageblock(struct page *page, int order, 2033 struct zone *zone) 2034 { 2035 int mt; 2036 unsigned long max_managed, flags; 2037 2038 /* 2039 * The number reserved as: minimum is 1 pageblock, maximum is 2040 * roughly 1% of a zone. But if 1% of a zone falls below a 2041 * pageblock size, then don't reserve any pageblocks. 2042 * Check is race-prone but harmless. 2043 */ 2044 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 2045 return; 2046 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 2047 if (zone->nr_reserved_highatomic >= max_managed) 2048 return; 2049 2050 spin_lock_irqsave(&zone->lock, flags); 2051 2052 /* Recheck the nr_reserved_highatomic limit under the lock */ 2053 if (zone->nr_reserved_highatomic >= max_managed) 2054 goto out_unlock; 2055 2056 /* Yoink! */ 2057 mt = get_pageblock_migratetype(page); 2058 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 2059 if (!migratetype_is_mergeable(mt)) 2060 goto out_unlock; 2061 2062 if (order < pageblock_order) { 2063 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 2064 goto out_unlock; 2065 zone->nr_reserved_highatomic += pageblock_nr_pages; 2066 } else { 2067 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 2068 zone->nr_reserved_highatomic += 1 << order; 2069 } 2070 2071 out_unlock: 2072 spin_unlock_irqrestore(&zone->lock, flags); 2073 } 2074 2075 /* 2076 * Used when an allocation is about to fail under memory pressure. 
This 2077 * potentially hurts the reliability of high-order allocations when under 2078 * intense memory pressure but failed atomic allocations should be easier 2079 * to recover from than an OOM. 2080 * 2081 * If @force is true, try to unreserve pageblocks even if that exhausts 2082 * the highatomic reserve. 2083 */ 2084 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2085 bool force) 2086 { 2087 struct zonelist *zonelist = ac->zonelist; 2088 unsigned long flags; 2089 struct zoneref *z; 2090 struct zone *zone; 2091 struct page *page; 2092 int order; 2093 int ret; 2094 2095 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2096 ac->nodemask) { 2097 /* 2098 * Preserve at least one pageblock unless memory pressure 2099 * is really high. 2100 */ 2101 if (!force && zone->nr_reserved_highatomic <= 2102 pageblock_nr_pages) 2103 continue; 2104 2105 spin_lock_irqsave(&zone->lock, flags); 2106 for (order = 0; order < NR_PAGE_ORDERS; order++) { 2107 struct free_area *area = &(zone->free_area[order]); 2108 int mt; 2109 2110 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2111 if (!page) 2112 continue; 2113 2114 mt = get_pageblock_migratetype(page); 2115 /* 2116 * In the page freeing path, the migratetype change is racy, 2117 * so we may encounter several free pages in a pageblock 2118 * in this loop although we changed the pageblock type 2119 * from highatomic to ac->migratetype. So we should 2120 * adjust the count only once. 2121 */ 2122 if (is_migrate_highatomic(mt)) { 2123 unsigned long size; 2124 /* 2125 * It should never happen but changes to 2126 * locking could inadvertently allow a per-cpu 2127 * drain to add pages to MIGRATE_HIGHATOMIC 2128 * while unreserving so be safe and watch for 2129 * underflows. 2130 */ 2131 size = max(pageblock_nr_pages, 1UL << order); 2132 size = min(size, zone->nr_reserved_highatomic); 2133 zone->nr_reserved_highatomic -= size; 2134 } 2135 2136 /* 2137 * Convert to ac->migratetype and avoid the normal 2138 * pageblock stealing heuristics. Minimally, the caller 2139 * is doing the work and needs the pages. More 2140 * importantly, if the block was always converted to 2141 * MIGRATE_UNMOVABLE or another type then the number 2142 * of pageblocks that cannot be completely freed 2143 * may increase. 2144 */ 2145 if (order < pageblock_order) 2146 ret = move_freepages_block(zone, page, mt, 2147 ac->migratetype); 2148 else { 2149 move_to_free_list(page, zone, order, mt, 2150 ac->migratetype); 2151 change_pageblock_range(page, order, 2152 ac->migratetype); 2153 ret = 1; 2154 } 2155 /* 2156 * Reserving the block(s) already succeeded, 2157 * so this should not fail on zone boundaries. 2158 */ 2159 WARN_ON_ONCE(ret == -1); 2160 if (ret > 0) { 2161 spin_unlock_irqrestore(&zone->lock, flags); 2162 return ret; 2163 } 2164 } 2165 spin_unlock_irqrestore(&zone->lock, flags); 2166 } 2167 2168 return false; 2169 } 2170 2171 /* 2172 * Try finding a free buddy page on the fallback list and put it on the free 2173 * list of requested migratetype, possibly along with other pages from the same 2174 * block, depending on fragmentation avoidance heuristics. Returns the page 2175 * taken from the fallback list, or NULL if no suitable fallback was found. 2176 * 2177 * The use of signed ints for order and current_order is a deliberate 2178 * deviation from the rest of this file, to make the for loop 2179 * condition simpler.
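 * (With a min_order of 0, current_order must be able to reach -1 for
 * the descending search below to terminate.)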
2180 */ 2181 static __always_inline struct page * 2182 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2183 unsigned int alloc_flags) 2184 { 2185 struct free_area *area; 2186 int current_order; 2187 int min_order = order; 2188 struct page *page; 2189 int fallback_mt; 2190 bool can_steal; 2191 2192 /* 2193 * Do not steal pages from freelists belonging to other pageblocks 2194 * i.e. orders < pageblock_order. If there are no local zones free, 2195 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2196 */ 2197 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2198 min_order = pageblock_order; 2199 2200 /* 2201 * Find the largest available free page in the other list. This roughly 2202 * approximates finding the pageblock with the most free pages, which 2203 * would be too costly to do exactly. 2204 */ 2205 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2206 --current_order) { 2207 area = &(zone->free_area[current_order]); 2208 fallback_mt = find_suitable_fallback(area, current_order, 2209 start_migratetype, false, &can_steal); 2210 if (fallback_mt == -1) 2211 continue; 2212 2213 /* 2214 * We cannot steal all free pages from the pageblock and the 2215 * requested migratetype is movable. In that case it's better to 2216 * steal and split the smallest available page instead of the 2217 * largest available page, because even if the next movable 2218 * allocation falls back into a different pageblock than this 2219 * one, it won't cause permanent fragmentation. 2220 */ 2221 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2222 && current_order > order) 2223 goto find_smallest; 2224 2225 goto do_steal; 2226 } 2227 2228 return NULL; 2229 2230 find_smallest: 2231 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2232 area = &(zone->free_area[current_order]); 2233 fallback_mt = find_suitable_fallback(area, current_order, 2234 start_migratetype, false, &can_steal); 2235 if (fallback_mt != -1) 2236 break; 2237 } 2238 2239 /* 2240 * This should not happen - we already found a suitable fallback 2241 * when looking for the largest page. 2242 */ 2243 VM_BUG_ON(current_order > MAX_PAGE_ORDER); 2244 2245 do_steal: 2246 page = get_page_from_free_area(area, fallback_mt); 2247 2248 /* take off list, maybe claim block, expand remainder */ 2249 page = steal_suitable_fallback(zone, page, current_order, order, 2250 start_migratetype, alloc_flags, can_steal); 2251 2252 trace_mm_page_alloc_extfrag(page, order, current_order, 2253 start_migratetype, fallback_mt); 2254 2255 return page; 2256 } 2257 2258 /* 2259 * Do the hard work of removing an element from the buddy allocator. 2260 * Call me with the zone->lock already held. 2261 */ 2262 static __always_inline struct page * 2263 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2264 unsigned int alloc_flags) 2265 { 2266 struct page *page; 2267 2268 if (IS_ENABLED(CONFIG_CMA)) { 2269 /* 2270 * Balance movable allocations between regular and CMA areas by 2271 * allocating from CMA when over half of the zone's free memory 2272 * is in the CMA area. 
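 * For example, if a zone has 1GiB free of which 600MiB sits in CMA,
 * movable allocations are tried from CMA first so that the regular
 * area is not depleted while CMA pages sit idle.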
2273 */ 2274 if (alloc_flags & ALLOC_CMA && 2275 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2276 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2277 page = __rmqueue_cma_fallback(zone, order); 2278 if (page) 2279 return page; 2280 } 2281 } 2282 2283 page = __rmqueue_smallest(zone, order, migratetype); 2284 if (unlikely(!page)) { 2285 if (alloc_flags & ALLOC_CMA) 2286 page = __rmqueue_cma_fallback(zone, order); 2287 2288 if (!page) 2289 page = __rmqueue_fallback(zone, order, migratetype, 2290 alloc_flags); 2291 } 2292 return page; 2293 } 2294 2295 /* 2296 * Obtain a specified number of elements from the buddy allocator, all under 2297 * a single hold of the lock, for efficiency. Add them to the supplied list. 2298 * Returns the number of new pages which were placed at *list. 2299 */ 2300 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2301 unsigned long count, struct list_head *list, 2302 int migratetype, unsigned int alloc_flags) 2303 { 2304 unsigned long flags; 2305 int i; 2306 2307 spin_lock_irqsave(&zone->lock, flags); 2308 for (i = 0; i < count; ++i) { 2309 struct page *page = __rmqueue(zone, order, migratetype, 2310 alloc_flags); 2311 if (unlikely(page == NULL)) 2312 break; 2313 2314 /* 2315 * Split buddy pages returned by expand() are received here in 2316 * physical page order. Each page is added to the tail of the 2317 * caller's list, so from the caller's perspective the linked 2318 * list is ordered by page number under some conditions. This 2319 * is useful for IO devices that process the list from the head 2320 * onward, and thus also in physical page order, and for IO 2321 * devices that can merge requests when the physical 2322 * pages are ordered properly. 2323 */ 2324 list_add_tail(&page->pcp_list, list); 2325 } 2326 spin_unlock_irqrestore(&zone->lock, flags); 2327 2328 return i; 2329 } 2330 2331 /* 2332 * Called from the vmstat counter updater to decay the PCP high. 2333 * Return whether there is additional work to do. 2334 */ 2335 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) 2336 { 2337 int high_min, to_drain, batch; 2338 int todo = 0; 2339 2340 high_min = READ_ONCE(pcp->high_min); 2341 batch = READ_ONCE(pcp->batch); 2342 /* 2343 * Decrease pcp->high periodically to try to free idle PCP pages, 2344 * but avoid freeing too many pages at a time in order to control 2345 * latency. This also caps the pcp->high decrement. 2346 */ 2347 if (pcp->high > high_min) { 2348 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2349 pcp->high - (pcp->high >> 3), high_min); 2350 if (pcp->high > high_min) 2351 todo++; 2352 } 2353 2354 to_drain = pcp->count - pcp->high; 2355 if (to_drain > 0) { 2356 spin_lock(&pcp->lock); 2357 free_pcppages_bulk(zone, to_drain, pcp, 0); 2358 spin_unlock(&pcp->lock); 2359 todo++; 2360 } 2361 2362 return todo; 2363 } 2364 2365 #ifdef CONFIG_NUMA 2366 /* 2367 * Called from the vmstat counter updater to drain the pagesets of the 2368 * currently executing processor on remote nodes after they have 2369 * expired. 2370 */ 2371 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2372 { 2373 int to_drain, batch; 2374 2375 batch = READ_ONCE(pcp->batch); 2376 to_drain = min(pcp->count, batch); 2377 if (to_drain > 0) { 2378 spin_lock(&pcp->lock); 2379 free_pcppages_bulk(zone, to_drain, pcp, 0); 2380 spin_unlock(&pcp->lock); 2381 } 2382 } 2383 #endif 2384 2385 /* 2386 * Drain pcplists of the indicated processor and zone.
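 * Pages are freed in batches of at most pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX,
 * dropping pcp->lock between batches so it is not held for too long.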
2387 */ 2388 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2389 { 2390 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2391 int count; 2392 2393 do { 2394 spin_lock(&pcp->lock); 2395 count = pcp->count; 2396 if (count) { 2397 int to_drain = min(count, 2398 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2399 2400 free_pcppages_bulk(zone, to_drain, pcp, 0); 2401 count -= to_drain; 2402 } 2403 spin_unlock(&pcp->lock); 2404 } while (count); 2405 } 2406 2407 /* 2408 * Drain pcplists of all zones on the indicated processor. 2409 */ 2410 static void drain_pages(unsigned int cpu) 2411 { 2412 struct zone *zone; 2413 2414 for_each_populated_zone(zone) { 2415 drain_pages_zone(cpu, zone); 2416 } 2417 } 2418 2419 /* 2420 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2421 */ 2422 void drain_local_pages(struct zone *zone) 2423 { 2424 int cpu = smp_processor_id(); 2425 2426 if (zone) 2427 drain_pages_zone(cpu, zone); 2428 else 2429 drain_pages(cpu); 2430 } 2431 2432 /* 2433 * The implementation of drain_all_pages(), exposing an extra parameter to 2434 * drain on all cpus. 2435 * 2436 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2437 * not empty. The check for non-emptiness can however race with a free to 2438 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2439 * that need the guarantee that every CPU has drained can disable the 2440 * optimizing racy check. 2441 */ 2442 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2443 { 2444 int cpu; 2445 2446 /* 2447 * Allocate in the BSS so we won't require allocation in 2448 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2449 */ 2450 static cpumask_t cpus_with_pcps; 2451 2452 /* 2453 * Do not drain if one is already in progress unless it's specific to 2454 * a zone. Such callers are primarily CMA and memory hotplug and need 2455 * the drain to be complete when the call returns. 2456 */ 2457 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2458 if (!zone) 2459 return; 2460 mutex_lock(&pcpu_drain_mutex); 2461 } 2462 2463 /* 2464 * We don't care about racing with CPU hotplug event 2465 * as offline notification will cause the notified 2466 * cpu to drain that CPU pcps and on_each_cpu_mask 2467 * disables preemption as part of its processing 2468 */ 2469 for_each_online_cpu(cpu) { 2470 struct per_cpu_pages *pcp; 2471 struct zone *z; 2472 bool has_pcps = false; 2473 2474 if (force_all_cpus) { 2475 /* 2476 * The pcp.count check is racy, some callers need a 2477 * guarantee that no cpu is missed. 2478 */ 2479 has_pcps = true; 2480 } else if (zone) { 2481 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2482 if (pcp->count) 2483 has_pcps = true; 2484 } else { 2485 for_each_populated_zone(z) { 2486 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2487 if (pcp->count) { 2488 has_pcps = true; 2489 break; 2490 } 2491 } 2492 } 2493 2494 if (has_pcps) 2495 cpumask_set_cpu(cpu, &cpus_with_pcps); 2496 else 2497 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2498 } 2499 2500 for_each_cpu(cpu, &cpus_with_pcps) { 2501 if (zone) 2502 drain_pages_zone(cpu, zone); 2503 else 2504 drain_pages(cpu); 2505 } 2506 2507 mutex_unlock(&pcpu_drain_mutex); 2508 } 2509 2510 /* 2511 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2512 * 2513 * When zone parameter is non-NULL, spill just the single zone's pages. 
2514 */ 2515 void drain_all_pages(struct zone *zone) 2516 { 2517 __drain_all_pages(zone, false); 2518 } 2519 2520 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2521 { 2522 int min_nr_free, max_nr_free; 2523 2524 /* Free as much as possible if batch freeing high-order pages. */ 2525 if (unlikely(free_high)) 2526 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2527 2528 /* Check for PCP disabled or boot pageset */ 2529 if (unlikely(high < batch)) 2530 return 1; 2531 2532 /* Leave at least pcp->batch pages on the list */ 2533 min_nr_free = batch; 2534 max_nr_free = high - batch; 2535 2536 /* 2537 * Increase the batch number to the number of the consecutive 2538 * freed pages to reduce zone lock contention. 2539 */ 2540 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2541 2542 return batch; 2543 } 2544 2545 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2546 int batch, bool free_high) 2547 { 2548 int high, high_min, high_max; 2549 2550 high_min = READ_ONCE(pcp->high_min); 2551 high_max = READ_ONCE(pcp->high_max); 2552 high = pcp->high = clamp(pcp->high, high_min, high_max); 2553 2554 if (unlikely(!high)) 2555 return 0; 2556 2557 if (unlikely(free_high)) { 2558 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2559 high_min); 2560 return 0; 2561 } 2562 2563 /* 2564 * If reclaim is active, limit the number of pages that can be 2565 * stored on pcp lists 2566 */ 2567 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2568 int free_count = max_t(int, pcp->free_count, batch); 2569 2570 pcp->high = max(high - free_count, high_min); 2571 return min(batch << 2, pcp->high); 2572 } 2573 2574 if (high_min == high_max) 2575 return high; 2576 2577 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2578 int free_count = max_t(int, pcp->free_count, batch); 2579 2580 pcp->high = max(high - free_count, high_min); 2581 high = max(pcp->count, high_min); 2582 } else if (pcp->count >= high) { 2583 int need_high = pcp->free_count + batch; 2584 2585 /* pcp->high should be large enough to hold batch freed pages */ 2586 if (pcp->high < need_high) 2587 pcp->high = clamp(need_high, high_min, high_max); 2588 } 2589 2590 return high; 2591 } 2592 2593 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2594 struct page *page, int migratetype, 2595 unsigned int order) 2596 { 2597 int high, batch; 2598 int pindex; 2599 bool free_high = false; 2600 2601 /* 2602 * On freeing, reduce the number of pages that are batch allocated. 2603 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2604 * allocations. 2605 */ 2606 pcp->alloc_factor >>= 1; 2607 __count_vm_events(PGFREE, 1 << order); 2608 pindex = order_to_pindex(migratetype, order); 2609 list_add(&page->pcp_list, &pcp->lists[pindex]); 2610 pcp->count += 1 << order; 2611 2612 batch = READ_ONCE(pcp->batch); 2613 /* 2614 * As high-order pages other than THP's stored on PCP can contribute 2615 * to fragmentation, limit the number stored when PCP is heavily 2616 * freeing without allocation. The remainder after bulk freeing 2617 * stops will be drained from vmstat refresh context. 
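 * "High-order" here means 0 < order <= PAGE_ALLOC_COSTLY_ORDER; order-0
 * and THP-sized frees take the other branch and clear
 * PCPF_PREV_FREE_HIGH_ORDER if it was set.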
2618 */ 2619 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2620 free_high = (pcp->free_count >= batch && 2621 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2622 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2623 pcp->count >= READ_ONCE(batch))); 2624 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2625 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2626 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2627 } 2628 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2629 pcp->free_count += (1 << order); 2630 high = nr_pcp_high(pcp, zone, batch, free_high); 2631 if (pcp->count >= high) { 2632 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), 2633 pcp, pindex); 2634 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2635 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2636 ZONE_MOVABLE, 0)) 2637 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2638 } 2639 } 2640 2641 /* 2642 * Free a pcp page 2643 */ 2644 void free_unref_page(struct page *page, unsigned int order) 2645 { 2646 unsigned long __maybe_unused UP_flags; 2647 struct per_cpu_pages *pcp; 2648 struct zone *zone; 2649 unsigned long pfn = page_to_pfn(page); 2650 int migratetype; 2651 2652 if (!pcp_allowed_order(order)) { 2653 __free_pages_ok(page, order, FPI_NONE); 2654 return; 2655 } 2656 2657 if (!free_pages_prepare(page, order)) 2658 return; 2659 2660 /* 2661 * We only track unmovable, reclaimable and movable on pcp lists. 2662 * Place ISOLATE pages on the isolated list because they are being 2663 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2664 * get those areas back if necessary. Otherwise, we may have to free 2665 * excessively into the page allocator 2666 */ 2667 migratetype = get_pfnblock_migratetype(page, pfn); 2668 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2669 if (unlikely(is_migrate_isolate(migratetype))) { 2670 free_one_page(page_zone(page), page, pfn, order, FPI_NONE); 2671 return; 2672 } 2673 migratetype = MIGRATE_MOVABLE; 2674 } 2675 2676 zone = page_zone(page); 2677 pcp_trylock_prepare(UP_flags); 2678 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2679 if (pcp) { 2680 free_unref_page_commit(zone, pcp, page, migratetype, order); 2681 pcp_spin_unlock(pcp); 2682 } else { 2683 free_one_page(zone, page, pfn, order, FPI_NONE); 2684 } 2685 pcp_trylock_finish(UP_flags); 2686 } 2687 2688 /* 2689 * Free a batch of folios 2690 */ 2691 void free_unref_folios(struct folio_batch *folios) 2692 { 2693 unsigned long __maybe_unused UP_flags; 2694 struct per_cpu_pages *pcp = NULL; 2695 struct zone *locked_zone = NULL; 2696 int i, j; 2697 2698 /* Prepare folios for freeing */ 2699 for (i = 0, j = 0; i < folios->nr; i++) { 2700 struct folio *folio = folios->folios[i]; 2701 unsigned long pfn = folio_pfn(folio); 2702 unsigned int order = folio_order(folio); 2703 2704 if (!free_pages_prepare(&folio->page, order)) 2705 continue; 2706 /* 2707 * Free orders not handled on the PCP directly to the 2708 * allocator. 
2709 */ 2710 if (!pcp_allowed_order(order)) { 2711 free_one_page(folio_zone(folio), &folio->page, 2712 pfn, order, FPI_NONE); 2713 continue; 2714 } 2715 folio->private = (void *)(unsigned long)order; 2716 if (j != i) 2717 folios->folios[j] = folio; 2718 j++; 2719 } 2720 folios->nr = j; 2721 2722 for (i = 0; i < folios->nr; i++) { 2723 struct folio *folio = folios->folios[i]; 2724 struct zone *zone = folio_zone(folio); 2725 unsigned long pfn = folio_pfn(folio); 2726 unsigned int order = (unsigned long)folio->private; 2727 int migratetype; 2728 2729 folio->private = NULL; 2730 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 2731 2732 /* Different zone requires a different pcp lock */ 2733 if (zone != locked_zone || 2734 is_migrate_isolate(migratetype)) { 2735 if (pcp) { 2736 pcp_spin_unlock(pcp); 2737 pcp_trylock_finish(UP_flags); 2738 locked_zone = NULL; 2739 pcp = NULL; 2740 } 2741 2742 /* 2743 * Free isolated pages directly to the 2744 * allocator, see comment in free_unref_page. 2745 */ 2746 if (is_migrate_isolate(migratetype)) { 2747 free_one_page(zone, &folio->page, pfn, 2748 order, FPI_NONE); 2749 continue; 2750 } 2751 2752 /* 2753 * trylock is necessary as folios may be getting freed 2754 * from IRQ or SoftIRQ context after an IO completion. 2755 */ 2756 pcp_trylock_prepare(UP_flags); 2757 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2758 if (unlikely(!pcp)) { 2759 pcp_trylock_finish(UP_flags); 2760 free_one_page(zone, &folio->page, pfn, 2761 order, FPI_NONE); 2762 continue; 2763 } 2764 locked_zone = zone; 2765 } 2766 2767 /* 2768 * Non-isolated types over MIGRATE_PCPTYPES get added 2769 * to the MIGRATE_MOVABLE pcp list. 2770 */ 2771 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2772 migratetype = MIGRATE_MOVABLE; 2773 2774 trace_mm_page_free_batched(&folio->page); 2775 free_unref_page_commit(zone, pcp, &folio->page, migratetype, 2776 order); 2777 } 2778 2779 if (pcp) { 2780 pcp_spin_unlock(pcp); 2781 pcp_trylock_finish(UP_flags); 2782 } 2783 folio_batch_reinit(folios); 2784 } 2785 2786 /* 2787 * split_page takes a non-compound higher-order page, and splits it into 2788 * n (1<<order) sub-pages: page[0..n] 2789 * Each sub-page must be freed individually. 2790 * 2791 * Note: this is probably too low level an operation for use in drivers. 2792 * Please consult with lkml before using this in your driver. 2793 */ 2794 void split_page(struct page *page, unsigned int order) 2795 { 2796 int i; 2797 2798 VM_BUG_ON_PAGE(PageCompound(page), page); 2799 VM_BUG_ON_PAGE(!page_count(page), page); 2800 2801 for (i = 1; i < (1 << order); i++) 2802 set_page_refcounted(page + i); 2803 split_page_owner(page, order, 0); 2804 pgalloc_tag_split(page_folio(page), order, 0); 2805 split_page_memcg(page, order, 0); 2806 } 2807 EXPORT_SYMBOL_GPL(split_page); 2808 2809 int __isolate_free_page(struct page *page, unsigned int order) 2810 { 2811 struct zone *zone = page_zone(page); 2812 int mt = get_pageblock_migratetype(page); 2813 2814 if (!is_migrate_isolate(mt)) { 2815 unsigned long watermark; 2816 /* 2817 * Obey watermarks as if the page was being allocated. We can 2818 * emulate a high-order watermark check with a raised order-0 2819 * watermark, because we already know our high-order page 2820 * exists. 
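 * For example, isolating an order-3 page must pass an order-0 check
 * against the min watermark raised by 8 pages.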
2821 */ 2822 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2823 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2824 return 0; 2825 } 2826 2827 del_page_from_free_list(page, zone, order, mt); 2828 2829 /* 2830 * Set the pageblock if the isolated page is at least half of a 2831 * pageblock 2832 */ 2833 if (order >= pageblock_order - 1) { 2834 struct page *endpage = page + (1 << order) - 1; 2835 for (; page < endpage; page += pageblock_nr_pages) { 2836 int mt = get_pageblock_migratetype(page); 2837 /* 2838 * Only change normal pageblocks (i.e., they can merge 2839 * with others) 2840 */ 2841 if (migratetype_is_mergeable(mt)) 2842 move_freepages_block(zone, page, mt, 2843 MIGRATE_MOVABLE); 2844 } 2845 } 2846 2847 return 1UL << order; 2848 } 2849 2850 /** 2851 * __putback_isolated_page - Return a now-isolated page back where we got it 2852 * @page: Page that was isolated 2853 * @order: Order of the isolated page 2854 * @mt: The page's pageblock's migratetype 2855 * 2856 * This function is meant to return a page pulled from the free lists via 2857 * __isolate_free_page back to the free lists they were pulled from. 2858 */ 2859 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2860 { 2861 struct zone *zone = page_zone(page); 2862 2863 /* zone lock should be held when this function is called */ 2864 lockdep_assert_held(&zone->lock); 2865 2866 /* Return isolated page to tail of freelist. */ 2867 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2868 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2869 } 2870 2871 /* 2872 * Update NUMA hit/miss statistics 2873 */ 2874 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2875 long nr_account) 2876 { 2877 #ifdef CONFIG_NUMA 2878 enum numa_stat_item local_stat = NUMA_LOCAL; 2879 2880 /* skip numa counters update if numa stats is disabled */ 2881 if (!static_branch_likely(&vm_numa_stat_key)) 2882 return; 2883 2884 if (zone_to_nid(z) != numa_node_id()) 2885 local_stat = NUMA_OTHER; 2886 2887 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2888 __count_numa_events(z, NUMA_HIT, nr_account); 2889 else { 2890 __count_numa_events(z, NUMA_MISS, nr_account); 2891 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2892 } 2893 __count_numa_events(z, local_stat, nr_account); 2894 #endif 2895 } 2896 2897 static __always_inline 2898 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2899 unsigned int order, unsigned int alloc_flags, 2900 int migratetype) 2901 { 2902 struct page *page; 2903 unsigned long flags; 2904 2905 do { 2906 page = NULL; 2907 spin_lock_irqsave(&zone->lock, flags); 2908 if (alloc_flags & ALLOC_HIGHATOMIC) 2909 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2910 if (!page) { 2911 page = __rmqueue(zone, order, migratetype, alloc_flags); 2912 2913 /* 2914 * If the allocation fails, allow OOM handling and 2915 * order-0 (atomic) allocs access to HIGHATOMIC 2916 * reserves as failing now is worse than failing a 2917 * high-order atomic allocation in the future. 
2918 */ 2919 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 2920 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2921 2922 if (!page) { 2923 spin_unlock_irqrestore(&zone->lock, flags); 2924 return NULL; 2925 } 2926 } 2927 spin_unlock_irqrestore(&zone->lock, flags); 2928 } while (check_new_pages(page, order)); 2929 2930 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2931 zone_statistics(preferred_zone, zone, 1); 2932 2933 return page; 2934 } 2935 2936 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 2937 { 2938 int high, base_batch, batch, max_nr_alloc; 2939 int high_max, high_min; 2940 2941 base_batch = READ_ONCE(pcp->batch); 2942 high_min = READ_ONCE(pcp->high_min); 2943 high_max = READ_ONCE(pcp->high_max); 2944 high = pcp->high = clamp(pcp->high, high_min, high_max); 2945 2946 /* Check for PCP disabled or boot pageset */ 2947 if (unlikely(high < base_batch)) 2948 return 1; 2949 2950 if (order) 2951 batch = base_batch; 2952 else 2953 batch = (base_batch << pcp->alloc_factor); 2954 2955 /* 2956 * If we had larger pcp->high, we could avoid to allocate from 2957 * zone. 2958 */ 2959 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 2960 high = pcp->high = min(high + batch, high_max); 2961 2962 if (!order) { 2963 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 2964 /* 2965 * Double the number of pages allocated each time there is 2966 * subsequent allocation of order-0 pages without any freeing. 2967 */ 2968 if (batch <= max_nr_alloc && 2969 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 2970 pcp->alloc_factor++; 2971 batch = min(batch, max_nr_alloc); 2972 } 2973 2974 /* 2975 * Scale batch relative to order if batch implies free pages 2976 * can be stored on the PCP. Batch can be 1 for small zones or 2977 * for boot pagesets which should never store free pages as 2978 * the pages may belong to arbitrary zones. 2979 */ 2980 if (batch > 1) 2981 batch = max(batch >> order, 2); 2982 2983 return batch; 2984 } 2985 2986 /* Remove page from the per-cpu list, caller must protect the list */ 2987 static inline 2988 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 2989 int migratetype, 2990 unsigned int alloc_flags, 2991 struct per_cpu_pages *pcp, 2992 struct list_head *list) 2993 { 2994 struct page *page; 2995 2996 do { 2997 if (list_empty(list)) { 2998 int batch = nr_pcp_alloc(pcp, zone, order); 2999 int alloced; 3000 3001 alloced = rmqueue_bulk(zone, order, 3002 batch, list, 3003 migratetype, alloc_flags); 3004 3005 pcp->count += alloced << order; 3006 if (unlikely(list_empty(list))) 3007 return NULL; 3008 } 3009 3010 page = list_first_entry(list, struct page, pcp_list); 3011 list_del(&page->pcp_list); 3012 pcp->count -= 1 << order; 3013 } while (check_new_pages(page, order)); 3014 3015 return page; 3016 } 3017 3018 /* Lock and remove page from the per-cpu list */ 3019 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3020 struct zone *zone, unsigned int order, 3021 int migratetype, unsigned int alloc_flags) 3022 { 3023 struct per_cpu_pages *pcp; 3024 struct list_head *list; 3025 struct page *page; 3026 unsigned long __maybe_unused UP_flags; 3027 3028 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
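 * If it does fail, rmqueue() falls back to rmqueue_buddy() instead of
 * spinning on the pcp lock.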
*/ 3029 pcp_trylock_prepare(UP_flags); 3030 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3031 if (!pcp) { 3032 pcp_trylock_finish(UP_flags); 3033 return NULL; 3034 } 3035 3036 /* 3037 * On allocation, reduce the number of pages that are batch freed. 3038 * See free_unref_page_commit() where free_count is increased for 3039 * subsequent frees. 3040 */ 3041 pcp->free_count >>= 1; 3042 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3043 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3044 pcp_spin_unlock(pcp); 3045 pcp_trylock_finish(UP_flags); 3046 if (page) { 3047 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3048 zone_statistics(preferred_zone, zone, 1); 3049 } 3050 return page; 3051 } 3052 3053 /* 3054 * Allocate a page from the given zone. 3055 * Use pcplists for THP or "cheap" high-order allocations. 3056 */ 3057 3058 /* 3059 * Do not instrument rmqueue() with KMSAN. This function may call 3060 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 3061 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3062 * may call rmqueue() again, which will result in a deadlock. 3063 */ 3064 __no_sanitize_memory 3065 static inline 3066 struct page *rmqueue(struct zone *preferred_zone, 3067 struct zone *zone, unsigned int order, 3068 gfp_t gfp_flags, unsigned int alloc_flags, 3069 int migratetype) 3070 { 3071 struct page *page; 3072 3073 if (likely(pcp_allowed_order(order))) { 3074 page = rmqueue_pcplist(preferred_zone, zone, order, 3075 migratetype, alloc_flags); 3076 if (likely(page)) 3077 goto out; 3078 } 3079 3080 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3081 migratetype); 3082 3083 out: 3084 /* Separate test+clear to avoid unnecessary atomics */ 3085 if ((alloc_flags & ALLOC_KSWAPD) && 3086 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3087 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3088 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3089 } 3090 3091 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3092 return page; 3093 } 3094 3095 static inline long __zone_watermark_unusable_free(struct zone *z, 3096 unsigned int order, unsigned int alloc_flags) 3097 { 3098 long unusable_free = (1 << order) - 1; 3099 3100 /* 3101 * If the caller does not have rights to reserves below the min 3102 * watermark then subtract the free pages reserved for highatomic. 3103 */ 3104 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3105 unusable_free += READ_ONCE(z->nr_free_highatomic); 3106 3107 #ifdef CONFIG_CMA 3108 /* If allocation can't use CMA areas don't use free CMA pages */ 3109 if (!(alloc_flags & ALLOC_CMA)) 3110 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3111 #endif 3112 3113 return unusable_free; 3114 } 3115 3116 /* 3117 * Return true if free base pages are above 'mark'. For high-order checks it 3118 * will return true if the order-0 watermark is reached and there is at least 3119 * one free page of a suitable size. Checking now avoids taking the zone lock 3120 * to check in the allocation paths if no pages are free.
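 * As an illustration of the reserve handling below: with a min watermark
 * of 128 pages, ALLOC_MIN_RESERVE (__GFP_HIGH) lowers the cut-off to 64
 * pages, and adding ALLOC_NON_BLOCK (e.g. GFP_ATOMIC) lowers it to 48.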
3121 */ 3122 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3123 int highest_zoneidx, unsigned int alloc_flags, 3124 long free_pages) 3125 { 3126 long min = mark; 3127 int o; 3128 3129 /* free_pages may go negative - that's OK */ 3130 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3131 3132 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3133 /* 3134 * __GFP_HIGH allows access to 50% of the min reserve as well 3135 * as OOM. 3136 */ 3137 if (alloc_flags & ALLOC_MIN_RESERVE) { 3138 min -= min / 2; 3139 3140 /* 3141 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3142 * access more reserves than just __GFP_HIGH. Other 3143 * non-blocking allocations requests such as GFP_NOWAIT 3144 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3145 * access to the min reserve. 3146 */ 3147 if (alloc_flags & ALLOC_NON_BLOCK) 3148 min -= min / 4; 3149 } 3150 3151 /* 3152 * OOM victims can try even harder than the normal reserve 3153 * users on the grounds that it's definitely going to be in 3154 * the exit path shortly and free memory. Any allocation it 3155 * makes during the free path will be small and short-lived. 3156 */ 3157 if (alloc_flags & ALLOC_OOM) 3158 min -= min / 2; 3159 } 3160 3161 /* 3162 * Check watermarks for an order-0 allocation request. If these 3163 * are not met, then a high-order request also cannot go ahead 3164 * even if a suitable page happened to be free. 3165 */ 3166 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3167 return false; 3168 3169 /* If this is an order-0 request then the watermark is fine */ 3170 if (!order) 3171 return true; 3172 3173 /* For a high-order request, check at least one suitable page is free */ 3174 for (o = order; o < NR_PAGE_ORDERS; o++) { 3175 struct free_area *area = &z->free_area[o]; 3176 int mt; 3177 3178 if (!area->nr_free) 3179 continue; 3180 3181 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3182 if (!free_area_empty(area, mt)) 3183 return true; 3184 } 3185 3186 #ifdef CONFIG_CMA 3187 if ((alloc_flags & ALLOC_CMA) && 3188 !free_area_empty(area, MIGRATE_CMA)) { 3189 return true; 3190 } 3191 #endif 3192 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3193 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3194 return true; 3195 } 3196 } 3197 return false; 3198 } 3199 3200 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3201 int highest_zoneidx, unsigned int alloc_flags) 3202 { 3203 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3204 zone_page_state(z, NR_FREE_PAGES)); 3205 } 3206 3207 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3208 unsigned long mark, int highest_zoneidx, 3209 unsigned int alloc_flags, gfp_t gfp_mask) 3210 { 3211 long free_pages; 3212 3213 free_pages = zone_page_state(z, NR_FREE_PAGES); 3214 3215 /* 3216 * Fast check for order-0 only. If this fails then the reserves 3217 * need to be calculated. 3218 */ 3219 if (!order) { 3220 long usable_free; 3221 long reserved; 3222 3223 usable_free = free_pages; 3224 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3225 3226 /* reserved may over estimate high-atomic reserves. 
*/ 3227 usable_free -= min(usable_free, reserved); 3228 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3229 return true; 3230 } 3231 3232 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3233 free_pages)) 3234 return true; 3235 3236 /* 3237 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3238 * when checking the min watermark. The min watermark is the 3239 * point where boosting is ignored so that kswapd is woken up 3240 * when below the low watermark. 3241 */ 3242 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3243 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3244 mark = z->_watermark[WMARK_MIN]; 3245 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3246 alloc_flags, free_pages); 3247 } 3248 3249 return false; 3250 } 3251 3252 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3253 unsigned long mark, int highest_zoneidx) 3254 { 3255 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3256 3257 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3258 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3259 3260 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3261 free_pages); 3262 } 3263 3264 #ifdef CONFIG_NUMA 3265 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3266 3267 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3268 { 3269 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3270 node_reclaim_distance; 3271 } 3272 #else /* CONFIG_NUMA */ 3273 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3274 { 3275 return true; 3276 } 3277 #endif /* CONFIG_NUMA */ 3278 3279 /* 3280 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3281 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3282 * premature use of a lower zone may cause lowmem pressure problems that 3283 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3284 * probably too small. It only makes sense to spread allocations to avoid 3285 * fragmentation between the Normal and DMA32 zones. 3286 */ 3287 static inline unsigned int 3288 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3289 { 3290 unsigned int alloc_flags; 3291 3292 /* 3293 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3294 * to save a branch. 3295 */ 3296 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3297 3298 #ifdef CONFIG_ZONE_DMA32 3299 if (!zone) 3300 return alloc_flags; 3301 3302 if (zone_idx(zone) != ZONE_NORMAL) 3303 return alloc_flags; 3304 3305 /* 3306 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3307 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3308 * on UMA that if Normal is populated then so is DMA32. 
3309 */ 3310 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3311 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3312 return alloc_flags; 3313 3314 alloc_flags |= ALLOC_NOFRAGMENT; 3315 #endif /* CONFIG_ZONE_DMA32 */ 3316 return alloc_flags; 3317 } 3318 3319 /* Must be called after current_gfp_context() which can change gfp_mask */ 3320 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3321 unsigned int alloc_flags) 3322 { 3323 #ifdef CONFIG_CMA 3324 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3325 alloc_flags |= ALLOC_CMA; 3326 #endif 3327 return alloc_flags; 3328 } 3329 3330 /* 3331 * get_page_from_freelist goes through the zonelist trying to allocate 3332 * a page. 3333 */ 3334 static struct page * 3335 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3336 const struct alloc_context *ac) 3337 { 3338 struct zoneref *z; 3339 struct zone *zone; 3340 struct pglist_data *last_pgdat = NULL; 3341 bool last_pgdat_dirty_ok = false; 3342 bool no_fallback; 3343 3344 retry: 3345 /* 3346 * Scan zonelist, looking for a zone with enough free. 3347 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 3348 */ 3349 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3350 z = ac->preferred_zoneref; 3351 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3352 ac->nodemask) { 3353 struct page *page; 3354 unsigned long mark; 3355 3356 if (cpusets_enabled() && 3357 (alloc_flags & ALLOC_CPUSET) && 3358 !__cpuset_zone_allowed(zone, gfp_mask)) 3359 continue; 3360 /* 3361 * When allocating a page cache page for writing, we 3362 * want to get it from a node that is within its dirty 3363 * limit, such that no single node holds more than its 3364 * proportional share of globally allowed dirty pages. 3365 * The dirty limits take into account the node's 3366 * lowmem reserves and high watermark so that kswapd 3367 * should be able to balance it without having to 3368 * write pages from its LRU list. 3369 * 3370 * XXX: For now, allow allocations to potentially 3371 * exceed the per-node dirty limit in the slowpath 3372 * (spread_dirty_pages unset) before going into reclaim, 3373 * which is important when on a NUMA setup the allowed 3374 * nodes are together not big enough to reach the 3375 * global limit. The proper fix for these situations 3376 * will require awareness of nodes in the 3377 * dirty-throttling and the flusher threads. 3378 */ 3379 if (ac->spread_dirty_pages) { 3380 if (last_pgdat != zone->zone_pgdat) { 3381 last_pgdat = zone->zone_pgdat; 3382 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3383 } 3384 3385 if (!last_pgdat_dirty_ok) 3386 continue; 3387 } 3388 3389 if (no_fallback && nr_online_nodes > 1 && 3390 zone != zonelist_zone(ac->preferred_zoneref)) { 3391 int local_nid; 3392 3393 /* 3394 * If moving to a remote node, retry but allow 3395 * fragmenting fallbacks. Locality is more important 3396 * than fragmentation avoidance. 3397 */ 3398 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3399 if (zone_to_nid(zone) != local_nid) { 3400 alloc_flags &= ~ALLOC_NOFRAGMENT; 3401 goto retry; 3402 } 3403 } 3404 3405 cond_accept_memory(zone, order); 3406 3407 /* 3408 * Detect whether the number of free pages is below high 3409 * watermark. If so, we will decrease pcp->high and free 3410 * PCP pages in free path to reduce the possibility of 3411 * premature page reclaiming. Detection is done here to 3412 * avoid to do that in hotter free path. 
3413 */ 3414 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3415 goto check_alloc_wmark; 3416 3417 mark = high_wmark_pages(zone); 3418 if (zone_watermark_fast(zone, order, mark, 3419 ac->highest_zoneidx, alloc_flags, 3420 gfp_mask)) 3421 goto try_this_zone; 3422 else 3423 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3424 3425 check_alloc_wmark: 3426 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3427 if (!zone_watermark_fast(zone, order, mark, 3428 ac->highest_zoneidx, alloc_flags, 3429 gfp_mask)) { 3430 int ret; 3431 3432 if (cond_accept_memory(zone, order)) 3433 goto try_this_zone; 3434 3435 /* 3436 * Watermark failed for this zone, but see if we can 3437 * grow this zone if it contains deferred pages. 3438 */ 3439 if (deferred_pages_enabled()) { 3440 if (_deferred_grow_zone(zone, order)) 3441 goto try_this_zone; 3442 } 3443 /* Checked here to keep the fast path fast */ 3444 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3445 if (alloc_flags & ALLOC_NO_WATERMARKS) 3446 goto try_this_zone; 3447 3448 if (!node_reclaim_enabled() || 3449 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3450 continue; 3451 3452 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3453 switch (ret) { 3454 case NODE_RECLAIM_NOSCAN: 3455 /* did not scan */ 3456 continue; 3457 case NODE_RECLAIM_FULL: 3458 /* scanned but unreclaimable */ 3459 continue; 3460 default: 3461 /* did we reclaim enough */ 3462 if (zone_watermark_ok(zone, order, mark, 3463 ac->highest_zoneidx, alloc_flags)) 3464 goto try_this_zone; 3465 3466 continue; 3467 } 3468 } 3469 3470 try_this_zone: 3471 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3472 gfp_mask, alloc_flags, ac->migratetype); 3473 if (page) { 3474 prep_new_page(page, order, gfp_mask, alloc_flags); 3475 3476 /* 3477 * If this is a high-order atomic allocation then check 3478 * if the pageblock should be reserved for the future 3479 */ 3480 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3481 reserve_highatomic_pageblock(page, order, zone); 3482 3483 return page; 3484 } else { 3485 if (cond_accept_memory(zone, order)) 3486 goto try_this_zone; 3487 3488 /* Try again if zone has deferred pages */ 3489 if (deferred_pages_enabled()) { 3490 if (_deferred_grow_zone(zone, order)) 3491 goto try_this_zone; 3492 } 3493 } 3494 } 3495 3496 /* 3497 * It's possible on a UMA machine to get through all zones that are 3498 * fragmented. If avoiding fragmentation, reset and try again. 3499 */ 3500 if (no_fallback) { 3501 alloc_flags &= ~ALLOC_NOFRAGMENT; 3502 goto retry; 3503 } 3504 3505 return NULL; 3506 } 3507 3508 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3509 { 3510 unsigned int filter = SHOW_MEM_FILTER_NODES; 3511 3512 /* 3513 * This documents exceptions given to allocations in certain 3514 * contexts that are allowed to allocate outside current's set 3515 * of allowed nodes. 3516 */ 3517 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3518 if (tsk_is_oom_victim(current) || 3519 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3520 filter &= ~SHOW_MEM_FILTER_NODES; 3521 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3522 filter &= ~SHOW_MEM_FILTER_NODES; 3523 3524 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3525 } 3526 3527 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3528 { 3529 struct va_format vaf; 3530 va_list args; 3531 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3532 3533 if ((gfp_mask & __GFP_NOWARN) || 3534 !__ratelimit(&nopage_rs) || 3535 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3536 return; 3537 3538 va_start(args, fmt); 3539 vaf.fmt = fmt; 3540 vaf.va = &args; 3541 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3542 current->comm, &vaf, gfp_mask, &gfp_mask, 3543 nodemask_pr_args(nodemask)); 3544 va_end(args); 3545 3546 cpuset_print_current_mems_allowed(); 3547 pr_cont("\n"); 3548 dump_stack(); 3549 warn_alloc_show_mem(gfp_mask, nodemask); 3550 } 3551 3552 static inline struct page * 3553 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3554 unsigned int alloc_flags, 3555 const struct alloc_context *ac) 3556 { 3557 struct page *page; 3558 3559 page = get_page_from_freelist(gfp_mask, order, 3560 alloc_flags|ALLOC_CPUSET, ac); 3561 /* 3562 * fallback to ignore cpuset restriction if our nodes 3563 * are depleted 3564 */ 3565 if (!page) 3566 page = get_page_from_freelist(gfp_mask, order, 3567 alloc_flags, ac); 3568 3569 return page; 3570 } 3571 3572 static inline struct page * 3573 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3574 const struct alloc_context *ac, unsigned long *did_some_progress) 3575 { 3576 struct oom_control oc = { 3577 .zonelist = ac->zonelist, 3578 .nodemask = ac->nodemask, 3579 .memcg = NULL, 3580 .gfp_mask = gfp_mask, 3581 .order = order, 3582 }; 3583 struct page *page; 3584 3585 *did_some_progress = 0; 3586 3587 /* 3588 * Acquire the oom lock. If that fails, somebody else is 3589 * making progress for us. 3590 */ 3591 if (!mutex_trylock(&oom_lock)) { 3592 *did_some_progress = 1; 3593 schedule_timeout_uninterruptible(1); 3594 return NULL; 3595 } 3596 3597 /* 3598 * Go through the zonelist yet one more time, keep very high watermark 3599 * here, this is only to catch a parallel oom killing, we must fail if 3600 * we're still under heavy pressure. But make sure that this reclaim 3601 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3602 * allocation which will never fail due to oom_lock already held. 3603 */ 3604 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3605 ~__GFP_DIRECT_RECLAIM, order, 3606 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3607 if (page) 3608 goto out; 3609 3610 /* Coredumps can quickly deplete all memory reserves */ 3611 if (current->flags & PF_DUMPCORE) 3612 goto out; 3613 /* The OOM killer will not help higher order allocs */ 3614 if (order > PAGE_ALLOC_COSTLY_ORDER) 3615 goto out; 3616 /* 3617 * We have already exhausted all our reclaim opportunities without any 3618 * success so it is time to admit defeat. We will skip the OOM killer 3619 * because it is very likely that the caller has a more reasonable 3620 * fallback than shooting a random task. 3621 * 3622 * The OOM killer may not free memory on a specific node. 3623 */ 3624 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3625 goto out; 3626 /* The OOM killer does not needlessly kill tasks for lowmem */ 3627 if (ac->highest_zoneidx < ZONE_NORMAL) 3628 goto out; 3629 if (pm_suspended_storage()) 3630 goto out; 3631 /* 3632 * XXX: GFP_NOFS allocations should rather fail than rely on 3633 * other request to make a forward progress. 3634 * We are in an unfortunate situation where out_of_memory cannot 3635 * do much for this context but let's try it to at least get 3636 * access to memory reserved if the current task is killed (see 3637 * out_of_memory). 
Once filesystems are ready to handle allocation 3638 * failures more gracefully we should just bail out here. 3639 */ 3640 3641 /* Exhausted what can be done so it's blame time */ 3642 if (out_of_memory(&oc) || 3643 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 3644 *did_some_progress = 1; 3645 3646 /* 3647 * Help non-failing allocations by giving them access to memory 3648 * reserves 3649 */ 3650 if (gfp_mask & __GFP_NOFAIL) 3651 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3652 ALLOC_NO_WATERMARKS, ac); 3653 } 3654 out: 3655 mutex_unlock(&oom_lock); 3656 return page; 3657 } 3658 3659 /* 3660 * Maximum number of compaction retries with a progress before OOM 3661 * killer is consider as the only way to move forward. 3662 */ 3663 #define MAX_COMPACT_RETRIES 16 3664 3665 #ifdef CONFIG_COMPACTION 3666 /* Try memory compaction for high-order allocations before reclaim */ 3667 static struct page * 3668 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3669 unsigned int alloc_flags, const struct alloc_context *ac, 3670 enum compact_priority prio, enum compact_result *compact_result) 3671 { 3672 struct page *page = NULL; 3673 unsigned long pflags; 3674 unsigned int noreclaim_flag; 3675 3676 if (!order) 3677 return NULL; 3678 3679 psi_memstall_enter(&pflags); 3680 delayacct_compact_start(); 3681 noreclaim_flag = memalloc_noreclaim_save(); 3682 3683 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3684 prio, &page); 3685 3686 memalloc_noreclaim_restore(noreclaim_flag); 3687 psi_memstall_leave(&pflags); 3688 delayacct_compact_end(); 3689 3690 if (*compact_result == COMPACT_SKIPPED) 3691 return NULL; 3692 /* 3693 * At least in one zone compaction wasn't deferred or skipped, so let's 3694 * count a compaction stall 3695 */ 3696 count_vm_event(COMPACTSTALL); 3697 3698 /* Prep a captured page if available */ 3699 if (page) 3700 prep_new_page(page, order, gfp_mask, alloc_flags); 3701 3702 /* Try get a page from the freelist if available */ 3703 if (!page) 3704 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3705 3706 if (page) { 3707 struct zone *zone = page_zone(page); 3708 3709 zone->compact_blockskip_flush = false; 3710 compaction_defer_reset(zone, order, true); 3711 count_vm_event(COMPACTSUCCESS); 3712 return page; 3713 } 3714 3715 /* 3716 * It's bad if compaction run occurs and fails. The most likely reason 3717 * is that pages exist, but not enough to satisfy watermarks. 3718 */ 3719 count_vm_event(COMPACTFAIL); 3720 3721 cond_resched(); 3722 3723 return NULL; 3724 } 3725 3726 static inline bool 3727 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3728 enum compact_result compact_result, 3729 enum compact_priority *compact_priority, 3730 int *compaction_retries) 3731 { 3732 int max_retries = MAX_COMPACT_RETRIES; 3733 int min_priority; 3734 bool ret = false; 3735 int retries = *compaction_retries; 3736 enum compact_priority priority = *compact_priority; 3737 3738 if (!order) 3739 return false; 3740 3741 if (fatal_signal_pending(current)) 3742 return false; 3743 3744 /* 3745 * Compaction was skipped due to a lack of free order-0 3746 * migration targets. Continue if reclaim can help. 3747 */ 3748 if (compact_result == COMPACT_SKIPPED) { 3749 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3750 goto out; 3751 } 3752 3753 /* 3754 * Compaction managed to coalesce some page blocks, but the 3755 * allocation failed presumably due to a race. Retry some. 
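 * The overall budget is MAX_COMPACT_RETRIES attempts, reduced to a
 * quarter of that for costly orders, as implemented below.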
3756 */ 3757 if (compact_result == COMPACT_SUCCESS) { 3758 /* 3759 * !costly requests are much more important than 3760 * __GFP_RETRY_MAYFAIL costly ones because they are de 3761 * facto nofail and invoke OOM killer to move on while 3762 * costly can fail and users are ready to cope with 3763 * that. 1/4 retries is rather arbitrary but we would 3764 * need much more detailed feedback from compaction to 3765 * make a better decision. 3766 */ 3767 if (order > PAGE_ALLOC_COSTLY_ORDER) 3768 max_retries /= 4; 3769 3770 if (++(*compaction_retries) <= max_retries) { 3771 ret = true; 3772 goto out; 3773 } 3774 } 3775 3776 /* 3777 * Compaction failed. Retry with increasing priority. 3778 */ 3779 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3780 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3781 3782 if (*compact_priority > min_priority) { 3783 (*compact_priority)--; 3784 *compaction_retries = 0; 3785 ret = true; 3786 } 3787 out: 3788 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3789 return ret; 3790 } 3791 #else 3792 static inline struct page * 3793 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3794 unsigned int alloc_flags, const struct alloc_context *ac, 3795 enum compact_priority prio, enum compact_result *compact_result) 3796 { 3797 *compact_result = COMPACT_SKIPPED; 3798 return NULL; 3799 } 3800 3801 static inline bool 3802 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3803 enum compact_result compact_result, 3804 enum compact_priority *compact_priority, 3805 int *compaction_retries) 3806 { 3807 struct zone *zone; 3808 struct zoneref *z; 3809 3810 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3811 return false; 3812 3813 /* 3814 * There are setups with compaction disabled which would prefer to loop 3815 * inside the allocator rather than hit the oom killer prematurely. 3816 * Let's give them a good hope and keep retrying while the order-0 3817 * watermarks are OK. 
3818 */ 3819 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3820 ac->highest_zoneidx, ac->nodemask) { 3821 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3822 ac->highest_zoneidx, alloc_flags)) 3823 return true; 3824 } 3825 return false; 3826 } 3827 #endif /* CONFIG_COMPACTION */ 3828 3829 #ifdef CONFIG_LOCKDEP 3830 static struct lockdep_map __fs_reclaim_map = 3831 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3832 3833 static bool __need_reclaim(gfp_t gfp_mask) 3834 { 3835 /* no reclaim without waiting on it */ 3836 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3837 return false; 3838 3839 /* this guy won't enter reclaim */ 3840 if (current->flags & PF_MEMALLOC) 3841 return false; 3842 3843 if (gfp_mask & __GFP_NOLOCKDEP) 3844 return false; 3845 3846 return true; 3847 } 3848 3849 void __fs_reclaim_acquire(unsigned long ip) 3850 { 3851 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3852 } 3853 3854 void __fs_reclaim_release(unsigned long ip) 3855 { 3856 lock_release(&__fs_reclaim_map, ip); 3857 } 3858 3859 void fs_reclaim_acquire(gfp_t gfp_mask) 3860 { 3861 gfp_mask = current_gfp_context(gfp_mask); 3862 3863 if (__need_reclaim(gfp_mask)) { 3864 if (gfp_mask & __GFP_FS) 3865 __fs_reclaim_acquire(_RET_IP_); 3866 3867 #ifdef CONFIG_MMU_NOTIFIER 3868 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3869 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3870 #endif 3871 3872 } 3873 } 3874 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3875 3876 void fs_reclaim_release(gfp_t gfp_mask) 3877 { 3878 gfp_mask = current_gfp_context(gfp_mask); 3879 3880 if (__need_reclaim(gfp_mask)) { 3881 if (gfp_mask & __GFP_FS) 3882 __fs_reclaim_release(_RET_IP_); 3883 } 3884 } 3885 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3886 #endif 3887 3888 /* 3889 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3890 * have been rebuilt so allocation retries. Reader side does not lock and 3891 * retries the allocation if zonelist changes. Writer side is protected by the 3892 * embedded spin_lock. 
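 *
 * The intended reader-side pattern is roughly the following sketch (the
 * real user is __alloc_pages_slowpath() further below):
 *
 *	cookie = zonelist_iter_begin();
 *	... pick the preferred zoneref and attempt the allocation ...
 *	if (!page && check_retry_zonelist(cookie))
 *		goto restart;
 *
 * check_retry_zonelist() never requests a retry unless
 * CONFIG_MEMORY_HOTREMOVE is enabled.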
3893 */ 3894 static DEFINE_SEQLOCK(zonelist_update_seq); 3895 3896 static unsigned int zonelist_iter_begin(void) 3897 { 3898 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3899 return read_seqbegin(&zonelist_update_seq); 3900 3901 return 0; 3902 } 3903 3904 static unsigned int check_retry_zonelist(unsigned int seq) 3905 { 3906 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3907 return read_seqretry(&zonelist_update_seq, seq); 3908 3909 return seq; 3910 } 3911 3912 /* Perform direct synchronous page reclaim */ 3913 static unsigned long 3914 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3915 const struct alloc_context *ac) 3916 { 3917 unsigned int noreclaim_flag; 3918 unsigned long progress; 3919 3920 cond_resched(); 3921 3922 /* We now go into synchronous reclaim */ 3923 cpuset_memory_pressure_bump(); 3924 fs_reclaim_acquire(gfp_mask); 3925 noreclaim_flag = memalloc_noreclaim_save(); 3926 3927 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3928 ac->nodemask); 3929 3930 memalloc_noreclaim_restore(noreclaim_flag); 3931 fs_reclaim_release(gfp_mask); 3932 3933 cond_resched(); 3934 3935 return progress; 3936 } 3937 3938 /* The really slow allocator path where we enter direct reclaim */ 3939 static inline struct page * 3940 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3941 unsigned int alloc_flags, const struct alloc_context *ac, 3942 unsigned long *did_some_progress) 3943 { 3944 struct page *page = NULL; 3945 unsigned long pflags; 3946 bool drained = false; 3947 3948 psi_memstall_enter(&pflags); 3949 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3950 if (unlikely(!(*did_some_progress))) 3951 goto out; 3952 3953 retry: 3954 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3955 3956 /* 3957 * If an allocation failed after direct reclaim, it could be because 3958 * pages are pinned on the per-cpu lists or in high alloc reserves. 3959 * Shrink them and try again 3960 */ 3961 if (!page && !drained) { 3962 unreserve_highatomic_pageblock(ac, false); 3963 drain_all_pages(NULL); 3964 drained = true; 3965 goto retry; 3966 } 3967 out: 3968 psi_memstall_leave(&pflags); 3969 3970 return page; 3971 } 3972 3973 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 3974 const struct alloc_context *ac) 3975 { 3976 struct zoneref *z; 3977 struct zone *zone; 3978 pg_data_t *last_pgdat = NULL; 3979 enum zone_type highest_zoneidx = ac->highest_zoneidx; 3980 3981 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 3982 ac->nodemask) { 3983 if (!managed_zone(zone)) 3984 continue; 3985 if (last_pgdat != zone->zone_pgdat) { 3986 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 3987 last_pgdat = zone->zone_pgdat; 3988 } 3989 } 3990 } 3991 3992 static inline unsigned int 3993 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 3994 { 3995 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3996 3997 /* 3998 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 3999 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4000 * to save two branches. 4001 */ 4002 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4003 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4004 4005 /* 4006 * The caller may dip into page reserves a bit more if the caller 4007 * cannot run direct reclaim, or if the caller has realtime scheduling 4008 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4009 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
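 *
 * A worked example (illustrative, relying on GFP_ATOMIC being defined as
 * __GFP_HIGH | __GFP_KSWAPD_RECLAIM): the bitwise copy below yields
 * ALLOC_MIN_RESERVE | ALLOC_KSWAPD, and since __GFP_DIRECT_RECLAIM is
 * clear the branch that follows adds ALLOC_NON_BLOCK (plus
 * ALLOC_HIGHATOMIC for order > 0) and clears ALLOC_CPUSET.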
4010 */ 4011 alloc_flags |= (__force int) 4012 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4013 4014 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4015 /* 4016 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4017 * if it can't schedule. 4018 */ 4019 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4020 alloc_flags |= ALLOC_NON_BLOCK; 4021 4022 if (order > 0) 4023 alloc_flags |= ALLOC_HIGHATOMIC; 4024 } 4025 4026 /* 4027 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4028 * GFP_ATOMIC) rather than fail, see the comment for 4029 * cpuset_node_allowed(). 4030 */ 4031 if (alloc_flags & ALLOC_MIN_RESERVE) 4032 alloc_flags &= ~ALLOC_CPUSET; 4033 } else if (unlikely(rt_or_dl_task(current)) && in_task()) 4034 alloc_flags |= ALLOC_MIN_RESERVE; 4035 4036 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4037 4038 return alloc_flags; 4039 } 4040 4041 static bool oom_reserves_allowed(struct task_struct *tsk) 4042 { 4043 if (!tsk_is_oom_victim(tsk)) 4044 return false; 4045 4046 /* 4047 * !MMU doesn't have oom reaper so give access to memory reserves 4048 * only to the thread with TIF_MEMDIE set 4049 */ 4050 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4051 return false; 4052 4053 return true; 4054 } 4055 4056 /* 4057 * Distinguish requests which really need access to full memory 4058 * reserves from oom victims which can live with a portion of it 4059 */ 4060 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4061 { 4062 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4063 return 0; 4064 if (gfp_mask & __GFP_MEMALLOC) 4065 return ALLOC_NO_WATERMARKS; 4066 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4067 return ALLOC_NO_WATERMARKS; 4068 if (!in_interrupt()) { 4069 if (current->flags & PF_MEMALLOC) 4070 return ALLOC_NO_WATERMARKS; 4071 else if (oom_reserves_allowed(current)) 4072 return ALLOC_OOM; 4073 } 4074 4075 return 0; 4076 } 4077 4078 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4079 { 4080 return !!__gfp_pfmemalloc_flags(gfp_mask); 4081 } 4082 4083 /* 4084 * Checks whether it makes sense to retry the reclaim to make a forward progress 4085 * for the given allocation request. 4086 * 4087 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4088 * without success, or when we couldn't even meet the watermark if we 4089 * reclaimed all remaining pages on the LRU lists. 4090 * 4091 * Returns true if a retry is viable or false to enter the oom path. 4092 */ 4093 static inline bool 4094 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4095 struct alloc_context *ac, int alloc_flags, 4096 bool did_some_progress, int *no_progress_loops) 4097 { 4098 struct zone *zone; 4099 struct zoneref *z; 4100 bool ret = false; 4101 4102 /* 4103 * Costly allocations might have made a progress but this doesn't mean 4104 * their order will become available due to high fragmentation so 4105 * always increment the no progress counter for them 4106 */ 4107 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4108 *no_progress_loops = 0; 4109 else 4110 (*no_progress_loops)++; 4111 4112 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4113 goto out; 4114 4115 4116 /* 4117 * Keep reclaiming pages while there is a chance this will lead 4118 * somewhere. If none of the target zones can satisfy our allocation 4119 * request even if all reclaimable pages are considered then we are 4120 * screwed and have to go OOM. 
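 *
 * Concretely, the loop below forms an optimistic per-zone estimate
 *
 *	available = zone_reclaimable_pages(zone) +
 *		    zone_page_state_snapshot(zone, NR_FREE_PAGES)
 *
 * and a retry is only considered viable if __zone_watermark_ok() would
 * pass for the requested order against the min watermark with that
 * estimate.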
4121 */ 4122 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4123 ac->highest_zoneidx, ac->nodemask) { 4124 unsigned long available; 4125 unsigned long reclaimable; 4126 unsigned long min_wmark = min_wmark_pages(zone); 4127 bool wmark; 4128 4129 if (cpusets_enabled() && 4130 (alloc_flags & ALLOC_CPUSET) && 4131 !__cpuset_zone_allowed(zone, gfp_mask)) 4132 continue; 4133 4134 available = reclaimable = zone_reclaimable_pages(zone); 4135 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4136 4137 /* 4138 * Would the allocation succeed if we reclaimed all 4139 * reclaimable pages? 4140 */ 4141 wmark = __zone_watermark_ok(zone, order, min_wmark, 4142 ac->highest_zoneidx, alloc_flags, available); 4143 trace_reclaim_retry_zone(z, order, reclaimable, 4144 available, min_wmark, *no_progress_loops, wmark); 4145 if (wmark) { 4146 ret = true; 4147 break; 4148 } 4149 } 4150 4151 /* 4152 * Memory allocation/reclaim might be called from a WQ context and the 4153 * current implementation of the WQ concurrency control doesn't 4154 * recognize that a particular WQ is congested if the worker thread is 4155 * looping without ever sleeping. Therefore we have to do a short sleep 4156 * here rather than calling cond_resched(). 4157 */ 4158 if (current->flags & PF_WQ_WORKER) 4159 schedule_timeout_uninterruptible(1); 4160 else 4161 cond_resched(); 4162 out: 4163 /* Before OOM, exhaust highatomic_reserve */ 4164 if (!ret) 4165 return unreserve_highatomic_pageblock(ac, true); 4166 4167 return ret; 4168 } 4169 4170 static inline bool 4171 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4172 { 4173 /* 4174 * It's possible that cpuset's mems_allowed and the nodemask from 4175 * mempolicy don't intersect. This should be normally dealt with by 4176 * policy_nodemask(), but it's possible to race with cpuset update in 4177 * such a way the check therein was true, and then it became false 4178 * before we got our cpuset_mems_cookie here. 4179 * This assumes that for all allocations, ac->nodemask can come only 4180 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4181 * when it does not intersect with the cpuset restrictions) or the 4182 * caller can deal with a violated nodemask. 4183 */ 4184 if (cpusets_enabled() && ac->nodemask && 4185 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4186 ac->nodemask = NULL; 4187 return true; 4188 } 4189 4190 /* 4191 * When updating a task's mems_allowed or mempolicy nodemask, it is 4192 * possible to race with parallel threads in such a way that our 4193 * allocation can fail while the mask is being updated. If we are about 4194 * to fail, check if the cpuset changed during allocation and if so, 4195 * retry. 
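 *
 * The cookie follows the usual seqcount-style retry pattern; a sketch
 * (the real caller is __alloc_pages_slowpath()):
 *
 *	cookie = read_mems_allowed_begin();
 *	... attempt the allocation ...
 *	if (!page && check_retry_cpuset(cookie, ac))
 *		goto restart;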
4196 */ 4197 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4198 return true; 4199 4200 return false; 4201 } 4202 4203 static inline struct page * 4204 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4205 struct alloc_context *ac) 4206 { 4207 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4208 bool can_compact = gfp_compaction_allowed(gfp_mask); 4209 bool nofail = gfp_mask & __GFP_NOFAIL; 4210 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4211 struct page *page = NULL; 4212 unsigned int alloc_flags; 4213 unsigned long did_some_progress; 4214 enum compact_priority compact_priority; 4215 enum compact_result compact_result; 4216 int compaction_retries; 4217 int no_progress_loops; 4218 unsigned int cpuset_mems_cookie; 4219 unsigned int zonelist_iter_cookie; 4220 int reserve_flags; 4221 4222 if (unlikely(nofail)) { 4223 /* 4224 * We most definitely don't want callers attempting to 4225 * allocate greater than order-1 page units with __GFP_NOFAIL. 4226 */ 4227 WARN_ON_ONCE(order > 1); 4228 /* 4229 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM, 4230 * otherwise, we may result in lockup. 4231 */ 4232 WARN_ON_ONCE(!can_direct_reclaim); 4233 /* 4234 * PF_MEMALLOC request from this context is rather bizarre 4235 * because we cannot reclaim anything and only can loop waiting 4236 * for somebody to do a work for us. 4237 */ 4238 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4239 } 4240 4241 restart: 4242 compaction_retries = 0; 4243 no_progress_loops = 0; 4244 compact_priority = DEF_COMPACT_PRIORITY; 4245 cpuset_mems_cookie = read_mems_allowed_begin(); 4246 zonelist_iter_cookie = zonelist_iter_begin(); 4247 4248 /* 4249 * The fast path uses conservative alloc_flags to succeed only until 4250 * kswapd needs to be woken up, and to avoid the cost of setting up 4251 * alloc_flags precisely. So we do that now. 4252 */ 4253 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4254 4255 /* 4256 * We need to recalculate the starting point for the zonelist iterator 4257 * because we might have used different nodemask in the fast path, or 4258 * there was a cpuset modification and we are retrying - otherwise we 4259 * could end up iterating over non-eligible zones endlessly. 4260 */ 4261 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4262 ac->highest_zoneidx, ac->nodemask); 4263 if (!zonelist_zone(ac->preferred_zoneref)) 4264 goto nopage; 4265 4266 /* 4267 * Check for insane configurations where the cpuset doesn't contain 4268 * any suitable zone to satisfy the request - e.g. non-movable 4269 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4270 */ 4271 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4272 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4273 ac->highest_zoneidx, 4274 &cpuset_current_mems_allowed); 4275 if (!zonelist_zone(z)) 4276 goto nopage; 4277 } 4278 4279 if (alloc_flags & ALLOC_KSWAPD) 4280 wake_all_kswapds(order, gfp_mask, ac); 4281 4282 /* 4283 * The adjusted alloc_flags might result in immediate success, so try 4284 * that first 4285 */ 4286 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4287 if (page) 4288 goto got_pg; 4289 4290 /* 4291 * For costly allocations, try direct compaction first, as it's likely 4292 * that we have enough base pages and don't need to reclaim. For non- 4293 * movable high-order allocations, do that as well, as compaction will 4294 * try prevent permanent fragmentation by migrating from blocks of the 4295 * same migratetype. 
4296 * Don't try this for allocations that are allowed to ignore 4297 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4298 */ 4299 if (can_direct_reclaim && can_compact && 4300 (costly_order || 4301 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4302 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4303 page = __alloc_pages_direct_compact(gfp_mask, order, 4304 alloc_flags, ac, 4305 INIT_COMPACT_PRIORITY, 4306 &compact_result); 4307 if (page) 4308 goto got_pg; 4309 4310 /* 4311 * Checks for costly allocations with __GFP_NORETRY, which 4312 * includes some THP page fault allocations 4313 */ 4314 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4315 /* 4316 * If allocating entire pageblock(s) and compaction 4317 * failed because all zones are below low watermarks 4318 * or is prohibited because it recently failed at this 4319 * order, fail immediately unless the allocator has 4320 * requested compaction and reclaim retry. 4321 * 4322 * Reclaim is 4323 * - potentially very expensive because zones are far 4324 * below their low watermarks or this is part of very 4325 * bursty high order allocations, 4326 * - not guaranteed to help because isolate_freepages() 4327 * may not iterate over freed pages as part of its 4328 * linear scan, and 4329 * - unlikely to make entire pageblocks free on its 4330 * own. 4331 */ 4332 if (compact_result == COMPACT_SKIPPED || 4333 compact_result == COMPACT_DEFERRED) 4334 goto nopage; 4335 4336 /* 4337 * Looks like reclaim/compaction is worth trying, but 4338 * sync compaction could be very expensive, so keep 4339 * using async compaction. 4340 */ 4341 compact_priority = INIT_COMPACT_PRIORITY; 4342 } 4343 } 4344 4345 retry: 4346 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4347 if (alloc_flags & ALLOC_KSWAPD) 4348 wake_all_kswapds(order, gfp_mask, ac); 4349 4350 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4351 if (reserve_flags) 4352 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4353 (alloc_flags & ALLOC_KSWAPD); 4354 4355 /* 4356 * Reset the nodemask and zonelist iterators if memory policies can be 4357 * ignored. These allocations are high priority and system rather than 4358 * user oriented. 
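 *
 * For example (illustrative): an OOM victim granted ALLOC_OOM, or a
 * __GFP_MEMALLOC caller granted ALLOC_NO_WATERMARKS, arrives here with
 * reserve_flags set and therefore gets its nodemask dropped so the
 * emergency allocation is not confined to the original policy nodes.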
4359 */ 4360 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4361 ac->nodemask = NULL; 4362 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4363 ac->highest_zoneidx, ac->nodemask); 4364 } 4365 4366 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4367 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4368 if (page) 4369 goto got_pg; 4370 4371 /* Caller is not willing to reclaim, we can't balance anything */ 4372 if (!can_direct_reclaim) 4373 goto nopage; 4374 4375 /* Avoid recursion of direct reclaim */ 4376 if (current->flags & PF_MEMALLOC) 4377 goto nopage; 4378 4379 /* Try direct reclaim and then allocating */ 4380 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4381 &did_some_progress); 4382 if (page) 4383 goto got_pg; 4384 4385 /* Try direct compaction and then allocating */ 4386 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4387 compact_priority, &compact_result); 4388 if (page) 4389 goto got_pg; 4390 4391 /* Do not loop if specifically requested */ 4392 if (gfp_mask & __GFP_NORETRY) 4393 goto nopage; 4394 4395 /* 4396 * Do not retry costly high order allocations unless they are 4397 * __GFP_RETRY_MAYFAIL and we can compact 4398 */ 4399 if (costly_order && (!can_compact || 4400 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4401 goto nopage; 4402 4403 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4404 did_some_progress > 0, &no_progress_loops)) 4405 goto retry; 4406 4407 /* 4408 * It doesn't make any sense to retry compaction if order-0 4409 * reclaim is not able to make any progress, because the current 4410 * implementation of compaction depends on a sufficient amount 4411 * of free memory (see __compaction_suitable) 4412 */ 4413 if (did_some_progress > 0 && can_compact && 4414 should_compact_retry(ac, order, alloc_flags, 4415 compact_result, &compact_priority, 4416 &compaction_retries)) 4417 goto retry; 4418 4419 4420 /* 4421 * Deal with possible cpuset update races or zonelist updates to avoid 4422 * an unnecessary OOM kill. 4423 */ 4424 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4425 check_retry_zonelist(zonelist_iter_cookie)) 4426 goto restart; 4427 4428 /* Reclaim has failed us, start killing things */ 4429 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4430 if (page) 4431 goto got_pg; 4432 4433 /* Avoid allocations with no watermarks from looping endlessly */ 4434 if (tsk_is_oom_victim(current) && 4435 (alloc_flags & ALLOC_OOM || 4436 (gfp_mask & __GFP_NOMEMALLOC))) 4437 goto nopage; 4438 4439 /* Retry as long as the OOM killer is making progress */ 4440 if (did_some_progress) { 4441 no_progress_loops = 0; 4442 goto retry; 4443 } 4444 4445 nopage: 4446 /* 4447 * Deal with possible cpuset update races or zonelist updates to avoid 4448 * an unnecessary OOM kill.
4449 */ 4450 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4451 check_retry_zonelist(zonelist_iter_cookie)) 4452 goto restart; 4453 4454 /* 4455 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4456 * we always retry 4457 */ 4458 if (unlikely(nofail)) { 4459 /* 4460 * Lacking direct_reclaim we can't do anything to reclaim memory, 4461 * we disregard these unreasonable nofail requests and still 4462 * return NULL 4463 */ 4464 if (!can_direct_reclaim) 4465 goto fail; 4466 4467 /* 4468 * Help non-failing allocations by giving some access to memory 4469 * reserves normally used for high priority non-blocking 4470 * allocations but do not use ALLOC_NO_WATERMARKS because this 4471 * could deplete whole memory reserves which would just make 4472 * the situation worse. 4473 */ 4474 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4475 if (page) 4476 goto got_pg; 4477 4478 cond_resched(); 4479 goto retry; 4480 } 4481 fail: 4482 warn_alloc(gfp_mask, ac->nodemask, 4483 "page allocation failure: order:%u", order); 4484 got_pg: 4485 return page; 4486 } 4487 4488 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4489 int preferred_nid, nodemask_t *nodemask, 4490 struct alloc_context *ac, gfp_t *alloc_gfp, 4491 unsigned int *alloc_flags) 4492 { 4493 ac->highest_zoneidx = gfp_zone(gfp_mask); 4494 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4495 ac->nodemask = nodemask; 4496 ac->migratetype = gfp_migratetype(gfp_mask); 4497 4498 if (cpusets_enabled()) { 4499 *alloc_gfp |= __GFP_HARDWALL; 4500 /* 4501 * When we are in the interrupt context, it is irrelevant 4502 * to the current task context. It means that any node ok. 4503 */ 4504 if (in_task() && !ac->nodemask) 4505 ac->nodemask = &cpuset_current_mems_allowed; 4506 else 4507 *alloc_flags |= ALLOC_CPUSET; 4508 } 4509 4510 might_alloc(gfp_mask); 4511 4512 if (should_fail_alloc_page(gfp_mask, order)) 4513 return false; 4514 4515 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 4516 4517 /* Dirty zone balancing only done in the fast path */ 4518 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4519 4520 /* 4521 * The preferred zone is used for statistics but crucially it is 4522 * also used as the starting point for the zonelist iterator. It 4523 * may get reset for allocations that ignore memory policies. 4524 */ 4525 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4526 ac->highest_zoneidx, ac->nodemask); 4527 4528 return true; 4529 } 4530 4531 /* 4532 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 4533 * @gfp: GFP flags for the allocation 4534 * @preferred_nid: The preferred NUMA node ID to allocate from 4535 * @nodemask: Set of nodes to allocate from, may be NULL 4536 * @nr_pages: The number of pages desired on the list or array 4537 * @page_list: Optional list to store the allocated pages 4538 * @page_array: Optional array to store the pages 4539 * 4540 * This is a batched version of the page allocator that attempts to 4541 * allocate nr_pages quickly. Pages are added to page_list if page_list 4542 * is not NULL, otherwise it is assumed that the page_array is valid. 4543 * 4544 * For lists, nr_pages is the number of pages that should be allocated. 4545 * 4546 * For arrays, only NULL elements are populated with pages and nr_pages 4547 * is the maximum number of pages that will be stored in the array. 4548 * 4549 * Returns the number of pages on the list or array. 
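 *
 * A minimal usage sketch for the array variant (illustrative only; it
 * assumes the alloc_pages_bulk_array() wrapper from gfp.h and omits
 * error handling):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
 *
 * At most 8 pages are returned and only the first "filled" slots are
 * populated; calling the function again simply tops up the NULL slots.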
4550 */ 4551 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 4552 nodemask_t *nodemask, int nr_pages, 4553 struct list_head *page_list, 4554 struct page **page_array) 4555 { 4556 struct page *page; 4557 unsigned long __maybe_unused UP_flags; 4558 struct zone *zone; 4559 struct zoneref *z; 4560 struct per_cpu_pages *pcp; 4561 struct list_head *pcp_list; 4562 struct alloc_context ac; 4563 gfp_t alloc_gfp; 4564 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4565 int nr_populated = 0, nr_account = 0; 4566 4567 /* 4568 * Skip populated array elements to determine if any pages need 4569 * to be allocated before disabling IRQs. 4570 */ 4571 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 4572 nr_populated++; 4573 4574 /* No pages requested? */ 4575 if (unlikely(nr_pages <= 0)) 4576 goto out; 4577 4578 /* Already populated array? */ 4579 if (unlikely(page_array && nr_pages - nr_populated == 0)) 4580 goto out; 4581 4582 /* Bulk allocator does not support memcg accounting. */ 4583 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4584 goto failed; 4585 4586 /* Use the single page allocator for one page. */ 4587 if (nr_pages - nr_populated == 1) 4588 goto failed; 4589 4590 #ifdef CONFIG_PAGE_OWNER 4591 /* 4592 * PAGE_OWNER may recurse into the allocator to allocate space to 4593 * save the stack with pagesets.lock held. Releasing/reacquiring 4594 * removes much of the performance benefit of bulk allocation so 4595 * force the caller to allocate one page at a time as it'll have 4596 * similar performance to added complexity to the bulk allocator. 4597 */ 4598 if (static_branch_unlikely(&page_owner_inited)) 4599 goto failed; 4600 #endif 4601 4602 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4603 gfp &= gfp_allowed_mask; 4604 alloc_gfp = gfp; 4605 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4606 goto out; 4607 gfp = alloc_gfp; 4608 4609 /* Find an allowed local zone that meets the low watermark. */ 4610 z = ac.preferred_zoneref; 4611 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 4612 unsigned long mark; 4613 4614 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 4615 !__cpuset_zone_allowed(zone, gfp)) { 4616 continue; 4617 } 4618 4619 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 4620 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 4621 goto failed; 4622 } 4623 4624 cond_accept_memory(zone, 0); 4625 retry_this_zone: 4626 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4627 if (zone_watermark_fast(zone, 0, mark, 4628 zonelist_zone_idx(ac.preferred_zoneref), 4629 alloc_flags, gfp)) { 4630 break; 4631 } 4632 4633 if (cond_accept_memory(zone, 0)) 4634 goto retry_this_zone; 4635 4636 /* Try again if zone has deferred pages */ 4637 if (deferred_pages_enabled()) { 4638 if (_deferred_grow_zone(zone, 0)) 4639 goto retry_this_zone; 4640 } 4641 } 4642 4643 /* 4644 * If there are no allowed local zones that meets the watermarks then 4645 * try to allocate a single page and reclaim if necessary. 4646 */ 4647 if (unlikely(!zone)) 4648 goto failed; 4649 4650 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
*/ 4651 pcp_trylock_prepare(UP_flags); 4652 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4653 if (!pcp) 4654 goto failed_irq; 4655 4656 /* Attempt the batch allocation */ 4657 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4658 while (nr_populated < nr_pages) { 4659 4660 /* Skip existing pages */ 4661 if (page_array && page_array[nr_populated]) { 4662 nr_populated++; 4663 continue; 4664 } 4665 4666 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4667 pcp, pcp_list); 4668 if (unlikely(!page)) { 4669 /* Try and allocate at least one page */ 4670 if (!nr_account) { 4671 pcp_spin_unlock(pcp); 4672 goto failed_irq; 4673 } 4674 break; 4675 } 4676 nr_account++; 4677 4678 prep_new_page(page, 0, gfp, 0); 4679 if (page_list) 4680 list_add(&page->lru, page_list); 4681 else 4682 page_array[nr_populated] = page; 4683 nr_populated++; 4684 } 4685 4686 pcp_spin_unlock(pcp); 4687 pcp_trylock_finish(UP_flags); 4688 4689 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4690 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 4691 4692 out: 4693 return nr_populated; 4694 4695 failed_irq: 4696 pcp_trylock_finish(UP_flags); 4697 4698 failed: 4699 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 4700 if (page) { 4701 if (page_list) 4702 list_add(&page->lru, page_list); 4703 else 4704 page_array[nr_populated] = page; 4705 nr_populated++; 4706 } 4707 4708 goto out; 4709 } 4710 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 4711 4712 /* 4713 * This is the 'heart' of the zoned buddy allocator. 4714 */ 4715 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 4716 int preferred_nid, nodemask_t *nodemask) 4717 { 4718 struct page *page; 4719 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4720 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4721 struct alloc_context ac = { }; 4722 4723 /* 4724 * There are several places where we assume that the order value is sane 4725 * so bail out early if the request is out of bound. 4726 */ 4727 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 4728 return NULL; 4729 4730 gfp &= gfp_allowed_mask; 4731 /* 4732 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4733 * resp. GFP_NOIO which has to be inherited for all allocation requests 4734 * from a particular context which has been marked by 4735 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4736 * movable zones are not used during allocation. 4737 */ 4738 gfp = current_gfp_context(gfp); 4739 alloc_gfp = gfp; 4740 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4741 &alloc_gfp, &alloc_flags)) 4742 return NULL; 4743 4744 /* 4745 * Forbid the first pass from falling back to types that fragment 4746 * memory until all local zones are considered. 4747 */ 4748 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 4749 4750 /* First allocation attempt */ 4751 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4752 if (likely(page)) 4753 goto out; 4754 4755 alloc_gfp = gfp; 4756 ac.spread_dirty_pages = false; 4757 4758 /* 4759 * Restore the original nodemask if it was potentially replaced with 4760 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
4761 */ 4762 ac.nodemask = nodemask; 4763 4764 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4765 4766 out: 4767 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4768 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4769 __free_pages(page, order); 4770 page = NULL; 4771 } 4772 4773 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4774 kmsan_alloc_page(page, order, alloc_gfp); 4775 4776 return page; 4777 } 4778 EXPORT_SYMBOL(__alloc_pages_noprof); 4779 4780 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 4781 nodemask_t *nodemask) 4782 { 4783 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 4784 preferred_nid, nodemask); 4785 return page_rmappable_folio(page); 4786 } 4787 EXPORT_SYMBOL(__folio_alloc_noprof); 4788 4789 /* 4790 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4791 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4792 * you need to access high mem. 4793 */ 4794 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 4795 { 4796 struct page *page; 4797 4798 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 4799 if (!page) 4800 return 0; 4801 return (unsigned long) page_address(page); 4802 } 4803 EXPORT_SYMBOL(get_free_pages_noprof); 4804 4805 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 4806 { 4807 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 4808 } 4809 EXPORT_SYMBOL(get_zeroed_page_noprof); 4810 4811 /** 4812 * __free_pages - Free pages allocated with alloc_pages(). 4813 * @page: The page pointer returned from alloc_pages(). 4814 * @order: The order of the allocation. 4815 * 4816 * This function can free multi-page allocations that are not compound 4817 * pages. It does not check that the @order passed in matches that of 4818 * the allocation, so it is easy to leak memory. Freeing more memory 4819 * than was allocated will probably emit a warning. 4820 * 4821 * If the last reference to this page is speculative, it will be released 4822 * by put_page() which only frees the first page of a non-compound 4823 * allocation. To prevent the remaining pages from being leaked, we free 4824 * the subsequent pages here. If you want to use the page's reference 4825 * count to decide when to free the allocation, you should allocate a 4826 * compound page, and use put_page() instead of __free_pages(). 4827 * 4828 * Context: May be called in interrupt context or while holding a normal 4829 * spinlock, but not in NMI context or while holding a raw spinlock. 
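 *
 * A minimal illustrative pairing (not taken from a real caller):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page)
 *		__free_pages(page, 2);
 *
 * where the order passed to __free_pages() must match the order used
 * for the allocation.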
4830 */ 4831 void __free_pages(struct page *page, unsigned int order) 4832 { 4833 /* get PageHead before we drop reference */ 4834 int head = PageHead(page); 4835 struct alloc_tag *tag = pgalloc_tag_get(page); 4836 4837 if (put_page_testzero(page)) 4838 free_unref_page(page, order); 4839 else if (!head) { 4840 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 4841 while (order-- > 0) 4842 free_unref_page(page + (1 << order), order); 4843 } 4844 } 4845 EXPORT_SYMBOL(__free_pages); 4846 4847 void free_pages(unsigned long addr, unsigned int order) 4848 { 4849 if (addr != 0) { 4850 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4851 __free_pages(virt_to_page((void *)addr), order); 4852 } 4853 } 4854 4855 EXPORT_SYMBOL(free_pages); 4856 4857 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4858 size_t size) 4859 { 4860 if (addr) { 4861 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 4862 struct page *page = virt_to_page((void *)addr); 4863 struct page *last = page + nr; 4864 4865 split_page_owner(page, order, 0); 4866 pgalloc_tag_split(page_folio(page), order, 0); 4867 split_page_memcg(page, order, 0); 4868 while (page < --last) 4869 set_page_refcounted(last); 4870 4871 last = page + (1UL << order); 4872 for (page += nr; page < last; page++) 4873 __free_pages_ok(page, 0, FPI_TO_TAIL); 4874 } 4875 return (void *)addr; 4876 } 4877 4878 /** 4879 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 4880 * @size: the number of bytes to allocate 4881 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4882 * 4883 * This function is similar to alloc_pages(), except that it allocates the 4884 * minimum number of pages to satisfy the request. alloc_pages() can only 4885 * allocate memory in power-of-two pages. 4886 * 4887 * This function is also limited by MAX_PAGE_ORDER. 4888 * 4889 * Memory allocated by this function must be released by free_pages_exact(). 4890 * 4891 * Return: pointer to the allocated area or %NULL in case of error. 4892 */ 4893 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 4894 { 4895 unsigned int order = get_order(size); 4896 unsigned long addr; 4897 4898 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4899 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4900 4901 addr = get_free_pages_noprof(gfp_mask, order); 4902 return make_alloc_exact(addr, order, size); 4903 } 4904 EXPORT_SYMBOL(alloc_pages_exact_noprof); 4905 4906 /** 4907 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4908 * pages on a node. 4909 * @nid: the preferred node ID where memory should be allocated 4910 * @size: the number of bytes to allocate 4911 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4912 * 4913 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4914 * back. 4915 * 4916 * Return: pointer to the allocated area or %NULL in case of error. 4917 */ 4918 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 4919 { 4920 unsigned int order = get_order(size); 4921 struct page *p; 4922 4923 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4924 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4925 4926 p = alloc_pages_node_noprof(nid, gfp_mask, order); 4927 if (!p) 4928 return NULL; 4929 return make_alloc_exact((unsigned long)page_address(p), order, size); 4930 } 4931 4932 /** 4933 * free_pages_exact - release memory allocated via alloc_pages_exact() 4934 * @virt: the value returned by alloc_pages_exact. 
4935 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4936 * 4937 * Release the memory allocated by a previous call to alloc_pages_exact. 4938 */ 4939 void free_pages_exact(void *virt, size_t size) 4940 { 4941 unsigned long addr = (unsigned long)virt; 4942 unsigned long end = addr + PAGE_ALIGN(size); 4943 4944 while (addr < end) { 4945 free_page(addr); 4946 addr += PAGE_SIZE; 4947 } 4948 } 4949 EXPORT_SYMBOL(free_pages_exact); 4950 4951 /** 4952 * nr_free_zone_pages - count number of pages beyond high watermark 4953 * @offset: The zone index of the highest zone 4954 * 4955 * nr_free_zone_pages() counts the number of pages which are beyond the 4956 * high watermark within all zones at or below a given zone index. For each 4957 * zone, the number of pages is calculated as: 4958 * 4959 * nr_free_zone_pages = managed_pages - high_pages 4960 * 4961 * Return: number of pages beyond high watermark. 4962 */ 4963 static unsigned long nr_free_zone_pages(int offset) 4964 { 4965 struct zoneref *z; 4966 struct zone *zone; 4967 4968 /* Just pick one node, since fallback list is circular */ 4969 unsigned long sum = 0; 4970 4971 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4972 4973 for_each_zone_zonelist(zone, z, zonelist, offset) { 4974 unsigned long size = zone_managed_pages(zone); 4975 unsigned long high = high_wmark_pages(zone); 4976 if (size > high) 4977 sum += size - high; 4978 } 4979 4980 return sum; 4981 } 4982 4983 /** 4984 * nr_free_buffer_pages - count number of pages beyond high watermark 4985 * 4986 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4987 * watermark within ZONE_DMA and ZONE_NORMAL. 4988 * 4989 * Return: number of pages beyond high watermark within ZONE_DMA and 4990 * ZONE_NORMAL. 4991 */ 4992 unsigned long nr_free_buffer_pages(void) 4993 { 4994 return nr_free_zone_pages(gfp_zone(GFP_USER)); 4995 } 4996 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 4997 4998 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 4999 { 5000 zoneref->zone = zone; 5001 zoneref->zone_idx = zone_idx(zone); 5002 } 5003 5004 /* 5005 * Builds allocation fallback zone lists. 5006 * 5007 * Add all populated zones of a node to the zonelist. 5008 */ 5009 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5010 { 5011 struct zone *zone; 5012 enum zone_type zone_type = MAX_NR_ZONES; 5013 int nr_zones = 0; 5014 5015 do { 5016 zone_type--; 5017 zone = pgdat->node_zones + zone_type; 5018 if (populated_zone(zone)) { 5019 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5020 check_highest_zone(zone_type); 5021 } 5022 } while (zone_type); 5023 5024 return nr_zones; 5025 } 5026 5027 #ifdef CONFIG_NUMA 5028 5029 static int __parse_numa_zonelist_order(char *s) 5030 { 5031 /* 5032 * We used to support different zonelists modes but they turned 5033 * out to be just not useful. 
Let's keep the warning in place 5034 * if somebody still use the cmd line parameter so that we do 5035 * not fail it silently 5036 */ 5037 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5038 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5039 return -EINVAL; 5040 } 5041 return 0; 5042 } 5043 5044 static char numa_zonelist_order[] = "Node"; 5045 #define NUMA_ZONELIST_ORDER_LEN 16 5046 /* 5047 * sysctl handler for numa_zonelist_order 5048 */ 5049 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5050 void *buffer, size_t *length, loff_t *ppos) 5051 { 5052 if (write) 5053 return __parse_numa_zonelist_order(buffer); 5054 return proc_dostring(table, write, buffer, length, ppos); 5055 } 5056 5057 static int node_load[MAX_NUMNODES]; 5058 5059 /** 5060 * find_next_best_node - find the next node that should appear in a given node's fallback list 5061 * @node: node whose fallback list we're appending 5062 * @used_node_mask: nodemask_t of already used nodes 5063 * 5064 * We use a number of factors to determine which is the next node that should 5065 * appear on a given node's fallback list. The node should not have appeared 5066 * already in @node's fallback list, and it should be the next closest node 5067 * according to the distance array (which contains arbitrary distance values 5068 * from each node to each node in the system), and should also prefer nodes 5069 * with no CPUs, since presumably they'll have very little allocation pressure 5070 * on them otherwise. 5071 * 5072 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5073 */ 5074 int find_next_best_node(int node, nodemask_t *used_node_mask) 5075 { 5076 int n, val; 5077 int min_val = INT_MAX; 5078 int best_node = NUMA_NO_NODE; 5079 5080 /* 5081 * Use the local node if we haven't already, but for memoryless local 5082 * node, we should skip it and fall back to other nodes. 5083 */ 5084 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5085 node_set(node, *used_node_mask); 5086 return node; 5087 } 5088 5089 for_each_node_state(n, N_MEMORY) { 5090 5091 /* Don't want a node to appear more than once */ 5092 if (node_isset(n, *used_node_mask)) 5093 continue; 5094 5095 /* Use the distance array to find the distance */ 5096 val = node_distance(node, n); 5097 5098 /* Penalize nodes under us ("prefer the next node") */ 5099 val += (n < node); 5100 5101 /* Give preference to headless and unused nodes */ 5102 if (!cpumask_empty(cpumask_of_node(n))) 5103 val += PENALTY_FOR_NODE_WITH_CPUS; 5104 5105 /* Slight preference for less loaded node */ 5106 val *= MAX_NUMNODES; 5107 val += node_load[n]; 5108 5109 if (val < min_val) { 5110 min_val = val; 5111 best_node = n; 5112 } 5113 } 5114 5115 if (best_node >= 0) 5116 node_set(best_node, *used_node_mask); 5117 5118 return best_node; 5119 } 5120 5121 5122 /* 5123 * Build zonelists ordered by node and zones within node. 5124 * This results in maximum locality--normal zone overflows into local 5125 * DMA zone, if any--but risks exhausting DMA zone. 
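 *
 * For example, on an illustrative two-node machine where node 0 has
 * ZONE_NORMAL and ZONE_DMA and node 1 has only ZONE_NORMAL, node 0's
 * fallback zonelist becomes:
 *
 *	NORMAL (node 0) -> DMA (node 0) -> NORMAL (node 1)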
5126 */ 5127 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5128 unsigned nr_nodes) 5129 { 5130 struct zoneref *zonerefs; 5131 int i; 5132 5133 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5134 5135 for (i = 0; i < nr_nodes; i++) { 5136 int nr_zones; 5137 5138 pg_data_t *node = NODE_DATA(node_order[i]); 5139 5140 nr_zones = build_zonerefs_node(node, zonerefs); 5141 zonerefs += nr_zones; 5142 } 5143 zonerefs->zone = NULL; 5144 zonerefs->zone_idx = 0; 5145 } 5146 5147 /* 5148 * Build __GFP_THISNODE zonelists 5149 */ 5150 static void build_thisnode_zonelists(pg_data_t *pgdat) 5151 { 5152 struct zoneref *zonerefs; 5153 int nr_zones; 5154 5155 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5156 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5157 zonerefs += nr_zones; 5158 zonerefs->zone = NULL; 5159 zonerefs->zone_idx = 0; 5160 } 5161 5162 /* 5163 * Build zonelists ordered by zone and nodes within zones. 5164 * This results in conserving DMA zone[s] until all Normal memory is 5165 * exhausted, but results in overflowing to remote node while memory 5166 * may still exist in local DMA zone. 5167 */ 5168 5169 static void build_zonelists(pg_data_t *pgdat) 5170 { 5171 static int node_order[MAX_NUMNODES]; 5172 int node, nr_nodes = 0; 5173 nodemask_t used_mask = NODE_MASK_NONE; 5174 int local_node, prev_node; 5175 5176 /* NUMA-aware ordering of nodes */ 5177 local_node = pgdat->node_id; 5178 prev_node = local_node; 5179 5180 memset(node_order, 0, sizeof(node_order)); 5181 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5182 /* 5183 * We don't want to pressure a particular node. 5184 * So adding penalty to the first node in same 5185 * distance group to make it round-robin. 5186 */ 5187 if (node_distance(local_node, node) != 5188 node_distance(local_node, prev_node)) 5189 node_load[node] += 1; 5190 5191 node_order[nr_nodes++] = node; 5192 prev_node = node; 5193 } 5194 5195 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5196 build_thisnode_zonelists(pgdat); 5197 pr_info("Fallback order for Node %d: ", local_node); 5198 for (node = 0; node < nr_nodes; node++) 5199 pr_cont("%d ", node_order[node]); 5200 pr_cont("\n"); 5201 } 5202 5203 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5204 /* 5205 * Return node id of node used for "local" allocations. 5206 * I.e., first node id of first zone in arg node's generic zonelist. 5207 * Used for initializing percpu 'numa_mem', which is used primarily 5208 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5209 */ 5210 int local_memory_node(int node) 5211 { 5212 struct zoneref *z; 5213 5214 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5215 gfp_zone(GFP_KERNEL), 5216 NULL); 5217 return zonelist_node_idx(z); 5218 } 5219 #endif 5220 5221 static void setup_min_unmapped_ratio(void); 5222 static void setup_min_slab_ratio(void); 5223 #else /* CONFIG_NUMA */ 5224 5225 static void build_zonelists(pg_data_t *pgdat) 5226 { 5227 struct zoneref *zonerefs; 5228 int nr_zones; 5229 5230 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5231 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5232 zonerefs += nr_zones; 5233 5234 zonerefs->zone = NULL; 5235 zonerefs->zone_idx = 0; 5236 } 5237 5238 #endif /* CONFIG_NUMA */ 5239 5240 /* 5241 * Boot pageset table. One per cpu which is going to be used for all 5242 * zones and all nodes. 
The parameters will be set in such a way 5243 * that an item put on a list will immediately be handed over to 5244 * the buddy list. This is safe since pageset manipulation is done 5245 * with interrupts disabled. 5246 * 5247 * The boot_pagesets must be kept even after bootup is complete for 5248 * unused processors and/or zones. They do play a role for bootstrapping 5249 * hotplugged processors. 5250 * 5251 * zoneinfo_show() and maybe other functions do 5252 * not check if the processor is online before following the pageset pointer. 5253 * Other parts of the kernel may not check if the zone is available. 5254 */ 5255 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5256 /* These effectively disable the pcplists in the boot pageset completely */ 5257 #define BOOT_PAGESET_HIGH 0 5258 #define BOOT_PAGESET_BATCH 1 5259 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5260 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5261 5262 static void __build_all_zonelists(void *data) 5263 { 5264 int nid; 5265 int __maybe_unused cpu; 5266 pg_data_t *self = data; 5267 unsigned long flags; 5268 5269 /* 5270 * The zonelist_update_seq must be acquired with irqsave because the 5271 * reader can be invoked from IRQ with GFP_ATOMIC. 5272 */ 5273 write_seqlock_irqsave(&zonelist_update_seq, flags); 5274 /* 5275 * Also disable synchronous printk() to prevent any printk() from 5276 * trying to hold port->lock, for 5277 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5278 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5279 */ 5280 printk_deferred_enter(); 5281 5282 #ifdef CONFIG_NUMA 5283 memset(node_load, 0, sizeof(node_load)); 5284 #endif 5285 5286 /* 5287 * This node is hotadded and no memory is yet present. So just 5288 * building zonelists is fine - no need to touch other nodes. 5289 */ 5290 if (self && !node_online(self->node_id)) { 5291 build_zonelists(self); 5292 } else { 5293 /* 5294 * All possible nodes have pgdat preallocated 5295 * in free_area_init 5296 */ 5297 for_each_node(nid) { 5298 pg_data_t *pgdat = NODE_DATA(nid); 5299 5300 build_zonelists(pgdat); 5301 } 5302 5303 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5304 /* 5305 * We now know the "local memory node" for each node-- 5306 * i.e., the node of the first zone in the generic zonelist. 5307 * Set up numa_mem percpu variable for on-line cpus. During 5308 * boot, only the boot cpu should be on-line; we'll init the 5309 * secondary cpus' numa_mem as they come on-line. During 5310 * node/memory hotplug, we'll fixup all on-line cpus. 5311 */ 5312 for_each_online_cpu(cpu) 5313 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5314 #endif 5315 } 5316 5317 printk_deferred_exit(); 5318 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5319 } 5320 5321 static noinline void __init 5322 build_all_zonelists_init(void) 5323 { 5324 int cpu; 5325 5326 __build_all_zonelists(NULL); 5327 5328 /* 5329 * Initialize the boot_pagesets that are going to be used 5330 * for bootstrapping processors. The real pagesets for 5331 * each zone will be allocated later when the per cpu 5332 * allocator is available. 5333 * 5334 * boot_pagesets are used also for bootstrapping offline 5335 * cpus if the system is already booted because the pagesets 5336 * are needed to initialize allocators on a specific cpu too. 5337 * F.e. 
the percpu allocator needs the page allocator which 5338 * needs the percpu allocator in order to allocate its pagesets 5339 * (a chicken-egg dilemma). 5340 */ 5341 for_each_possible_cpu(cpu) 5342 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5343 5344 mminit_verify_zonelist(); 5345 cpuset_init_current_mems_allowed(); 5346 } 5347 5348 /* 5349 * unless system_state == SYSTEM_BOOTING. 5350 * 5351 * __ref due to call of __init annotated helper build_all_zonelists_init 5352 * [protected by SYSTEM_BOOTING]. 5353 */ 5354 void __ref build_all_zonelists(pg_data_t *pgdat) 5355 { 5356 unsigned long vm_total_pages; 5357 5358 if (system_state == SYSTEM_BOOTING) { 5359 build_all_zonelists_init(); 5360 } else { 5361 __build_all_zonelists(pgdat); 5362 /* cpuset refresh routine should be here */ 5363 } 5364 /* Get the number of free pages beyond high watermark in all zones. */ 5365 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5366 /* 5367 * Disable grouping by mobility if the number of pages in the 5368 * system is too low to allow the mechanism to work. It would be 5369 * more accurate, but expensive to check per-zone. This check is 5370 * made on memory-hotadd so a system can start with mobility 5371 * disabled and enable it later 5372 */ 5373 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5374 page_group_by_mobility_disabled = 1; 5375 else 5376 page_group_by_mobility_disabled = 0; 5377 5378 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5379 nr_online_nodes, 5380 str_off_on(page_group_by_mobility_disabled), 5381 vm_total_pages); 5382 #ifdef CONFIG_NUMA 5383 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5384 #endif 5385 } 5386 5387 static int zone_batchsize(struct zone *zone) 5388 { 5389 #ifdef CONFIG_MMU 5390 int batch; 5391 5392 /* 5393 * The number of pages to batch allocate is either ~0.1% 5394 * of the zone or 1MB, whichever is smaller. The batch 5395 * size is striking a balance between allocation latency 5396 * and zone lock contention. 5397 */ 5398 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5399 batch /= 4; /* We effectively *= 4 below */ 5400 if (batch < 1) 5401 batch = 1; 5402 5403 /* 5404 * Clamp the batch to a 2^n - 1 value. Having a power 5405 * of 2 value was found to be more likely to have 5406 * suboptimal cache aliasing properties in some cases. 5407 * 5408 * For example if 2 tasks are alternately allocating 5409 * batches of pages, one task can end up with a lot 5410 * of pages of one half of the possible page colors 5411 * and the other with pages of the other colors. 5412 */ 5413 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5414 5415 return batch; 5416 5417 #else 5418 /* The deferral and batching of frees should be suppressed under NOMMU 5419 * conditions. 5420 * 5421 * The problem is that NOMMU needs to be able to allocate large chunks 5422 * of contiguous memory as there's no hardware page translation to 5423 * assemble apparent contiguous memory from discontiguous pages. 5424 * 5425 * Queueing large contiguous runs of pages for batching, however, 5426 * causes the pages to actually be freed in smaller chunks. As there 5427 * can be a significant delay between the individual batches being 5428 * recycled, this leads to the once large chunks of space being 5429 * fragmented and becoming unavailable for high-order allocations. 
5430 */ 5431 return 0; 5432 #endif 5433 } 5434 5435 static int percpu_pagelist_high_fraction; 5436 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5437 int high_fraction) 5438 { 5439 #ifdef CONFIG_MMU 5440 int high; 5441 int nr_split_cpus; 5442 unsigned long total_pages; 5443 5444 if (!high_fraction) { 5445 /* 5446 * By default, the high value of the pcp is based on the zone 5447 * low watermark so that if they are full then background 5448 * reclaim will not be started prematurely. 5449 */ 5450 total_pages = low_wmark_pages(zone); 5451 } else { 5452 /* 5453 * If percpu_pagelist_high_fraction is configured, the high 5454 * value is based on a fraction of the managed pages in the 5455 * zone. 5456 */ 5457 total_pages = zone_managed_pages(zone) / high_fraction; 5458 } 5459 5460 /* 5461 * Split the high value across all online CPUs local to the zone. Note 5462 * that early in boot that CPUs may not be online yet and that during 5463 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5464 * onlined. For memory nodes that have no CPUs, split the high value 5465 * across all online CPUs to mitigate the risk that reclaim is triggered 5466 * prematurely due to pages stored on pcp lists. 5467 */ 5468 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5469 if (!nr_split_cpus) 5470 nr_split_cpus = num_online_cpus(); 5471 high = total_pages / nr_split_cpus; 5472 5473 /* 5474 * Ensure high is at least batch*4. The multiple is based on the 5475 * historical relationship between high and batch. 5476 */ 5477 high = max(high, batch << 2); 5478 5479 return high; 5480 #else 5481 return 0; 5482 #endif 5483 } 5484 5485 /* 5486 * pcp->high and pcp->batch values are related and generally batch is lower 5487 * than high. They are also related to pcp->count such that count is lower 5488 * than high, and as soon as it reaches high, the pcplist is flushed. 5489 * 5490 * However, guaranteeing these relations at all times would require e.g. write 5491 * barriers here but also careful usage of read barriers at the read side, and 5492 * thus be prone to error and bad for performance. Thus the update only prevents 5493 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5494 * should ensure they can cope with those fields changing asynchronously, and 5495 * fully trust only the pcp->count field on the local CPU with interrupts 5496 * disabled. 5497 * 5498 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5499 * outside of boot time (or some other assurance that no concurrent updaters 5500 * exist). 5501 */ 5502 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5503 unsigned long high_max, unsigned long batch) 5504 { 5505 WRITE_ONCE(pcp->batch, batch); 5506 WRITE_ONCE(pcp->high_min, high_min); 5507 WRITE_ONCE(pcp->high_max, high_max); 5508 } 5509 5510 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5511 { 5512 int pindex; 5513 5514 memset(pcp, 0, sizeof(*pcp)); 5515 memset(pzstats, 0, sizeof(*pzstats)); 5516 5517 spin_lock_init(&pcp->lock); 5518 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5519 INIT_LIST_HEAD(&pcp->lists[pindex]); 5520 5521 /* 5522 * Set batch and high values safe for a boot pageset. A true percpu 5523 * pageset's initialization will update them subsequently. Here we don't 5524 * need to be as careful as pageset_update() as nobody can access the 5525 * pageset yet. 
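 *
 * As an illustrative reading of the constants used below: with
 * BOOT_PAGESET_HIGH == 0 and BOOT_PAGESET_BATCH == 1, any page freed to
 * a boot pageset immediately exceeds ->high and is handed straight to
 * the buddy lists, which is what makes these pagesets safe to use
 * before the real per-cpu pagesets are allocated.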
5526 */ 5527 pcp->high_min = BOOT_PAGESET_HIGH; 5528 pcp->high_max = BOOT_PAGESET_HIGH; 5529 pcp->batch = BOOT_PAGESET_BATCH; 5530 pcp->free_count = 0; 5531 } 5532 5533 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 5534 unsigned long high_max, unsigned long batch) 5535 { 5536 struct per_cpu_pages *pcp; 5537 int cpu; 5538 5539 for_each_possible_cpu(cpu) { 5540 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5541 pageset_update(pcp, high_min, high_max, batch); 5542 } 5543 } 5544 5545 /* 5546 * Calculate and set new high and batch values for all per-cpu pagesets of a 5547 * zone based on the zone's size. 5548 */ 5549 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5550 { 5551 int new_high_min, new_high_max, new_batch; 5552 5553 new_batch = max(1, zone_batchsize(zone)); 5554 if (percpu_pagelist_high_fraction) { 5555 new_high_min = zone_highsize(zone, new_batch, cpu_online, 5556 percpu_pagelist_high_fraction); 5557 /* 5558 * PCP high is tuned manually, disable auto-tuning via 5559 * setting high_min and high_max to the manual value. 5560 */ 5561 new_high_max = new_high_min; 5562 } else { 5563 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 5564 new_high_max = zone_highsize(zone, new_batch, cpu_online, 5565 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 5566 } 5567 5568 if (zone->pageset_high_min == new_high_min && 5569 zone->pageset_high_max == new_high_max && 5570 zone->pageset_batch == new_batch) 5571 return; 5572 5573 zone->pageset_high_min = new_high_min; 5574 zone->pageset_high_max = new_high_max; 5575 zone->pageset_batch = new_batch; 5576 5577 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 5578 new_batch); 5579 } 5580 5581 void __meminit setup_zone_pageset(struct zone *zone) 5582 { 5583 int cpu; 5584 5585 /* Size may be 0 on !SMP && !NUMA */ 5586 if (sizeof(struct per_cpu_zonestat) > 0) 5587 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5588 5589 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5590 for_each_possible_cpu(cpu) { 5591 struct per_cpu_pages *pcp; 5592 struct per_cpu_zonestat *pzstats; 5593 5594 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5595 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5596 per_cpu_pages_init(pcp, pzstats); 5597 } 5598 5599 zone_set_pageset_high_and_batch(zone, 0); 5600 } 5601 5602 /* 5603 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5604 * page high values need to be recalculated. 5605 */ 5606 static void zone_pcp_update(struct zone *zone, int cpu_online) 5607 { 5608 mutex_lock(&pcp_batch_high_lock); 5609 zone_set_pageset_high_and_batch(zone, cpu_online); 5610 mutex_unlock(&pcp_batch_high_lock); 5611 } 5612 5613 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 5614 { 5615 struct per_cpu_pages *pcp; 5616 struct cpu_cacheinfo *cci; 5617 5618 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5619 cci = get_cpu_cacheinfo(cpu); 5620 /* 5621 * If data cache slice of CPU is large enough, "pcp->batch" 5622 * pages can be preserved in PCP before draining PCP for 5623 * consecutive high-order pages freeing without allocation. 5624 * This can reduce zone lock contention without hurting 5625 * cache-hot pages sharing. 
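 *
 * As a rough illustration with hypothetical numbers: a 2MiB per-CPU data
 * cache slice and 4KiB pages give a slice of 512 pages, so the
 * PCPF_FREE_HIGH_BATCH flag below is set whenever 3 * pcp->batch stays
 * under 512.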
5626 */ 5627 spin_lock(&pcp->lock); 5628 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 5629 pcp->flags |= PCPF_FREE_HIGH_BATCH; 5630 else 5631 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 5632 spin_unlock(&pcp->lock); 5633 } 5634 5635 void setup_pcp_cacheinfo(unsigned int cpu) 5636 { 5637 struct zone *zone; 5638 5639 for_each_populated_zone(zone) 5640 zone_pcp_update_cacheinfo(zone, cpu); 5641 } 5642 5643 /* 5644 * Allocate per cpu pagesets and initialize them. 5645 * Before this call only boot pagesets were available. 5646 */ 5647 void __init setup_per_cpu_pageset(void) 5648 { 5649 struct pglist_data *pgdat; 5650 struct zone *zone; 5651 int __maybe_unused cpu; 5652 5653 for_each_populated_zone(zone) 5654 setup_zone_pageset(zone); 5655 5656 #ifdef CONFIG_NUMA 5657 /* 5658 * Unpopulated zones continue using the boot pagesets. 5659 * The numa stats for these pagesets need to be reset. 5660 * Otherwise, they will end up skewing the stats of 5661 * the nodes these zones are associated with. 5662 */ 5663 for_each_possible_cpu(cpu) { 5664 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5665 memset(pzstats->vm_numa_event, 0, 5666 sizeof(pzstats->vm_numa_event)); 5667 } 5668 #endif 5669 5670 for_each_online_pgdat(pgdat) 5671 pgdat->per_cpu_nodestats = 5672 alloc_percpu(struct per_cpu_nodestat); 5673 } 5674 5675 __meminit void zone_pcp_init(struct zone *zone) 5676 { 5677 /* 5678 * per cpu subsystem is not up at this point. The following code 5679 * relies on the ability of the linker to provide the 5680 * offset of a (static) per cpu variable into the per cpu area. 5681 */ 5682 zone->per_cpu_pageset = &boot_pageset; 5683 zone->per_cpu_zonestats = &boot_zonestats; 5684 zone->pageset_high_min = BOOT_PAGESET_HIGH; 5685 zone->pageset_high_max = BOOT_PAGESET_HIGH; 5686 zone->pageset_batch = BOOT_PAGESET_BATCH; 5687 5688 if (populated_zone(zone)) 5689 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5690 zone->present_pages, zone_batchsize(zone)); 5691 } 5692 5693 void adjust_managed_page_count(struct page *page, long count) 5694 { 5695 atomic_long_add(count, &page_zone(page)->managed_pages); 5696 totalram_pages_add(count); 5697 } 5698 EXPORT_SYMBOL(adjust_managed_page_count); 5699 5700 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5701 { 5702 void *pos; 5703 unsigned long pages = 0; 5704 5705 start = (void *)PAGE_ALIGN((unsigned long)start); 5706 end = (void *)((unsigned long)end & PAGE_MASK); 5707 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5708 struct page *page = virt_to_page(pos); 5709 void *direct_map_addr; 5710 5711 /* 5712 * 'direct_map_addr' might be different from 'pos' 5713 * because some architectures' virt_to_page() 5714 * work with aliases. Getting the direct map 5715 * address ensures that we get a _writeable_ 5716 * alias for the memset(). 5717 */ 5718 direct_map_addr = page_address(page); 5719 /* 5720 * Perform a kasan-unchecked memset() since this memory 5721 * has not been initialized. 
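 * (kasan_reset_tag() only strips the pointer tag: with the tag-based KASAN
 * modes the memset() below is then effectively unchecked, while for
 * generic KASAN the call is a no-op.)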
5722 */ 5723 direct_map_addr = kasan_reset_tag(direct_map_addr); 5724 if ((unsigned int)poison <= 0xFF) 5725 memset(direct_map_addr, poison, PAGE_SIZE); 5726 5727 free_reserved_page(page); 5728 } 5729 5730 if (pages && s) 5731 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 5732 5733 return pages; 5734 } 5735 5736 void free_reserved_page(struct page *page) 5737 { 5738 clear_page_tag_ref(page); 5739 ClearPageReserved(page); 5740 init_page_count(page); 5741 __free_page(page); 5742 adjust_managed_page_count(page, 1); 5743 } 5744 EXPORT_SYMBOL(free_reserved_page); 5745 5746 static int page_alloc_cpu_dead(unsigned int cpu) 5747 { 5748 struct zone *zone; 5749 5750 lru_add_drain_cpu(cpu); 5751 mlock_drain_remote(cpu); 5752 drain_pages(cpu); 5753 5754 /* 5755 * Spill the event counters of the dead processor 5756 * into the current processors event counters. 5757 * This artificially elevates the count of the current 5758 * processor. 5759 */ 5760 vm_events_fold_cpu(cpu); 5761 5762 /* 5763 * Zero the differential counters of the dead processor 5764 * so that the vm statistics are consistent. 5765 * 5766 * This is only okay since the processor is dead and cannot 5767 * race with what we are doing. 5768 */ 5769 cpu_vm_stats_fold(cpu); 5770 5771 for_each_populated_zone(zone) 5772 zone_pcp_update(zone, 0); 5773 5774 return 0; 5775 } 5776 5777 static int page_alloc_cpu_online(unsigned int cpu) 5778 { 5779 struct zone *zone; 5780 5781 for_each_populated_zone(zone) 5782 zone_pcp_update(zone, 1); 5783 return 0; 5784 } 5785 5786 void __init page_alloc_init_cpuhp(void) 5787 { 5788 int ret; 5789 5790 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 5791 "mm/page_alloc:pcp", 5792 page_alloc_cpu_online, 5793 page_alloc_cpu_dead); 5794 WARN_ON(ret < 0); 5795 } 5796 5797 /* 5798 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5799 * or min_free_kbytes changes. 5800 */ 5801 static void calculate_totalreserve_pages(void) 5802 { 5803 struct pglist_data *pgdat; 5804 unsigned long reserve_pages = 0; 5805 enum zone_type i, j; 5806 5807 for_each_online_pgdat(pgdat) { 5808 5809 pgdat->totalreserve_pages = 0; 5810 5811 for (i = 0; i < MAX_NR_ZONES; i++) { 5812 struct zone *zone = pgdat->node_zones + i; 5813 long max = 0; 5814 unsigned long managed_pages = zone_managed_pages(zone); 5815 5816 /* Find valid and maximum lowmem_reserve in the zone */ 5817 for (j = i; j < MAX_NR_ZONES; j++) { 5818 if (zone->lowmem_reserve[j] > max) 5819 max = zone->lowmem_reserve[j]; 5820 } 5821 5822 /* we treat the high watermark as reserved pages. */ 5823 max += high_wmark_pages(zone); 5824 5825 if (max > managed_pages) 5826 max = managed_pages; 5827 5828 pgdat->totalreserve_pages += max; 5829 5830 reserve_pages += max; 5831 } 5832 } 5833 totalreserve_pages = reserve_pages; 5834 } 5835 5836 /* 5837 * setup_per_zone_lowmem_reserve - called whenever 5838 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 5839 * has a correct pages reserved value, so an adequate number of 5840 * pages are left in the zone after a successful __alloc_pages(). 
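 *
 * Rough worked example, assuming the default ratios: with a DMA32 ratio of
 * 256 and ~4GiB (~1048576 4KiB pages) of ZONE_NORMAL above it, DMA32 ends
 * up with lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096 pages (16MiB)
 * kept out of reach of allocations that could have been satisfied from
 * ZONE_NORMAL instead.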
5841 */ 5842 static void setup_per_zone_lowmem_reserve(void) 5843 { 5844 struct pglist_data *pgdat; 5845 enum zone_type i, j; 5846 5847 for_each_online_pgdat(pgdat) { 5848 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 5849 struct zone *zone = &pgdat->node_zones[i]; 5850 int ratio = sysctl_lowmem_reserve_ratio[i]; 5851 bool clear = !ratio || !zone_managed_pages(zone); 5852 unsigned long managed_pages = 0; 5853 5854 for (j = i + 1; j < MAX_NR_ZONES; j++) { 5855 struct zone *upper_zone = &pgdat->node_zones[j]; 5856 bool empty = !zone_managed_pages(upper_zone); 5857 5858 managed_pages += zone_managed_pages(upper_zone); 5859 5860 if (clear || empty) 5861 zone->lowmem_reserve[j] = 0; 5862 else 5863 zone->lowmem_reserve[j] = managed_pages / ratio; 5864 } 5865 } 5866 } 5867 5868 /* update totalreserve_pages */ 5869 calculate_totalreserve_pages(); 5870 } 5871 5872 static void __setup_per_zone_wmarks(void) 5873 { 5874 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5875 unsigned long lowmem_pages = 0; 5876 struct zone *zone; 5877 unsigned long flags; 5878 5879 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 5880 for_each_zone(zone) { 5881 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 5882 lowmem_pages += zone_managed_pages(zone); 5883 } 5884 5885 for_each_zone(zone) { 5886 u64 tmp; 5887 5888 spin_lock_irqsave(&zone->lock, flags); 5889 tmp = (u64)pages_min * zone_managed_pages(zone); 5890 tmp = div64_ul(tmp, lowmem_pages); 5891 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 5892 /* 5893 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 5894 * need highmem and movable zones pages, so cap pages_min 5895 * to a small value here. 5896 * 5897 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 5898 * deltas control async page reclaim, and so should 5899 * not be capped for highmem and movable zones. 5900 */ 5901 unsigned long min_pages; 5902 5903 min_pages = zone_managed_pages(zone) / 1024; 5904 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 5905 zone->_watermark[WMARK_MIN] = min_pages; 5906 } else { 5907 /* 5908 * If it's a lowmem zone, reserve a number of pages 5909 * proportionate to the zone's size. 5910 */ 5911 zone->_watermark[WMARK_MIN] = tmp; 5912 } 5913 5914 /* 5915 * Set the kswapd watermarks distance according to the 5916 * scale factor in proportion to available memory, but 5917 * ensure a minimum size on small systems. 5918 */ 5919 tmp = max_t(u64, tmp >> 2, 5920 mult_frac(zone_managed_pages(zone), 5921 watermark_scale_factor, 10000)); 5922 5923 zone->watermark_boost = 0; 5924 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 5925 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 5926 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 5927 5928 spin_unlock_irqrestore(&zone->lock, flags); 5929 } 5930 5931 /* update totalreserve_pages */ 5932 calculate_totalreserve_pages(); 5933 } 5934 5935 /** 5936 * setup_per_zone_wmarks - called when min_free_kbytes changes 5937 * or when memory is hot-{added|removed} 5938 * 5939 * Ensures that the watermark[min,low,high] values for each zone are set 5940 * correctly with respect to min_free_kbytes. 5941 */ 5942 void setup_per_zone_wmarks(void) 5943 { 5944 struct zone *zone; 5945 static DEFINE_SPINLOCK(lock); 5946 5947 spin_lock(&lock); 5948 __setup_per_zone_wmarks(); 5949 spin_unlock(&lock); 5950 5951 /* 5952 * The watermark size have changed so update the pcpu batch 5953 * and high limits or the limits may be inappropriate. 
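 * (With the default tuning, zone_highsize() derives pcp->high from the
 * zone's low watermark, so a watermark change feeds directly into the pcp
 * limits recomputed below.)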
5954 */ 5955 for_each_zone(zone) 5956 zone_pcp_update(zone, 0); 5957 } 5958 5959 /* 5960 * Initialise min_free_kbytes. 5961 * 5962 * For small machines we want it small (128k min). For large machines 5963 * we want it large (256MB max). But it is not linear, because network 5964 * bandwidth does not increase linearly with machine size. We use 5965 * 5966 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 5967 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 5968 * 5969 * which yields 5970 * 5971 * 16MB: 512k 5972 * 32MB: 724k 5973 * 64MB: 1024k 5974 * 128MB: 1448k 5975 * 256MB: 2048k 5976 * 512MB: 2896k 5977 * 1024MB: 4096k 5978 * 2048MB: 5792k 5979 * 4096MB: 8192k 5980 * 8192MB: 11584k 5981 * 16384MB: 16384k 5982 */ 5983 void calculate_min_free_kbytes(void) 5984 { 5985 unsigned long lowmem_kbytes; 5986 int new_min_free_kbytes; 5987 5988 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 5989 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 5990 5991 if (new_min_free_kbytes > user_min_free_kbytes) 5992 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 5993 else 5994 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 5995 new_min_free_kbytes, user_min_free_kbytes); 5996 5997 } 5998 5999 int __meminit init_per_zone_wmark_min(void) 6000 { 6001 calculate_min_free_kbytes(); 6002 setup_per_zone_wmarks(); 6003 refresh_zone_stat_thresholds(); 6004 setup_per_zone_lowmem_reserve(); 6005 6006 #ifdef CONFIG_NUMA 6007 setup_min_unmapped_ratio(); 6008 setup_min_slab_ratio(); 6009 #endif 6010 6011 khugepaged_min_free_kbytes_update(); 6012 6013 return 0; 6014 } 6015 postcore_initcall(init_per_zone_wmark_min) 6016 6017 /* 6018 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6019 * that we can call two helper functions whenever min_free_kbytes 6020 * changes. 
6021 */ 6022 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6023 void *buffer, size_t *length, loff_t *ppos) 6024 { 6025 int rc; 6026 6027 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6028 if (rc) 6029 return rc; 6030 6031 if (write) { 6032 user_min_free_kbytes = min_free_kbytes; 6033 setup_per_zone_wmarks(); 6034 } 6035 return 0; 6036 } 6037 6038 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6039 void *buffer, size_t *length, loff_t *ppos) 6040 { 6041 int rc; 6042 6043 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6044 if (rc) 6045 return rc; 6046 6047 if (write) 6048 setup_per_zone_wmarks(); 6049 6050 return 0; 6051 } 6052 6053 #ifdef CONFIG_NUMA 6054 static void setup_min_unmapped_ratio(void) 6055 { 6056 pg_data_t *pgdat; 6057 struct zone *zone; 6058 6059 for_each_online_pgdat(pgdat) 6060 pgdat->min_unmapped_pages = 0; 6061 6062 for_each_zone(zone) 6063 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6064 sysctl_min_unmapped_ratio) / 100; 6065 } 6066 6067 6068 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6069 void *buffer, size_t *length, loff_t *ppos) 6070 { 6071 int rc; 6072 6073 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6074 if (rc) 6075 return rc; 6076 6077 setup_min_unmapped_ratio(); 6078 6079 return 0; 6080 } 6081 6082 static void setup_min_slab_ratio(void) 6083 { 6084 pg_data_t *pgdat; 6085 struct zone *zone; 6086 6087 for_each_online_pgdat(pgdat) 6088 pgdat->min_slab_pages = 0; 6089 6090 for_each_zone(zone) 6091 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6092 sysctl_min_slab_ratio) / 100; 6093 } 6094 6095 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6096 void *buffer, size_t *length, loff_t *ppos) 6097 { 6098 int rc; 6099 6100 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6101 if (rc) 6102 return rc; 6103 6104 setup_min_slab_ratio(); 6105 6106 return 0; 6107 } 6108 #endif 6109 6110 /* 6111 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6112 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 6113 * whenever sysctl_lowmem_reserve_ratio changes. 6114 * 6115 * The reserve ratio obviously has absolutely no relation with the 6116 * minimum watermarks. The lowmem reserve ratio can only make sense 6117 * if in function of the boot time zone sizes. 6118 */ 6119 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6120 int write, void *buffer, size_t *length, loff_t *ppos) 6121 { 6122 int i; 6123 6124 proc_dointvec_minmax(table, write, buffer, length, ppos); 6125 6126 for (i = 0; i < MAX_NR_ZONES; i++) { 6127 if (sysctl_lowmem_reserve_ratio[i] < 1) 6128 sysctl_lowmem_reserve_ratio[i] = 0; 6129 } 6130 6131 setup_per_zone_lowmem_reserve(); 6132 return 0; 6133 } 6134 6135 /* 6136 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6137 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6138 * pagelist can have before it gets flushed back to buddy allocator. 
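 *
 * Rough illustration with hypothetical numbers: an 8GiB zone (~2097152
 * 4KiB pages) with the minimum fraction of 8 and 16 local CPUs gives
 * zone_highsize() roughly 2097152 / 8 / 16 = 16384 pages (~64MiB) of
 * pcp->high per CPU.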
6139 */ 6140 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6141 int write, void *buffer, size_t *length, loff_t *ppos) 6142 { 6143 struct zone *zone; 6144 int old_percpu_pagelist_high_fraction; 6145 int ret; 6146 6147 mutex_lock(&pcp_batch_high_lock); 6148 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6149 6150 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6151 if (!write || ret < 0) 6152 goto out; 6153 6154 /* Sanity checking to avoid pcp imbalance */ 6155 if (percpu_pagelist_high_fraction && 6156 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6157 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6158 ret = -EINVAL; 6159 goto out; 6160 } 6161 6162 /* No change? */ 6163 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6164 goto out; 6165 6166 for_each_populated_zone(zone) 6167 zone_set_pageset_high_and_batch(zone, 0); 6168 out: 6169 mutex_unlock(&pcp_batch_high_lock); 6170 return ret; 6171 } 6172 6173 static struct ctl_table page_alloc_sysctl_table[] = { 6174 { 6175 .procname = "min_free_kbytes", 6176 .data = &min_free_kbytes, 6177 .maxlen = sizeof(min_free_kbytes), 6178 .mode = 0644, 6179 .proc_handler = min_free_kbytes_sysctl_handler, 6180 .extra1 = SYSCTL_ZERO, 6181 }, 6182 { 6183 .procname = "watermark_boost_factor", 6184 .data = &watermark_boost_factor, 6185 .maxlen = sizeof(watermark_boost_factor), 6186 .mode = 0644, 6187 .proc_handler = proc_dointvec_minmax, 6188 .extra1 = SYSCTL_ZERO, 6189 }, 6190 { 6191 .procname = "watermark_scale_factor", 6192 .data = &watermark_scale_factor, 6193 .maxlen = sizeof(watermark_scale_factor), 6194 .mode = 0644, 6195 .proc_handler = watermark_scale_factor_sysctl_handler, 6196 .extra1 = SYSCTL_ONE, 6197 .extra2 = SYSCTL_THREE_THOUSAND, 6198 }, 6199 { 6200 .procname = "percpu_pagelist_high_fraction", 6201 .data = &percpu_pagelist_high_fraction, 6202 .maxlen = sizeof(percpu_pagelist_high_fraction), 6203 .mode = 0644, 6204 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6205 .extra1 = SYSCTL_ZERO, 6206 }, 6207 { 6208 .procname = "lowmem_reserve_ratio", 6209 .data = &sysctl_lowmem_reserve_ratio, 6210 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6211 .mode = 0644, 6212 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6213 }, 6214 #ifdef CONFIG_NUMA 6215 { 6216 .procname = "numa_zonelist_order", 6217 .data = &numa_zonelist_order, 6218 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6219 .mode = 0644, 6220 .proc_handler = numa_zonelist_order_handler, 6221 }, 6222 { 6223 .procname = "min_unmapped_ratio", 6224 .data = &sysctl_min_unmapped_ratio, 6225 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6226 .mode = 0644, 6227 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6228 .extra1 = SYSCTL_ZERO, 6229 .extra2 = SYSCTL_ONE_HUNDRED, 6230 }, 6231 { 6232 .procname = "min_slab_ratio", 6233 .data = &sysctl_min_slab_ratio, 6234 .maxlen = sizeof(sysctl_min_slab_ratio), 6235 .mode = 0644, 6236 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6237 .extra1 = SYSCTL_ZERO, 6238 .extra2 = SYSCTL_ONE_HUNDRED, 6239 }, 6240 #endif 6241 }; 6242 6243 void __init page_alloc_sysctl_init(void) 6244 { 6245 register_sysctl_init("vm", page_alloc_sysctl_table); 6246 } 6247 6248 #ifdef CONFIG_CONTIG_ALLOC 6249 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6250 static void alloc_contig_dump_pages(struct list_head *page_list) 6251 { 6252 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6253 6254 if 
(DYNAMIC_DEBUG_BRANCH(descriptor)) { 6255 struct page *page; 6256 6257 dump_stack(); 6258 list_for_each_entry(page, page_list, lru) 6259 dump_page(page, "migration failure"); 6260 } 6261 } 6262 6263 /* 6264 * [start, end) must belong to a single zone. 6265 * @migratetype: using migratetype to filter the type of migration in 6266 * trace_mm_alloc_contig_migrate_range_info. 6267 */ 6268 int __alloc_contig_migrate_range(struct compact_control *cc, 6269 unsigned long start, unsigned long end, 6270 int migratetype) 6271 { 6272 /* This function is based on compact_zone() from compaction.c. */ 6273 unsigned int nr_reclaimed; 6274 unsigned long pfn = start; 6275 unsigned int tries = 0; 6276 int ret = 0; 6277 struct migration_target_control mtc = { 6278 .nid = zone_to_nid(cc->zone), 6279 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 6280 .reason = MR_CONTIG_RANGE, 6281 }; 6282 struct page *page; 6283 unsigned long total_mapped = 0; 6284 unsigned long total_migrated = 0; 6285 unsigned long total_reclaimed = 0; 6286 6287 lru_cache_disable(); 6288 6289 while (pfn < end || !list_empty(&cc->migratepages)) { 6290 if (fatal_signal_pending(current)) { 6291 ret = -EINTR; 6292 break; 6293 } 6294 6295 if (list_empty(&cc->migratepages)) { 6296 cc->nr_migratepages = 0; 6297 ret = isolate_migratepages_range(cc, pfn, end); 6298 if (ret && ret != -EAGAIN) 6299 break; 6300 pfn = cc->migrate_pfn; 6301 tries = 0; 6302 } else if (++tries == 5) { 6303 ret = -EBUSY; 6304 break; 6305 } 6306 6307 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6308 &cc->migratepages); 6309 cc->nr_migratepages -= nr_reclaimed; 6310 6311 if (trace_mm_alloc_contig_migrate_range_info_enabled()) { 6312 total_reclaimed += nr_reclaimed; 6313 list_for_each_entry(page, &cc->migratepages, lru) { 6314 struct folio *folio = page_folio(page); 6315 6316 total_mapped += folio_mapped(folio) * 6317 folio_nr_pages(folio); 6318 } 6319 } 6320 6321 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6322 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6323 6324 if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret) 6325 total_migrated += cc->nr_migratepages; 6326 6327 /* 6328 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6329 * to retry again over this error, so do the same here. 6330 */ 6331 if (ret == -ENOMEM) 6332 break; 6333 } 6334 6335 lru_cache_enable(); 6336 if (ret < 0) { 6337 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6338 alloc_contig_dump_pages(&cc->migratepages); 6339 putback_movable_pages(&cc->migratepages); 6340 } 6341 6342 trace_mm_alloc_contig_migrate_range_info(start, end, migratetype, 6343 total_migrated, 6344 total_reclaimed, 6345 total_mapped); 6346 return (ret < 0) ? ret : 0; 6347 } 6348 6349 static void split_free_pages(struct list_head *list) 6350 { 6351 int order; 6352 6353 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6354 struct page *page, *next; 6355 int nr_pages = 1 << order; 6356 6357 list_for_each_entry_safe(page, next, &list[order], lru) { 6358 int i; 6359 6360 post_alloc_hook(page, order, __GFP_MOVABLE); 6361 if (!order) 6362 continue; 6363 6364 split_page(page, order); 6365 6366 /* Add all subpages to the order-0 head, in sequence. 
*/ 6367 list_del(&page->lru); 6368 for (i = 0; i < nr_pages; i++) 6369 list_add_tail(&page[i].lru, &list[0]); 6370 } 6371 } 6372 } 6373 6374 /** 6375 * alloc_contig_range() -- tries to allocate given range of pages 6376 * @start: start PFN to allocate 6377 * @end: one-past-the-last PFN to allocate 6378 * @migratetype: migratetype of the underlying pageblocks (either 6379 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6380 * in range must have the same migratetype and it must 6381 * be either of the two. 6382 * @gfp_mask: GFP mask to use during compaction 6383 * 6384 * The PFN range does not have to be pageblock aligned. The PFN range must 6385 * belong to a single zone. 6386 * 6387 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6388 * pageblocks in the range. Once isolated, the pageblocks should not 6389 * be modified by others. 6390 * 6391 * Return: zero on success or negative error code. On success all 6392 * pages which PFN is in [start, end) are allocated for the caller and 6393 * need to be freed with free_contig_range(). 6394 */ 6395 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 6396 unsigned migratetype, gfp_t gfp_mask) 6397 { 6398 unsigned long outer_start, outer_end; 6399 int ret = 0; 6400 6401 struct compact_control cc = { 6402 .nr_migratepages = 0, 6403 .order = -1, 6404 .zone = page_zone(pfn_to_page(start)), 6405 .mode = MIGRATE_SYNC, 6406 .ignore_skip_hint = true, 6407 .no_set_skip_hint = true, 6408 .gfp_mask = current_gfp_context(gfp_mask), 6409 .alloc_contig = true, 6410 }; 6411 INIT_LIST_HEAD(&cc.migratepages); 6412 6413 /* 6414 * What we do here is we mark all pageblocks in range as 6415 * MIGRATE_ISOLATE. Because pageblock and max order pages may 6416 * have different sizes, and due to the way page allocator 6417 * work, start_isolate_page_range() has special handlings for this. 6418 * 6419 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6420 * migrate the pages from an unaligned range (ie. pages that 6421 * we are interested in). This will put all the pages in 6422 * range back to page allocator as MIGRATE_ISOLATE. 6423 * 6424 * When this is done, we take the pages in range from page 6425 * allocator removing them from the buddy system. This way 6426 * page allocator will never consider using them. 6427 * 6428 * This lets us mark the pageblocks back as 6429 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6430 * aligned range but not in the unaligned, original range are 6431 * put back to page allocator so that buddy can use them. 6432 */ 6433 6434 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); 6435 if (ret) 6436 goto done; 6437 6438 drain_all_pages(cc.zone); 6439 6440 /* 6441 * In case of -EBUSY, we'd like to know which page causes problem. 6442 * So, just fall through. test_pages_isolated() has a tracepoint 6443 * which will report the busy page. 6444 * 6445 * It is possible that busy pages could become available before 6446 * the call to test_pages_isolated, and the range will actually be 6447 * allocated. So, if we fall through be sure to clear ret so that 6448 * -EBUSY is not accidentally used or returned to caller. 6449 */ 6450 ret = __alloc_contig_migrate_range(&cc, start, end, migratetype); 6451 if (ret && ret != -EBUSY) 6452 goto done; 6453 ret = 0; 6454 6455 /* 6456 * Pages from [start, end) are within a pageblock_nr_pages 6457 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6458 * more, all pages in [start, end) are free in page allocator. 
6459 * What we are going to do is to allocate all pages from 6460 * [start, end) (that is remove them from page allocator). 6461 * 6462 * The only problem is that pages at the beginning and at the 6463 * end of interesting range may be not aligned with pages that 6464 * page allocator holds, ie. they can be part of higher order 6465 * pages. Because of this, we reserve the bigger range and 6466 * once this is done free the pages we are not interested in. 6467 * 6468 * We don't have to hold zone->lock here because the pages are 6469 * isolated thus they won't get removed from buddy. 6470 */ 6471 outer_start = find_large_buddy(start); 6472 6473 /* Make sure the range is really isolated. */ 6474 if (test_pages_isolated(outer_start, end, 0)) { 6475 ret = -EBUSY; 6476 goto done; 6477 } 6478 6479 /* Grab isolated pages from freelists. */ 6480 outer_end = isolate_freepages_range(&cc, outer_start, end); 6481 if (!outer_end) { 6482 ret = -EBUSY; 6483 goto done; 6484 } 6485 6486 if (!(gfp_mask & __GFP_COMP)) { 6487 split_free_pages(cc.freepages); 6488 6489 /* Free head and tail (if any) */ 6490 if (start != outer_start) 6491 free_contig_range(outer_start, start - outer_start); 6492 if (end != outer_end) 6493 free_contig_range(end, outer_end - end); 6494 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 6495 struct page *head = pfn_to_page(start); 6496 int order = ilog2(end - start); 6497 6498 check_new_pages(head, order); 6499 prep_new_page(head, order, gfp_mask, 0); 6500 } else { 6501 ret = -EINVAL; 6502 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 6503 start, end, outer_start, outer_end); 6504 } 6505 done: 6506 undo_isolate_page_range(start, end, migratetype); 6507 return ret; 6508 } 6509 EXPORT_SYMBOL(alloc_contig_range_noprof); 6510 6511 static int __alloc_contig_pages(unsigned long start_pfn, 6512 unsigned long nr_pages, gfp_t gfp_mask) 6513 { 6514 unsigned long end_pfn = start_pfn + nr_pages; 6515 6516 return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE, 6517 gfp_mask); 6518 } 6519 6520 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6521 unsigned long nr_pages) 6522 { 6523 unsigned long i, end_pfn = start_pfn + nr_pages; 6524 struct page *page; 6525 6526 for (i = start_pfn; i < end_pfn; i++) { 6527 page = pfn_to_online_page(i); 6528 if (!page) 6529 return false; 6530 6531 if (page_zone(page) != z) 6532 return false; 6533 6534 if (PageReserved(page)) 6535 return false; 6536 6537 if (PageHuge(page)) 6538 return false; 6539 } 6540 return true; 6541 } 6542 6543 static bool zone_spans_last_pfn(const struct zone *zone, 6544 unsigned long start_pfn, unsigned long nr_pages) 6545 { 6546 unsigned long last_pfn = start_pfn + nr_pages - 1; 6547 6548 return zone_spans_pfn(zone, last_pfn); 6549 } 6550 6551 /** 6552 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6553 * @nr_pages: Number of contiguous pages to allocate 6554 * @gfp_mask: GFP mask to limit search and used during compaction 6555 * @nid: Target node 6556 * @nodemask: Mask for other possible nodes 6557 * 6558 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6559 * on an applicable zonelist to find a contiguous pfn range which can then be 6560 * tried for allocation with alloc_contig_range(). This routine is intended 6561 * for allocation requests which can not be fulfilled with the buddy allocator. 6562 * 6563 * The allocated memory is always aligned to a page boundary. 
If nr_pages is a 6564 * power of two, then allocated range is also guaranteed to be aligned to same 6565 * nr_pages (e.g. 1GB request would be aligned to 1GB). 6566 * 6567 * Allocated pages can be freed with free_contig_range() or by manually calling 6568 * __free_page() on each allocated page. 6569 * 6570 * Return: pointer to contiguous pages on success, or NULL if not successful. 6571 */ 6572 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 6573 int nid, nodemask_t *nodemask) 6574 { 6575 unsigned long ret, pfn, flags; 6576 struct zonelist *zonelist; 6577 struct zone *zone; 6578 struct zoneref *z; 6579 6580 zonelist = node_zonelist(nid, gfp_mask); 6581 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6582 gfp_zone(gfp_mask), nodemask) { 6583 spin_lock_irqsave(&zone->lock, flags); 6584 6585 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6586 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6587 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6588 /* 6589 * We release the zone lock here because 6590 * alloc_contig_range() will also lock the zone 6591 * at some point. If there's an allocation 6592 * spinning on this lock, it may win the race 6593 * and cause alloc_contig_range() to fail... 6594 */ 6595 spin_unlock_irqrestore(&zone->lock, flags); 6596 ret = __alloc_contig_pages(pfn, nr_pages, 6597 gfp_mask); 6598 if (!ret) 6599 return pfn_to_page(pfn); 6600 spin_lock_irqsave(&zone->lock, flags); 6601 } 6602 pfn += nr_pages; 6603 } 6604 spin_unlock_irqrestore(&zone->lock, flags); 6605 } 6606 return NULL; 6607 } 6608 #endif /* CONFIG_CONTIG_ALLOC */ 6609 6610 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6611 { 6612 unsigned long count = 0; 6613 struct folio *folio = pfn_folio(pfn); 6614 6615 if (folio_test_large(folio)) { 6616 int expected = folio_nr_pages(folio); 6617 6618 if (nr_pages == expected) 6619 folio_put(folio); 6620 else 6621 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n", 6622 pfn, nr_pages, expected); 6623 return; 6624 } 6625 6626 for (; nr_pages--; pfn++) { 6627 struct page *page = pfn_to_page(pfn); 6628 6629 count += page_count(page) != 1; 6630 __free_page(page); 6631 } 6632 WARN(count != 0, "%lu pages are still in use!\n", count); 6633 } 6634 EXPORT_SYMBOL(free_contig_range); 6635 6636 /* 6637 * Effectively disable pcplists for the zone by setting the high limit to 0 6638 * and draining all cpus. A concurrent page freeing on another CPU that's about 6639 * to put the page on pcplist will either finish before the drain and the page 6640 * will be drained, or observe the new high limit and skip the pcplist. 6641 * 6642 * Must be paired with a call to zone_pcp_enable(). 
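 *
 * The pcp_batch_high_lock mutex is held from zone_pcp_disable() until the
 * matching zone_pcp_enable(), so no other updater can change ->high or
 * ->batch in between; callers such as memory offlining typically wrap the
 * whole critical operation in this pair.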
6643 */ 6644 void zone_pcp_disable(struct zone *zone) 6645 { 6646 mutex_lock(&pcp_batch_high_lock); 6647 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 6648 __drain_all_pages(zone, true); 6649 } 6650 6651 void zone_pcp_enable(struct zone *zone) 6652 { 6653 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 6654 zone->pageset_high_max, zone->pageset_batch); 6655 mutex_unlock(&pcp_batch_high_lock); 6656 } 6657 6658 void zone_pcp_reset(struct zone *zone) 6659 { 6660 int cpu; 6661 struct per_cpu_zonestat *pzstats; 6662 6663 if (zone->per_cpu_pageset != &boot_pageset) { 6664 for_each_online_cpu(cpu) { 6665 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6666 drain_zonestat(zone, pzstats); 6667 } 6668 free_percpu(zone->per_cpu_pageset); 6669 zone->per_cpu_pageset = &boot_pageset; 6670 if (zone->per_cpu_zonestats != &boot_zonestats) { 6671 free_percpu(zone->per_cpu_zonestats); 6672 zone->per_cpu_zonestats = &boot_zonestats; 6673 } 6674 } 6675 } 6676 6677 #ifdef CONFIG_MEMORY_HOTREMOVE 6678 /* 6679 * All pages in the range must be in a single zone, must not contain holes, 6680 * must span full sections, and must be isolated before calling this function. 6681 * 6682 * Returns the number of managed (non-PageOffline()) pages in the range: the 6683 * number of pages for which memory offlining code must adjust managed page 6684 * counters using adjust_managed_page_count(). 6685 */ 6686 unsigned long __offline_isolated_pages(unsigned long start_pfn, 6687 unsigned long end_pfn) 6688 { 6689 unsigned long already_offline = 0, flags; 6690 unsigned long pfn = start_pfn; 6691 struct page *page; 6692 struct zone *zone; 6693 unsigned int order; 6694 6695 offline_mem_sections(pfn, end_pfn); 6696 zone = page_zone(pfn_to_page(pfn)); 6697 spin_lock_irqsave(&zone->lock, flags); 6698 while (pfn < end_pfn) { 6699 page = pfn_to_page(pfn); 6700 /* 6701 * The HWPoisoned page may be not in buddy system, and 6702 * page_count() is not 0. 6703 */ 6704 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6705 pfn++; 6706 continue; 6707 } 6708 /* 6709 * At this point all remaining PageOffline() pages have a 6710 * reference count of 0 and can simply be skipped. 6711 */ 6712 if (PageOffline(page)) { 6713 BUG_ON(page_count(page)); 6714 BUG_ON(PageBuddy(page)); 6715 already_offline++; 6716 pfn++; 6717 continue; 6718 } 6719 6720 BUG_ON(page_count(page)); 6721 BUG_ON(!PageBuddy(page)); 6722 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 6723 order = buddy_order(page); 6724 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 6725 pfn += (1 << order); 6726 } 6727 spin_unlock_irqrestore(&zone->lock, flags); 6728 6729 return end_pfn - start_pfn - already_offline; 6730 } 6731 #endif 6732 6733 /* 6734 * This function returns a stable result only if called under zone lock. 
6735 */ 6736 bool is_free_buddy_page(const struct page *page) 6737 { 6738 unsigned long pfn = page_to_pfn(page); 6739 unsigned int order; 6740 6741 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6742 const struct page *head = page - (pfn & ((1 << order) - 1)); 6743 6744 if (PageBuddy(head) && 6745 buddy_order_unsafe(head) >= order) 6746 break; 6747 } 6748 6749 return order <= MAX_PAGE_ORDER; 6750 } 6751 EXPORT_SYMBOL(is_free_buddy_page); 6752 6753 #ifdef CONFIG_MEMORY_FAILURE 6754 static inline void add_to_free_list(struct page *page, struct zone *zone, 6755 unsigned int order, int migratetype, 6756 bool tail) 6757 { 6758 __add_to_free_list(page, zone, order, migratetype, tail); 6759 account_freepages(zone, 1 << order, migratetype); 6760 } 6761 6762 /* 6763 * Break down a higher-order page in sub-pages, and keep our target out of 6764 * buddy allocator. 6765 */ 6766 static void break_down_buddy_pages(struct zone *zone, struct page *page, 6767 struct page *target, int low, int high, 6768 int migratetype) 6769 { 6770 unsigned long size = 1 << high; 6771 struct page *current_buddy; 6772 6773 while (high > low) { 6774 high--; 6775 size >>= 1; 6776 6777 if (target >= &page[size]) { 6778 current_buddy = page; 6779 page = page + size; 6780 } else { 6781 current_buddy = page + size; 6782 } 6783 6784 if (set_page_guard(zone, current_buddy, high)) 6785 continue; 6786 6787 add_to_free_list(current_buddy, zone, high, migratetype, false); 6788 set_buddy_order(current_buddy, high); 6789 } 6790 } 6791 6792 /* 6793 * Take a page that will be marked as poisoned off the buddy allocator. 6794 */ 6795 bool take_page_off_buddy(struct page *page) 6796 { 6797 struct zone *zone = page_zone(page); 6798 unsigned long pfn = page_to_pfn(page); 6799 unsigned long flags; 6800 unsigned int order; 6801 bool ret = false; 6802 6803 spin_lock_irqsave(&zone->lock, flags); 6804 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6805 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6806 int page_order = buddy_order(page_head); 6807 6808 if (PageBuddy(page_head) && page_order >= order) { 6809 unsigned long pfn_head = page_to_pfn(page_head); 6810 int migratetype = get_pfnblock_migratetype(page_head, 6811 pfn_head); 6812 6813 del_page_from_free_list(page_head, zone, page_order, 6814 migratetype); 6815 break_down_buddy_pages(zone, page_head, page, 0, 6816 page_order, migratetype); 6817 SetPageHWPoisonTakenOff(page); 6818 ret = true; 6819 break; 6820 } 6821 if (page_count(page_head) > 0) 6822 break; 6823 } 6824 spin_unlock_irqrestore(&zone->lock, flags); 6825 return ret; 6826 } 6827 6828 /* 6829 * Cancel takeoff done by take_page_off_buddy(). 
6830 */ 6831 bool put_page_back_buddy(struct page *page) 6832 { 6833 struct zone *zone = page_zone(page); 6834 unsigned long flags; 6835 bool ret = false; 6836 6837 spin_lock_irqsave(&zone->lock, flags); 6838 if (put_page_testzero(page)) { 6839 unsigned long pfn = page_to_pfn(page); 6840 int migratetype = get_pfnblock_migratetype(page, pfn); 6841 6842 ClearPageHWPoisonTakenOff(page); 6843 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 6844 if (TestClearPageHWPoison(page)) { 6845 ret = true; 6846 } 6847 } 6848 spin_unlock_irqrestore(&zone->lock, flags); 6849 6850 return ret; 6851 } 6852 #endif 6853 6854 #ifdef CONFIG_ZONE_DMA 6855 bool has_managed_dma(void) 6856 { 6857 struct pglist_data *pgdat; 6858 6859 for_each_online_pgdat(pgdat) { 6860 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 6861 6862 if (managed_zone(zone)) 6863 return true; 6864 } 6865 return false; 6866 } 6867 #endif /* CONFIG_ZONE_DMA */ 6868 6869 #ifdef CONFIG_UNACCEPTED_MEMORY 6870 6871 /* Counts number of zones with unaccepted pages. */ 6872 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 6873 6874 static bool lazy_accept = true; 6875 6876 static int __init accept_memory_parse(char *p) 6877 { 6878 if (!strcmp(p, "lazy")) { 6879 lazy_accept = true; 6880 return 0; 6881 } else if (!strcmp(p, "eager")) { 6882 lazy_accept = false; 6883 return 0; 6884 } else { 6885 return -EINVAL; 6886 } 6887 } 6888 early_param("accept_memory", accept_memory_parse); 6889 6890 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6891 { 6892 phys_addr_t start = page_to_phys(page); 6893 6894 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 6895 } 6896 6897 static void __accept_page(struct zone *zone, unsigned long *flags, 6898 struct page *page) 6899 { 6900 bool last; 6901 6902 list_del(&page->lru); 6903 last = list_empty(&zone->unaccepted_pages); 6904 6905 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6906 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 6907 __ClearPageUnaccepted(page); 6908 spin_unlock_irqrestore(&zone->lock, *flags); 6909 6910 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 6911 6912 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 6913 6914 if (last) 6915 static_branch_dec(&zones_with_unaccepted_pages); 6916 } 6917 6918 void accept_page(struct page *page) 6919 { 6920 struct zone *zone = page_zone(page); 6921 unsigned long flags; 6922 6923 spin_lock_irqsave(&zone->lock, flags); 6924 if (!PageUnaccepted(page)) { 6925 spin_unlock_irqrestore(&zone->lock, flags); 6926 return; 6927 } 6928 6929 /* Unlocks zone->lock */ 6930 __accept_page(zone, &flags, page); 6931 } 6932 6933 static bool try_to_accept_memory_one(struct zone *zone) 6934 { 6935 unsigned long flags; 6936 struct page *page; 6937 6938 spin_lock_irqsave(&zone->lock, flags); 6939 page = list_first_entry_or_null(&zone->unaccepted_pages, 6940 struct page, lru); 6941 if (!page) { 6942 spin_unlock_irqrestore(&zone->lock, flags); 6943 return false; 6944 } 6945 6946 /* Unlocks zone->lock */ 6947 __accept_page(zone, &flags, page); 6948 6949 return true; 6950 } 6951 6952 static inline bool has_unaccepted_memory(void) 6953 { 6954 return static_branch_unlikely(&zones_with_unaccepted_pages); 6955 } 6956 6957 static bool cond_accept_memory(struct zone *zone, unsigned int order) 6958 { 6959 long to_accept; 6960 bool ret = false; 6961 6962 if (!has_unaccepted_memory()) 6963 return false; 6964 6965 if (list_empty(&zone->unaccepted_pages)) 6966 return false; 6967 6968 
/* How much to accept to get to promo watermark? */ 6969 to_accept = promo_wmark_pages(zone) - 6970 (zone_page_state(zone, NR_FREE_PAGES) - 6971 __zone_watermark_unusable_free(zone, order, 0) - 6972 zone_page_state(zone, NR_UNACCEPTED)); 6973 6974 while (to_accept > 0) { 6975 if (!try_to_accept_memory_one(zone)) 6976 break; 6977 ret = true; 6978 to_accept -= MAX_ORDER_NR_PAGES; 6979 } 6980 6981 return ret; 6982 } 6983 6984 static bool __free_unaccepted(struct page *page) 6985 { 6986 struct zone *zone = page_zone(page); 6987 unsigned long flags; 6988 bool first = false; 6989 6990 if (!lazy_accept) 6991 return false; 6992 6993 spin_lock_irqsave(&zone->lock, flags); 6994 first = list_empty(&zone->unaccepted_pages); 6995 list_add_tail(&page->lru, &zone->unaccepted_pages); 6996 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6997 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 6998 __SetPageUnaccepted(page); 6999 spin_unlock_irqrestore(&zone->lock, flags); 7000 7001 if (first) 7002 static_branch_inc(&zones_with_unaccepted_pages); 7003 7004 return true; 7005 } 7006 7007 #else 7008 7009 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7010 { 7011 return false; 7012 } 7013 7014 static bool cond_accept_memory(struct zone *zone, unsigned int order) 7015 { 7016 return false; 7017 } 7018 7019 static bool __free_unaccepted(struct page *page) 7020 { 7021 BUILD_BUG(); 7022 return false; 7023 } 7024 7025 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7026