// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *	    (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/pagevec.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <linux/cacheinfo.h>
#include <linux/pgalloc_tag.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *	 to allow for optimizations when handing back either fresh pages
 *	 (memory onlining) or untouched pages (page isolation, free page
 *	 reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up and lock a per-cpu variable with an embedded
 * spinlock. The return value should be used with the equivalent unlock
 * helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_trylock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock(&_ret->member)) {				\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)						\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)
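/*
 * Illustrative sketch (not part of the allocator, compiled out): the calling
 * pattern the trylock helpers above are designed for, modelled on the
 * alloc/free fastpaths later in this file. The function name is made up for
 * illustration; it only documents how pcp_trylock_prepare()/finish() pair
 * with pcp_spin_trylock()/pcp_spin_unlock().
 */
#if 0	/* example only */
static bool pcp_lock_usage_example(struct zone *zone)
{
	unsigned long UP_flags;
	struct per_cpu_pages *pcp;

	/* On UP, disables IRQs so the always-succeeding trylock is safe. */
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
	if (!pcp) {
		pcp_trylock_finish(UP_flags);
		return false;
	}
	/* ... operate on pcp->lists and pcp->count under pcp->lock ... */
	pcp_spin_unlock(pcp);		/* also unpins the task */
	pcp_trylock_finish(UP_flags);
	return true;
}
#endif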
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in
 *	ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

static bool page_contains_unaccepted(struct page *page, unsigned int order);
static bool cond_accept_memory(struct zone *zone, unsigned int order);
static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids a
 * section-mismatch warning and ensures that the function body can be
 * unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
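/*
 * Worked example of the bit layout used above (illustrative; exact values
 * are config-dependent). With NR_PAGEBLOCK_BITS == 4, pageblock_order == 9
 * and 64-bit longs, each bitmap word covers 16 pageblocks. For a pageblock
 * at section-relative pfn 0x2a00:
 *
 *	bitidx	    = (0x2a00 >> 9) * 4 = 21 * 4 = 84
 *	word_bitidx = 84 / 64		= 1	(second bitmap word)
 *	bitidx	   %= 64		-> 20	(bit offset in that word)
 *
 * get_pfnblock_flags_mask() reads the whole word once with READ_ONCE() and
 * shifts/masks out the 4 flag bits, while set_pfnblock_flags_mask() updates
 * them with a cmpxchg loop so concurrent updates to neighbouring pageblocks
 * sharing the same word are not lost.
 */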
void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		ret = !zone_spans_pfn(zone, pfn);
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return true;
	if (zone != page_zone(page))
		return true;

	return false;
}
#else
static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return false;
}
#endif
static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	if (PageBuddy(page))
		__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	bool __maybe_unused movable;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != HPAGE_PMD_ORDER);

		movable = migratetype == MIGRATE_MOVABLE;

		return NR_LOWORDER_PCP_LISTS + movable;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex >= NR_LOWORDER_PCP_LISTS)
		order = HPAGE_PMD_ORDER;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == HPAGE_PMD_ORDER)
		return true;
#endif
	return false;
}
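/*
 * Illustrative mapping for the two helpers above, assuming the common
 * configuration of MIGRATE_PCPTYPES == 3 and PAGE_ALLOC_COSTLY_ORDER == 3
 * (actual values depend on the config):
 *
 *	order 0: pindex 0..2	(UNMOVABLE, MOVABLE, RECLAIMABLE)
 *	order 1: pindex 3..5
 *	order 2: pindex 6..8
 *	order 3: pindex 9..11
 *	THP:	 pindex NR_LOWORDER_PCP_LISTS (+1 if MIGRATE_MOVABLE)
 *
 * pindex_to_order() is the inverse, which is why both helpers must agree
 * on MIGRATE_PCPTYPES as the stride.
 */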
/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock
	 * unless compaction is also requesting movable pages.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa, but no more than normal fallback logic, which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
	    capc->cc->migratetype != MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

static inline void account_freepages(struct zone *zone, int nr_pages,
				     int migratetype)
{
	if (is_migrate_isolate(migratetype))
		return;

	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);

	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
/* Used for pages not on another list */
static inline void __add_to_free_list(struct page *page, struct zone *zone,
				      unsigned int order, int migratetype,
				      bool tail)
{
	struct free_area *area = &zone->free_area[order];

	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), migratetype, 1 << order);

	if (tail)
		list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	else
		list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int old_mt, int new_mt)
{
	struct free_area *area = &zone->free_area[order];

	/* Free page moving can fail, so it happens before the type update */
	VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), old_mt, 1 << order);

	list_move_tail(&page->buddy_list, &area->free_list[new_mt]);

	account_freepages(zone, -(1 << order), old_mt);
	account_freepages(zone, 1 << order, new_mt);
}

static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
					     unsigned int order, int migratetype)
{
	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), migratetype, 1 << order);

	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order, int migratetype)
{
	__del_page_from_free_list(page, zone, order, migratetype);
	account_freepages(zone, -(1 << order), migratetype);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, buddy_list);
}

/*
 * If this is less than the 2nd largest possible page, check if the buddy
 * of the next-higher order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a 2-level higher-order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_PAGE_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	account_freepages(zone, 1 << order, migratetype);

	while (order < MAX_PAGE_ORDER) {
		int buddy_mt = migratetype;

		if (compaction_capture(capc, page, order, migratetype)) {
			account_freepages(zone, -(1 << order), migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);

			if (migratetype != buddy_mt &&
			    (!migratetype_is_mergeable(migratetype) ||
			     !migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order);
		else
			__del_page_from_free_list(buddy, zone, order, buddy_mt);

		if (unlikely(buddy_mt != migratetype)) {
			/*
			 * Match buddy type. This ensures that an
			 * expand() down the line puts the sub-blocks
			 * on the right freelists.
			 */
			set_pageblock_migratetype(buddy, migratetype);
		}

		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	__add_to_free_list(page, zone, order, migratetype, to_tail);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}
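/*
 * Worked example of the merge arithmetic in __free_one_page() (illustrative
 * numbers): buddies of order n differ only in bit n of the pfn, so for
 * pfn == 0x1708 and order == 3:
 *
 *	buddy_pfn    = 0x1708 ^ (1 << 3) = 0x1700
 *	combined_pfn = buddy_pfn & pfn	 = 0x1700  (head of the order-4 pair)
 *
 * If the buddy at 0x1700 is free, the loop deletes it from the order-3
 * freelist, advances page/pfn to 0x1700 and retries at order 4, repeating
 * until a buddy is busy or MAX_PAGE_ORDER is reached.
 */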
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
#ifdef CONFIG_PAGE_POOL
			((page->pp_magic & ~0x3UL) == PP_SIGNATURE) |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
#ifdef CONFIG_PAGE_POOL
	if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE))
		bad_reason = "page_pool leak";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static inline bool is_check_pages_enabled(void)
{
	return static_branch_unlikely(&check_pages_enabled);
}
static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
	struct folio *folio = (struct folio *)head_page;
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!is_check_pages_enabled()) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: these may be in place of ->mapping */
		if (unlikely(folio_entire_mapcount(folio))) {
			bad_page(page, "nonzero entire_mapcount");
			goto out;
		}
		if (unlikely(folio_large_mapcount(folio))) {
			bad_page(page, "nonzero large_mapcount");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
			bad_page(page, "nonzero nr_pages_mapped");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_pincount))) {
			bad_page(page, "nonzero pincount");
			goto out;
		}
		break;
	case 2:
		/* the second tail page: deferred_list overlaps ->mapping */
		if (unlikely(!list_empty(&folio->_deferred_list) &&
		    folio_test_partially_mapped(folio))) {
			bad_page(page, "partially mapped folio on deferred list");
			goto out;
		}
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. For generic KASAN: deferred memory initialization has not yet completed.
 *    Tag-based KASAN modes skip pages freed via deferred memory initialization
 *    using page tags instead (see below).
 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
 *    that error detection is disabled for accesses via the page address.
 *
 * Pages will have match-all tags in the following circumstances:
 *
 * 1. Pages are being initialized for the first time, including during deferred
 *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
 *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
 * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems on large-memory systems, as deferred page
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return deferred_pages_enabled();

	return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

__always_inline bool free_pages_prepare(struct page *page,
			unsigned int order)
{
	int bad = 0;
	bool skip_kasan_poison = should_skip_kasan_poison(page);
	bool init = want_init_on_free();
	bool compound = PageCompound(page);

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/* Do not let hwpoison pages hit pcplists/buddy */
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		pgalloc_tag_sub(page, 1 << order);
		return false;
	}

	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		int i;

		if (compound)
			page[1].flags &= ~PAGE_FLAGS_SECOND;
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page)) {
		if (PageAnon(page))
			mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
		page->mapping = NULL;
	}
	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);
	pgalloc_tag_sub(page, 1 << order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					 PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}
/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	unsigned long flags;
	unsigned int order;
	struct page *page;

	/*
	 * Ensure a proper count is passed; otherwise we would get stuck in
	 * the while (list_empty(list)) loop below.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > NR_PCP_LISTS - 1)
				pindex = 0;
			list = &pcp->lists[pindex];
		} while (list_empty(list));

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			unsigned long pfn;
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			pfn = page_to_pfn(page);
			mt = get_pfnblock_migratetype(page, pfn);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}

/* Split a multi-block free page into its individual pageblocks. */
static void split_large_buddy(struct zone *zone, struct page *page,
			      unsigned long pfn, int order, fpi_t fpi)
{
	unsigned long end = pfn + (1 << order);

	VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
	/* Caller removed page from freelist, buddy info cleared! */
	VM_WARN_ON_ONCE(PageBuddy(page));

	if (order > pageblock_order)
		order = pageblock_order;

	while (pfn != end) {
		int mt = get_pfnblock_migratetype(page, pfn);

		__free_one_page(page, pfn, zone, order, mt, fpi);
		pfn += 1 << order;
		page = pfn_to_page(pfn);
	}
}

static void free_one_page(struct zone *zone, struct page *page,
			  unsigned long pfn, unsigned int order,
			  fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	split_large_buddy(zone, page, pfn, order, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}
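/*
 * Example of split_large_buddy() behavior (illustrative; assumes
 * pageblock_order == 9 and MAX_PAGE_ORDER == 10): freeing an order-10 page
 * through it frees two order-9 pageblocks independently, each with the
 * migratetype read from its own pageblock bitmap. This keeps per-pageblock
 * migratetypes authoritative when a buddy spans multiple pageblocks, e.g.
 * after isolation changed the type of only one of the blocks.
 */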
static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order))
		return;

	free_one_page(zone, page, pfn, order, fpi_flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __meminit __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 *
	 * Note that hotplugged memory pages are initialized to PageOffline().
	 * Pages freed from memblock might be marked as reserved.
	 */
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
	    unlikely(context == MEMINIT_HOTPLUG)) {
		for (loop = 0; loop < nr_pages; loop++, p++) {
			VM_WARN_ON_ONCE(PageReserved(p));
			__ClearPageOffline(p);
			set_page_count(p, 0);
		}

		/*
		 * Freeing the page with debug_pagealloc enabled will try to
		 * unmap it; some archs don't like double-unmappings, so
		 * map it first.
		 */
		debug_pagealloc_map_pages(page, nr_pages);
		adjust_managed_page_count(page, nr_pages);
	} else {
		for (loop = 0; loop < nr_pages; loop++, p++) {
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		/* memblock adjusts totalram_pages() manually. */
		atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
	}

	if (page_contains_unaccepted(page, order)) {
		if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
			return;

		accept_memory(page_to_phys(page), PAGE_SIZE << order);
	}

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}
/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration of free compaction scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zones range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 *
 * Note: the function may return non-NULL struct page even for a page block
 * which contains a memory hole (i.e. there is no physical memory for a subset
 * of the pfn range). For example, a pageblock of order MAX_PAGE_ORDER spans
 * 2 sub-sections, and the end pfn of the pageblock may be a hole even though
 * the start pfn is online and valid. This should be safe most of the time
 * because struct pages are still initialized via init_unavailable_range()
 * and pfn walkers shouldn't touch any physical memory range for which they do
 * not recognize any specific metadata in struct pages.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline unsigned int expand(struct zone *zone, struct page *page, int low,
				  int high, int migratetype)
{
	unsigned int size = 1 << high;
	unsigned int nr_added = 0;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), that will allow to
		 * merge back to allocator when buddy will be freed.
		 * Corresponding page table entries will not be touched,
		 * pages will stay not present in virtual address space
		 */
		if (set_page_guard(zone, &page[size], high))
			continue;

		__add_to_free_list(&page[size], zone, high, migratetype, false);
		set_buddy_order(&page[size], high);
		nr_added += size;
	}

	return nr_added;
}
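/*
 * Worked example of expand() (illustrative): allocating order == 2 (low)
 * from an order-5 (high) free page splits off the unused halves one order
 * at a time and returns them to the freelists:
 *
 *	order-5 block [0..31]
 *	  -> keep [0..3]   (the order-2 allocation)
 *	  -> free [16..31] as order 4, [8..15] as order 3, [4..7] as order 2
 *
 * nr_added == 16 + 8 + 4 == 28, so page_del_and_expand() below accounts
 * 32 - 28 == 4 pages as removed from the free lists.
 */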
static __always_inline void page_del_and_expand(struct zone *zone,
						struct page *page, int low,
						int high, int migratetype)
{
	int nr_pages = 1 << high;

	__del_page_from_free_list(page, zone, high, migratetype);
	nr_pages -= expand(zone, page, low, high, migratetype);
	account_freepages(zone, -nr_pages, migratetype);
}

static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		if (PageBuddy(page))
			__ClearPageBuddy(page);
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static bool check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return false;

	check_new_page_bad(page);
	return true;
}

static inline bool check_new_pages(struct page *page, unsigned int order)
{
	if (is_check_pages_enabled()) {
		for (int i = 0; i < (1 << order); i++) {
			struct page *p = page + i;

			if (check_new_page(p))
				return true;
		}
	}

	return false;
}

static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN.
	 */
	return flags & __GFP_SKIP_KASAN;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}
inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * If memory tags should be zeroed
	 * (which happens only when memory should be initialized as well).
	 */
	if (zero_tags) {
		/* Initialize both memory and memory tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Take note that memory was initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags) &&
	    kasan_unpoison_pages(page, order, init)) {
		/* Take note that memory was initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/*
		 * If memory tags have not been set by KASAN, reset the page
		 * tags to ensure page_address() dereferencing does not fault.
		 */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, initialize it now. */
	if (init)
		kernel_init_pages(page, 1 << order);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
	pgalloc_tag_add(page, current, 1 << order);
}
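/*
 * Summary of the initialization decisions in post_alloc_hook() above
 * (descriptive only; the authoritative logic is the code itself):
 *
 *	init	  = !want_init_on_free()	(free-time init already zeroed)
 *		    && want_init_on_alloc(gfp)
 *		    && !should_skip_init(gfp)	(__GFP_SKIP_ZERO w/ HW tags)
 *	zero_tags = init && (gfp & __GFP_ZEROTAGS)
 *
 * If zero_tags, tag_clear_highpage() initializes both memory and tags and
 * init is cleared; otherwise, if KASAN unpoisoning ran with integrated
 * init, KASAN has zeroed the memory and init is cleared; any init still
 * pending is finally done by kernel_init_pages(). Memory is thus
 * initialized at most once, by whichever mechanism runs first.
 */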
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;

		page_del_and_expand(zone, page, order, current_order,
				    migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which the free lists are fallen back to
 * when the free lists for the desirable migrate type are depleted.
 *
 * The other migratetypes do not have fallbacks.
 */
static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
};

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif
/*
 * Change the type of a block and move all its free pages to that
 * type's freelist.
 */
static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
				  int old_mt, int new_mt)
{
	struct page *page;
	unsigned long pfn, end_pfn;
	unsigned int order;
	int pages_moved = 0;

	VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
	end_pfn = pageblock_end_pfn(start_pfn);

	for (pfn = start_pfn; pfn < end_pfn;) {
		page = pfn_to_page(pfn);
		if (!PageBuddy(page)) {
			pfn++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);

		move_to_free_list(page, zone, order, old_mt, new_mt);

		pfn += 1 << order;
		pages_moved += 1 << order;
	}

	set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);

	return pages_moved;
}

static bool prep_move_freepages_block(struct zone *zone, struct page *page,
				      unsigned long *start_pfn,
				      int *num_free, int *num_movable)
{
	unsigned long pfn, start, end;

	pfn = page_to_pfn(page);
	start = pageblock_start_pfn(pfn);
	end = pageblock_end_pfn(pfn);

	/*
	 * The caller only has the lock for @zone, don't touch ranges
	 * that straddle into other zones. While we could move part of
	 * the range that's inside the zone, this call is usually
	 * accompanied by other operations such as migratetype updates
	 * which also should be locked.
	 */
	if (!zone_spans_pfn(zone, start))
		return false;
	if (!zone_spans_pfn(zone, end - 1))
		return false;

	*start_pfn = start;

	if (num_free) {
		*num_free = 0;
		*num_movable = 0;
		for (pfn = start; pfn < end;) {
			page = pfn_to_page(pfn);
			if (PageBuddy(page)) {
				int nr = 1 << buddy_order(page);

				*num_free += nr;
				pfn += nr;
				continue;
			}
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (PageLRU(page) || __PageMovable(page))
				(*num_movable)++;
			pfn++;
		}
	}

	return true;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int old_mt, int new_mt)
{
	unsigned long start_pfn;

	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
		return -1;

	return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
}

#ifdef CONFIG_MEMORY_ISOLATION
/* Look for a buddy that straddles start_pfn */
static unsigned long find_large_buddy(unsigned long start_pfn)
{
	int order = 0;
	struct page *page;
	unsigned long pfn = start_pfn;

	while (!PageBuddy(page = pfn_to_page(pfn))) {
		/* Nothing found */
		if (++order > MAX_PAGE_ORDER)
			return start_pfn;
		pfn &= ~0UL << order;
	}

	/*
	 * Found a preceding buddy, but does it straddle?
	 */
	if (pfn + (1 << buddy_order(page)) > start_pfn)
		return pfn;

	/* Nothing found */
	return start_pfn;
}
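/*
 * Worked example for find_large_buddy() (illustrative pfns, assuming
 * MAX_PAGE_ORDER == 10): searching from start_pfn == 0x1340, the loop
 * probes progressively lower-aligned candidate heads by clearing low pfn
 * bits: 0x1340 (orders 0-6), 0x1300 (orders 7-8), 0x1200 (order 9),
 * 0x1000 (order 10). A PageBuddy head found this way is returned only if
 * pfn + (1 << buddy_order) extends past start_pfn, i.e. the buddy really
 * straddles it; otherwise start_pfn itself is returned.
 */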
/**
 * move_freepages_block_isolate - move free pages in block for page isolation
 * @zone: the zone
 * @page: the pageblock page
 * @migratetype: migratetype to set on the pageblock
 *
 * This is similar to move_freepages_block(), but handles the special
 * case encountered in page isolation, where the block of interest
 * might be part of a larger buddy spanning multiple pageblocks.
 *
 * Unlike the regular page allocator path, which moves pages while
 * stealing buddies off the freelist, page isolation is interested in
 * arbitrary pfn ranges that may have overlapping buddies on both ends.
 *
 * This function handles that. Straddling buddies are split into
 * individual pageblocks. Only the block of interest is moved.
 *
 * Returns %true if pages could be moved, %false otherwise.
 */
bool move_freepages_block_isolate(struct zone *zone, struct page *page,
				  int migratetype)
{
	unsigned long start_pfn, pfn;

	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
		return false;

	/* No splits needed if buddies can't span multiple blocks */
	if (pageblock_order == MAX_PAGE_ORDER)
		goto move;

	/* We're a tail block in a larger buddy */
	pfn = find_large_buddy(start_pfn);
	if (pfn != start_pfn) {
		struct page *buddy = pfn_to_page(pfn);
		int order = buddy_order(buddy);

		del_page_from_free_list(buddy, zone, order,
					get_pfnblock_migratetype(buddy, pfn));
		set_pageblock_migratetype(page, migratetype);
		split_large_buddy(zone, buddy, pfn, order, FPI_NONE);
		return true;
	}

	/* We're the starting block of a larger buddy */
	if (PageBuddy(page) && buddy_order(page) > pageblock_order) {
		int order = buddy_order(page);

		del_page_from_free_list(page, zone, order,
					get_pfnblock_migratetype(page, pfn));
		set_pageblock_migratetype(page, migratetype);
		split_large_buddy(zone, page, pfn, order, FPI_NONE);
		return true;
	}
move:
	__move_freepages_block(zone, start_pfn,
			       get_pfnblock_migratetype(page, start_pfn),
			       migratetype);
	return true;
}
#endif /* CONFIG_MEMORY_ISOLATION */

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * This order check is intentionally kept even though the next check
	 * uses a relaxed order check: if this condition is met, we can
	 * actually steal the whole pageblock, while the check below doesn't
	 * guarantee that and is merely a heuristic that could change at any
	 * time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

static inline bool boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	if (!watermark_boost_factor)
		return false;
	/*
	 * Don't bother in zones that are unlikely to produce results.
	 * On small machines, including kdump capture kernels running
	 * in a small area, boosting the watermark can cause an out of
	 * memory situation immediately.
	 */
	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
		return false;

	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);

	/*
	 * high watermark may be uninitialised if fragmentation occurs
	 * very early in boot so do not boost. We do not fall
	 * through and boost by pageblock_nr_pages as failing
	 * allocations that early means that reclaim is not going
	 * to help and it may even be impossible to reclaim the
	 * boosted watermark resulting in a hang.
	 */
	if (!max_boost)
		return false;

	max_boost = max(pageblock_nr_pages, max_boost);

	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);

	return true;
}
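/*
 * Worked example for boost_watermark() (illustrative numbers): with the
 * default watermark_boost_factor of 15000, a zone whose high watermark is
 * 10000 pages gets
 *
 *	max_boost = 10000 * 15000 / 10000 = 15000 pages
 *
 * and each fallback event raises watermark_boost by pageblock_nr_pages up
 * to that cap, temporarily increasing reclaim pressure so kswapd recovers
 * enough contiguity to avoid further fallbacks.
 */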
1854 */ 1855 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1856 return false; 1857 1858 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 1859 watermark_boost_factor, 10000); 1860 1861 /* 1862 * high watermark may be uninitialised if fragmentation occurs 1863 * very early in boot so do not boost. We do not fall 1864 * through and boost by pageblock_nr_pages as failing 1865 * allocations that early means that reclaim is not going 1866 * to help and it may even be impossible to reclaim the 1867 * boosted watermark resulting in a hang. 1868 */ 1869 if (!max_boost) 1870 return false; 1871 1872 max_boost = max(pageblock_nr_pages, max_boost); 1873 1874 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 1875 max_boost); 1876 1877 return true; 1878 } 1879 1880 /* 1881 * This function implements actual steal behaviour. If order is large enough, we 1882 * can claim the whole pageblock for the requested migratetype. If not, we check 1883 * the pageblock for constituent pages; if at least half of the pages are free 1884 * or compatible, we can still claim the whole block, so pages freed in the 1885 * future will be put on the correct free list. Otherwise, we isolate exactly 1886 * the order we need from the fallback block and leave its migratetype alone. 1887 */ 1888 static struct page * 1889 steal_suitable_fallback(struct zone *zone, struct page *page, 1890 int current_order, int order, int start_type, 1891 unsigned int alloc_flags, bool whole_block) 1892 { 1893 int free_pages, movable_pages, alike_pages; 1894 unsigned long start_pfn; 1895 int block_type; 1896 1897 block_type = get_pageblock_migratetype(page); 1898 1899 /* 1900 * This can happen due to races and we want to prevent broken 1901 * highatomic accounting. 1902 */ 1903 if (is_migrate_highatomic(block_type)) 1904 goto single_page; 1905 1906 /* Take ownership for orders >= pageblock_order */ 1907 if (current_order >= pageblock_order) { 1908 unsigned int nr_added; 1909 1910 del_page_from_free_list(page, zone, current_order, block_type); 1911 change_pageblock_range(page, current_order, start_type); 1912 nr_added = expand(zone, page, order, current_order, start_type); 1913 account_freepages(zone, nr_added, start_type); 1914 return page; 1915 } 1916 1917 /* 1918 * Boost watermarks to increase reclaim pressure to reduce the 1919 * likelihood of future fallbacks. Wake kswapd now as the node 1920 * may be balanced overall and kswapd will not wake naturally. 1921 */ 1922 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 1923 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 1924 1925 /* We are not allowed to try stealing from the whole block */ 1926 if (!whole_block) 1927 goto single_page; 1928 1929 /* moving whole block can fail due to zone boundary conditions */ 1930 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 1931 &movable_pages)) 1932 goto single_page; 1933 1934 /* 1935 * Determine how many pages are compatible with our allocation. 1936 * For movable allocation, it's the number of movable pages which 1937 * we just obtained. For other types it's a bit more tricky. 1938 */ 1939 if (start_type == MIGRATE_MOVABLE) { 1940 alike_pages = movable_pages; 1941 } else { 1942 /* 1943 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 1944 * to MOVABLE pageblock, consider all non-movable pages as 1945 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 1946 * vice versa, be conservative since we can't distinguish the 1947 * exact migratetype of non-movable pages. 
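 *
 * Worked example, assuming a 512-page pageblock: an UNMOVABLE request
 * falls back to a MOVABLE block with free_pages == 300 and
 * movable_pages == 100, so alike_pages == 512 - (300 + 100) == 112.
 * Since 300 + 112 >= 256 (half a pageblock), the whole block is
 * claimed for the requested migratetype below.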
1948 */ 1949 if (block_type == MIGRATE_MOVABLE) 1950 alike_pages = pageblock_nr_pages 1951 - (free_pages + movable_pages); 1952 else 1953 alike_pages = 0; 1954 } 1955 /* 1956 * If a sufficient number of pages in the block are either free or of 1957 * compatible migratability as our allocation, claim the whole block. 1958 */ 1959 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 1960 page_group_by_mobility_disabled) { 1961 __move_freepages_block(zone, start_pfn, block_type, start_type); 1962 return __rmqueue_smallest(zone, order, start_type); 1963 } 1964 1965 single_page: 1966 page_del_and_expand(zone, page, order, current_order, block_type); 1967 return page; 1968 } 1969 1970 /* 1971 * Check whether there is a suitable fallback freepage with requested order. 1972 * If only_stealable is true, this function returns fallback_mt only if 1973 * we can steal other freepages all together. This would help to reduce 1974 * fragmentation due to mixed migratetype pages in one pageblock. 1975 */ 1976 int find_suitable_fallback(struct free_area *area, unsigned int order, 1977 int migratetype, bool only_stealable, bool *can_steal) 1978 { 1979 int i; 1980 int fallback_mt; 1981 1982 if (area->nr_free == 0) 1983 return -1; 1984 1985 *can_steal = false; 1986 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 1987 fallback_mt = fallbacks[migratetype][i]; 1988 if (free_area_empty(area, fallback_mt)) 1989 continue; 1990 1991 if (can_steal_fallback(order, migratetype)) 1992 *can_steal = true; 1993 1994 if (!only_stealable) 1995 return fallback_mt; 1996 1997 if (*can_steal) 1998 return fallback_mt; 1999 } 2000 2001 return -1; 2002 } 2003 2004 /* 2005 * Reserve the pageblock(s) surrounding an allocation request for 2006 * exclusive use of high-order atomic allocations if there are no 2007 * empty page blocks that contain a page with a suitable order 2008 */ 2009 static void reserve_highatomic_pageblock(struct page *page, int order, 2010 struct zone *zone) 2011 { 2012 int mt; 2013 unsigned long max_managed, flags; 2014 2015 /* 2016 * The number reserved as: minimum is 1 pageblock, maximum is 2017 * roughly 1% of a zone. But if 1% of a zone falls below a 2018 * pageblock size, then don't reserve any pageblocks. 2019 * Check is race-prone but harmless. 2020 */ 2021 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 2022 return; 2023 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 2024 if (zone->nr_reserved_highatomic >= max_managed) 2025 return; 2026 2027 spin_lock_irqsave(&zone->lock, flags); 2028 2029 /* Recheck the nr_reserved_highatomic limit under the lock */ 2030 if (zone->nr_reserved_highatomic >= max_managed) 2031 goto out_unlock; 2032 2033 /* Yoink! */ 2034 mt = get_pageblock_migratetype(page); 2035 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 2036 if (!migratetype_is_mergeable(mt)) 2037 goto out_unlock; 2038 2039 if (order < pageblock_order) { 2040 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 2041 goto out_unlock; 2042 zone->nr_reserved_highatomic += pageblock_nr_pages; 2043 } else { 2044 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 2045 zone->nr_reserved_highatomic += 1 << order; 2046 } 2047 2048 out_unlock: 2049 spin_unlock_irqrestore(&zone->lock, flags); 2050 } 2051 2052 /* 2053 * Used when an allocation is about to fail under memory pressure. 
This 2054 * potentially hurts the reliability of high-order allocations when under 2055 * intense memory pressure but failed atomic allocations should be easier 2056 * to recover from than an OOM. 2057 * 2058 * If @force is true, try to unreserve pageblocks even if that exhausts 2059 * the highatomic reserve. 2060 */ 2061 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2062 bool force) 2063 { 2064 struct zonelist *zonelist = ac->zonelist; 2065 unsigned long flags; 2066 struct zoneref *z; 2067 struct zone *zone; 2068 struct page *page; 2069 int order; 2070 int ret; 2071 2072 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2073 ac->nodemask) { 2074 /* 2075 * Preserve at least one pageblock unless memory pressure 2076 * is really high. 2077 */ 2078 if (!force && zone->nr_reserved_highatomic <= 2079 pageblock_nr_pages) 2080 continue; 2081 2082 spin_lock_irqsave(&zone->lock, flags); 2083 for (order = 0; order < NR_PAGE_ORDERS; order++) { 2084 struct free_area *area = &(zone->free_area[order]); 2085 int mt; 2086 2087 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2088 if (!page) 2089 continue; 2090 2091 mt = get_pageblock_migratetype(page); 2092 /* 2093 * In page freeing path, migratetype change is racy so 2094 * we can encounter several free pages in a pageblock 2095 * in this loop although we changed the pageblock type 2096 * from highatomic to ac->migratetype. So we should 2097 * adjust the count once. 2098 */ 2099 if (is_migrate_highatomic(mt)) { 2100 unsigned long size; 2101 /* 2102 * It should never happen but changes to 2103 * locking could inadvertently allow a per-cpu 2104 * drain to add pages to MIGRATE_HIGHATOMIC 2105 * while unreserving so be safe and watch for 2106 * underflows. 2107 */ 2108 size = max(pageblock_nr_pages, 1UL << order); 2109 size = min(size, zone->nr_reserved_highatomic); 2110 zone->nr_reserved_highatomic -= size; 2111 } 2112 2113 /* 2114 * Convert to ac->migratetype and avoid the normal 2115 * pageblock stealing heuristics. Minimally, the caller 2116 * is doing the work and needs the pages. More 2117 * importantly, if the block was always converted to 2118 * MIGRATE_UNMOVABLE or another type then the number 2119 * of pageblocks that cannot be completely freed 2120 * may increase. 2121 */ 2122 if (order < pageblock_order) 2123 ret = move_freepages_block(zone, page, mt, 2124 ac->migratetype); 2125 else { 2126 move_to_free_list(page, zone, order, mt, 2127 ac->migratetype); 2128 change_pageblock_range(page, order, 2129 ac->migratetype); 2130 ret = 1; 2131 } 2132 /* 2133 * Reserving the block(s) already succeeded, 2134 * so this should not fail on zone boundaries. 2135 */ 2136 WARN_ON_ONCE(ret == -1); 2137 if (ret > 0) { 2138 spin_unlock_irqrestore(&zone->lock, flags); 2139 return ret; 2140 } 2141 } 2142 spin_unlock_irqrestore(&zone->lock, flags); 2143 } 2144 2145 return false; 2146 } 2147 2148 /* 2149 * Try finding a free buddy page on the fallback list and put it on the free 2150 * list of requested migratetype, possibly along with other pages from the same 2151 * block, depending on fragmentation avoidance heuristics. Returns the 2152 * stolen page, or NULL if no suitable fallback could be found. 2153 * 2154 * The use of signed ints for order and current_order is a deliberate 2155 * deviation from the rest of this file, to make the for loop 2156 * condition simpler.
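 *
 * The candidate migratetypes are given by the fallbacks[] table
 * earlier in this file; e.g. an UNMOVABLE request considers
 * RECLAIMABLE blocks before MOVABLE ones at each order, scanning
 * from the largest order downwards.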
2157 */ 2158 static __always_inline struct page * 2159 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2160 unsigned int alloc_flags) 2161 { 2162 struct free_area *area; 2163 int current_order; 2164 int min_order = order; 2165 struct page *page; 2166 int fallback_mt; 2167 bool can_steal; 2168 2169 /* 2170 * Do not steal pages from freelists belonging to other pageblocks 2171 * i.e. orders < pageblock_order. If there are no local zones free, 2172 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2173 */ 2174 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2175 min_order = pageblock_order; 2176 2177 /* 2178 * Find the largest available free page in the other list. This roughly 2179 * approximates finding the pageblock with the most free pages, which 2180 * would be too costly to do exactly. 2181 */ 2182 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2183 --current_order) { 2184 area = &(zone->free_area[current_order]); 2185 fallback_mt = find_suitable_fallback(area, current_order, 2186 start_migratetype, false, &can_steal); 2187 if (fallback_mt == -1) 2188 continue; 2189 2190 /* 2191 * We cannot steal all free pages from the pageblock and the 2192 * requested migratetype is movable. In that case it's better to 2193 * steal and split the smallest available page instead of the 2194 * largest available page, because even if the next movable 2195 * allocation falls back into a different pageblock than this 2196 * one, it won't cause permanent fragmentation. 2197 */ 2198 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2199 && current_order > order) 2200 goto find_smallest; 2201 2202 goto do_steal; 2203 } 2204 2205 return NULL; 2206 2207 find_smallest: 2208 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2209 area = &(zone->free_area[current_order]); 2210 fallback_mt = find_suitable_fallback(area, current_order, 2211 start_migratetype, false, &can_steal); 2212 if (fallback_mt != -1) 2213 break; 2214 } 2215 2216 /* 2217 * This should not happen - we already found a suitable fallback 2218 * when looking for the largest page. 2219 */ 2220 VM_BUG_ON(current_order > MAX_PAGE_ORDER); 2221 2222 do_steal: 2223 page = get_page_from_free_area(area, fallback_mt); 2224 2225 /* take off list, maybe claim block, expand remainder */ 2226 page = steal_suitable_fallback(zone, page, current_order, order, 2227 start_migratetype, alloc_flags, can_steal); 2228 2229 trace_mm_page_alloc_extfrag(page, order, current_order, 2230 start_migratetype, fallback_mt); 2231 2232 return page; 2233 } 2234 2235 /* 2236 * Do the hard work of removing an element from the buddy allocator. 2237 * Call me with the zone->lock already held. 2238 */ 2239 static __always_inline struct page * 2240 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2241 unsigned int alloc_flags) 2242 { 2243 struct page *page; 2244 2245 if (IS_ENABLED(CONFIG_CMA)) { 2246 /* 2247 * Balance movable allocations between regular and CMA areas by 2248 * allocating from CMA when over half of the zone's free memory 2249 * is in the CMA area. 
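 *
 * E.g. with 1024 free pages in the zone, 600 of them in CMA:
 * 600 > 1024 / 2, so a request carrying ALLOC_CMA is steered to
 * __rmqueue_cma_fallback() first, preserving the non-CMA area for
 * allocations that cannot use CMA.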
2250 */ 2251 if (alloc_flags & ALLOC_CMA && 2252 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2253 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2254 page = __rmqueue_cma_fallback(zone, order); 2255 if (page) 2256 return page; 2257 } 2258 } 2259 2260 page = __rmqueue_smallest(zone, order, migratetype); 2261 if (unlikely(!page)) { 2262 if (alloc_flags & ALLOC_CMA) 2263 page = __rmqueue_cma_fallback(zone, order); 2264 2265 if (!page) 2266 page = __rmqueue_fallback(zone, order, migratetype, 2267 alloc_flags); 2268 } 2269 return page; 2270 } 2271 2272 /* 2273 * Obtain a specified number of elements from the buddy allocator, all under 2274 * a single hold of the lock, for efficiency. Add them to the supplied list. 2275 * Returns the number of new pages which were placed at *list. 2276 */ 2277 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2278 unsigned long count, struct list_head *list, 2279 int migratetype, unsigned int alloc_flags) 2280 { 2281 unsigned long flags; 2282 int i; 2283 2284 spin_lock_irqsave(&zone->lock, flags); 2285 for (i = 0; i < count; ++i) { 2286 struct page *page = __rmqueue(zone, order, migratetype, 2287 alloc_flags); 2288 if (unlikely(page == NULL)) 2289 break; 2290 2291 /* 2292 * Split buddy pages returned by expand() are received here in 2293 * physical page order. The page is added to the tail of the 2294 * caller's list. From the caller's perspective, the linked list 2295 * is therefore ordered by page number under some conditions. 2296 * This is useful for IO devices that can only move forward 2297 * from the head of the list, i.e. they consume pages in 2298 * physical page order, and for devices that can merge IO 2299 * requests when the physical pages are ordered properly. 2300 */ 2301 list_add_tail(&page->pcp_list, list); 2302 } 2303 spin_unlock_irqrestore(&zone->lock, flags); 2304 2305 return i; 2306 } 2307 2308 /* 2309 * Called from the vmstat counter updater to decay the PCP high. 2310 * Return whether there is additional work to do. 2311 */ 2312 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) 2313 { 2314 int high_min, to_drain, batch; 2315 int todo = 0; 2316 2317 high_min = READ_ONCE(pcp->high_min); 2318 batch = READ_ONCE(pcp->batch); 2319 /* 2320 * Decrease pcp->high periodically to try to free possible 2321 * idle PCP pages, while avoiding freeing too many pages at 2322 * once to control latency. This also caps the pcp->high decrement. 2323 */ 2324 if (pcp->high > high_min) { 2325 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2326 pcp->high - (pcp->high >> 3), high_min); 2327 if (pcp->high > high_min) 2328 todo++; 2329 } 2330 2331 to_drain = pcp->count - pcp->high; 2332 if (to_drain > 0) { 2333 spin_lock(&pcp->lock); 2334 free_pcppages_bulk(zone, to_drain, pcp, 0); 2335 spin_unlock(&pcp->lock); 2336 todo++; 2337 } 2338 2339 return todo; 2340 } 2341 2342 #ifdef CONFIG_NUMA 2343 /* 2344 * Called from the vmstat counter updater to drain pagesets of this 2345 * currently executing processor on remote nodes after they have 2346 * expired. 2347 */ 2348 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2349 { 2350 int to_drain, batch; 2351 2352 batch = READ_ONCE(pcp->batch); 2353 to_drain = min(pcp->count, batch); 2354 if (to_drain > 0) { 2355 spin_lock(&pcp->lock); 2356 free_pcppages_bulk(zone, to_drain, pcp, 0); 2357 spin_unlock(&pcp->lock); 2358 } 2359 } 2360 #endif 2361 2362 /* 2363 * Drain pcplists of the indicated processor and zone.
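 *
 * The drain below is batched; assuming a typical pcp->batch of 63 and
 * the default CONFIG_PCP_BATCH_SCALE_MAX of 5, at most 63 << 5 == 2016
 * pages are freed per hold of pcp->lock, with the lock dropped and
 * retaken between batches rather than pinned across a huge list.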
2364 */ 2365 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2366 { 2367 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2368 int count; 2369 2370 do { 2371 spin_lock(&pcp->lock); 2372 count = pcp->count; 2373 if (count) { 2374 int to_drain = min(count, 2375 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2376 2377 free_pcppages_bulk(zone, to_drain, pcp, 0); 2378 count -= to_drain; 2379 } 2380 spin_unlock(&pcp->lock); 2381 } while (count); 2382 } 2383 2384 /* 2385 * Drain pcplists of all zones on the indicated processor. 2386 */ 2387 static void drain_pages(unsigned int cpu) 2388 { 2389 struct zone *zone; 2390 2391 for_each_populated_zone(zone) { 2392 drain_pages_zone(cpu, zone); 2393 } 2394 } 2395 2396 /* 2397 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2398 */ 2399 void drain_local_pages(struct zone *zone) 2400 { 2401 int cpu = smp_processor_id(); 2402 2403 if (zone) 2404 drain_pages_zone(cpu, zone); 2405 else 2406 drain_pages(cpu); 2407 } 2408 2409 /* 2410 * The implementation of drain_all_pages(), exposing an extra parameter to 2411 * drain on all cpus. 2412 * 2413 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2414 * not empty. The check for non-emptiness can however race with a free to 2415 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2416 * that need the guarantee that every CPU has drained can disable the 2417 * optimizing racy check. 2418 */ 2419 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2420 { 2421 int cpu; 2422 2423 /* 2424 * Allocate in the BSS so we won't require allocation in 2425 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2426 */ 2427 static cpumask_t cpus_with_pcps; 2428 2429 /* 2430 * Do not drain if one is already in progress unless it's specific to 2431 * a zone. Such callers are primarily CMA and memory hotplug and need 2432 * the drain to be complete when the call returns. 2433 */ 2434 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2435 if (!zone) 2436 return; 2437 mutex_lock(&pcpu_drain_mutex); 2438 } 2439 2440 /* 2441 * We don't care about racing with CPU hotplug event 2442 * as offline notification will cause the notified 2443 * cpu to drain that CPU pcps and on_each_cpu_mask 2444 * disables preemption as part of its processing 2445 */ 2446 for_each_online_cpu(cpu) { 2447 struct per_cpu_pages *pcp; 2448 struct zone *z; 2449 bool has_pcps = false; 2450 2451 if (force_all_cpus) { 2452 /* 2453 * The pcp.count check is racy, some callers need a 2454 * guarantee that no cpu is missed. 2455 */ 2456 has_pcps = true; 2457 } else if (zone) { 2458 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2459 if (pcp->count) 2460 has_pcps = true; 2461 } else { 2462 for_each_populated_zone(z) { 2463 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2464 if (pcp->count) { 2465 has_pcps = true; 2466 break; 2467 } 2468 } 2469 } 2470 2471 if (has_pcps) 2472 cpumask_set_cpu(cpu, &cpus_with_pcps); 2473 else 2474 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2475 } 2476 2477 for_each_cpu(cpu, &cpus_with_pcps) { 2478 if (zone) 2479 drain_pages_zone(cpu, zone); 2480 else 2481 drain_pages(cpu); 2482 } 2483 2484 mutex_unlock(&pcpu_drain_mutex); 2485 } 2486 2487 /* 2488 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2489 * 2490 * When zone parameter is non-NULL, spill just the single zone's pages. 
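 *
 * Typical users are memory offlining and contiguous range allocation
 * (e.g. alloc_contig_range()), which isolate the target pageblocks and
 * then call drain_all_pages(zone) so that stray pages still sitting on
 * pcplists are handed back to the buddy freelists.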
2491 */ 2492 void drain_all_pages(struct zone *zone) 2493 { 2494 __drain_all_pages(zone, false); 2495 } 2496 2497 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2498 { 2499 int min_nr_free, max_nr_free; 2500 2501 /* Free as much as possible if batch freeing high-order pages. */ 2502 if (unlikely(free_high)) 2503 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2504 2505 /* Check for PCP disabled or boot pageset */ 2506 if (unlikely(high < batch)) 2507 return 1; 2508 2509 /* Leave at least pcp->batch pages on the list */ 2510 min_nr_free = batch; 2511 max_nr_free = high - batch; 2512 2513 /* 2514 * Increase the batch number to the number of the consecutive 2515 * freed pages to reduce zone lock contention. 2516 */ 2517 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2518 2519 return batch; 2520 } 2521 2522 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2523 int batch, bool free_high) 2524 { 2525 int high, high_min, high_max; 2526 2527 high_min = READ_ONCE(pcp->high_min); 2528 high_max = READ_ONCE(pcp->high_max); 2529 high = pcp->high = clamp(pcp->high, high_min, high_max); 2530 2531 if (unlikely(!high)) 2532 return 0; 2533 2534 if (unlikely(free_high)) { 2535 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2536 high_min); 2537 return 0; 2538 } 2539 2540 /* 2541 * If reclaim is active, limit the number of pages that can be 2542 * stored on pcp lists 2543 */ 2544 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2545 int free_count = max_t(int, pcp->free_count, batch); 2546 2547 pcp->high = max(high - free_count, high_min); 2548 return min(batch << 2, pcp->high); 2549 } 2550 2551 if (high_min == high_max) 2552 return high; 2553 2554 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2555 int free_count = max_t(int, pcp->free_count, batch); 2556 2557 pcp->high = max(high - free_count, high_min); 2558 high = max(pcp->count, high_min); 2559 } else if (pcp->count >= high) { 2560 int need_high = pcp->free_count + batch; 2561 2562 /* pcp->high should be large enough to hold batch freed pages */ 2563 if (pcp->high < need_high) 2564 pcp->high = clamp(need_high, high_min, high_max); 2565 } 2566 2567 return high; 2568 } 2569 2570 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2571 struct page *page, int migratetype, 2572 unsigned int order) 2573 { 2574 int high, batch; 2575 int pindex; 2576 bool free_high = false; 2577 2578 /* 2579 * On freeing, reduce the number of pages that are batch allocated. 2580 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2581 * allocations. 2582 */ 2583 pcp->alloc_factor >>= 1; 2584 __count_vm_events(PGFREE, 1 << order); 2585 pindex = order_to_pindex(migratetype, order); 2586 list_add(&page->pcp_list, &pcp->lists[pindex]); 2587 pcp->count += 1 << order; 2588 2589 batch = READ_ONCE(pcp->batch); 2590 /* 2591 * As high-order pages other than THP's stored on PCP can contribute 2592 * to fragmentation, limit the number stored when PCP is heavily 2593 * freeing without allocation. The remainder after bulk freeing 2594 * stops will be drained from vmstat refresh context. 
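 *
 * Rough sketch, assuming batch == 63: once ~63 pages have been freed
 * back to back and the previous free was also high-order, free_high is
 * set below and nr_pcp_free() trims the list aggressively, up to
 * batch << CONFIG_PCP_BATCH_SCALE_MAX pages at once.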
2595 */ 2596 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2597 free_high = (pcp->free_count >= batch && 2598 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2599 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2600 pcp->count >= READ_ONCE(batch))); 2601 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2602 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2603 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2604 } 2605 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2606 pcp->free_count += (1 << order); 2607 high = nr_pcp_high(pcp, zone, batch, free_high); 2608 if (pcp->count >= high) { 2609 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), 2610 pcp, pindex); 2611 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2612 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2613 ZONE_MOVABLE, 0)) 2614 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2615 } 2616 } 2617 2618 /* 2619 * Free a pcp page 2620 */ 2621 void free_unref_page(struct page *page, unsigned int order) 2622 { 2623 unsigned long __maybe_unused UP_flags; 2624 struct per_cpu_pages *pcp; 2625 struct zone *zone; 2626 unsigned long pfn = page_to_pfn(page); 2627 int migratetype; 2628 2629 if (!pcp_allowed_order(order)) { 2630 __free_pages_ok(page, order, FPI_NONE); 2631 return; 2632 } 2633 2634 if (!free_pages_prepare(page, order)) 2635 return; 2636 2637 /* 2638 * We only track unmovable, reclaimable and movable on pcp lists. 2639 * Place ISOLATE pages on the isolated list because they are being 2640 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2641 * get those areas back if necessary. Otherwise, we may have to free 2642 * excessively into the page allocator 2643 */ 2644 migratetype = get_pfnblock_migratetype(page, pfn); 2645 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2646 if (unlikely(is_migrate_isolate(migratetype))) { 2647 free_one_page(page_zone(page), page, pfn, order, FPI_NONE); 2648 return; 2649 } 2650 migratetype = MIGRATE_MOVABLE; 2651 } 2652 2653 zone = page_zone(page); 2654 pcp_trylock_prepare(UP_flags); 2655 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2656 if (pcp) { 2657 free_unref_page_commit(zone, pcp, page, migratetype, order); 2658 pcp_spin_unlock(pcp); 2659 } else { 2660 free_one_page(zone, page, pfn, order, FPI_NONE); 2661 } 2662 pcp_trylock_finish(UP_flags); 2663 } 2664 2665 /* 2666 * Free a batch of folios 2667 */ 2668 void free_unref_folios(struct folio_batch *folios) 2669 { 2670 unsigned long __maybe_unused UP_flags; 2671 struct per_cpu_pages *pcp = NULL; 2672 struct zone *locked_zone = NULL; 2673 int i, j; 2674 2675 /* Prepare folios for freeing */ 2676 for (i = 0, j = 0; i < folios->nr; i++) { 2677 struct folio *folio = folios->folios[i]; 2678 unsigned long pfn = folio_pfn(folio); 2679 unsigned int order = folio_order(folio); 2680 2681 folio_undo_large_rmappable(folio); 2682 if (!free_pages_prepare(&folio->page, order)) 2683 continue; 2684 /* 2685 * Free orders not handled on the PCP directly to the 2686 * allocator. 
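 * (pcp_allowed_order() admits orders up to PAGE_ALLOC_COSTLY_ORDER,
 * plus HPAGE_PMD_ORDER when transparent hugepages are enabled.)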
2687 */ 2688 if (!pcp_allowed_order(order)) { 2689 free_one_page(folio_zone(folio), &folio->page, 2690 pfn, order, FPI_NONE); 2691 continue; 2692 } 2693 folio->private = (void *)(unsigned long)order; 2694 if (j != i) 2695 folios->folios[j] = folio; 2696 j++; 2697 } 2698 folios->nr = j; 2699 2700 for (i = 0; i < folios->nr; i++) { 2701 struct folio *folio = folios->folios[i]; 2702 struct zone *zone = folio_zone(folio); 2703 unsigned long pfn = folio_pfn(folio); 2704 unsigned int order = (unsigned long)folio->private; 2705 int migratetype; 2706 2707 folio->private = NULL; 2708 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 2709 2710 /* Different zone requires a different pcp lock */ 2711 if (zone != locked_zone || 2712 is_migrate_isolate(migratetype)) { 2713 if (pcp) { 2714 pcp_spin_unlock(pcp); 2715 pcp_trylock_finish(UP_flags); 2716 locked_zone = NULL; 2717 pcp = NULL; 2718 } 2719 2720 /* 2721 * Free isolated pages directly to the 2722 * allocator, see comment in free_unref_page. 2723 */ 2724 if (is_migrate_isolate(migratetype)) { 2725 free_one_page(zone, &folio->page, pfn, 2726 order, FPI_NONE); 2727 continue; 2728 } 2729 2730 /* 2731 * trylock is necessary as folios may be getting freed 2732 * from IRQ or SoftIRQ context after an IO completion. 2733 */ 2734 pcp_trylock_prepare(UP_flags); 2735 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2736 if (unlikely(!pcp)) { 2737 pcp_trylock_finish(UP_flags); 2738 free_one_page(zone, &folio->page, pfn, 2739 order, FPI_NONE); 2740 continue; 2741 } 2742 locked_zone = zone; 2743 } 2744 2745 /* 2746 * Non-isolated types over MIGRATE_PCPTYPES get added 2747 * to the MIGRATE_MOVABLE pcp list. 2748 */ 2749 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2750 migratetype = MIGRATE_MOVABLE; 2751 2752 trace_mm_page_free_batched(&folio->page); 2753 free_unref_page_commit(zone, pcp, &folio->page, migratetype, 2754 order); 2755 } 2756 2757 if (pcp) { 2758 pcp_spin_unlock(pcp); 2759 pcp_trylock_finish(UP_flags); 2760 } 2761 folio_batch_reinit(folios); 2762 } 2763 2764 /* 2765 * split_page takes a non-compound higher-order page, and splits it into 2766 * n (1<<order) sub-pages: page[0..n] 2767 * Each sub-page must be freed individually. 2768 * 2769 * Note: this is probably too low level an operation for use in drivers. 2770 * Please consult with lkml before using this in your driver. 2771 */ 2772 void split_page(struct page *page, unsigned int order) 2773 { 2774 int i; 2775 2776 VM_BUG_ON_PAGE(PageCompound(page), page); 2777 VM_BUG_ON_PAGE(!page_count(page), page); 2778 2779 for (i = 1; i < (1 << order); i++) 2780 set_page_refcounted(page + i); 2781 split_page_owner(page, order, 0); 2782 pgalloc_tag_split(page, 1 << order); 2783 split_page_memcg(page, order, 0); 2784 } 2785 EXPORT_SYMBOL_GPL(split_page); 2786 2787 int __isolate_free_page(struct page *page, unsigned int order) 2788 { 2789 struct zone *zone = page_zone(page); 2790 int mt = get_pageblock_migratetype(page); 2791 2792 if (!is_migrate_isolate(mt)) { 2793 unsigned long watermark; 2794 /* 2795 * Obey watermarks as if the page was being allocated. We can 2796 * emulate a high-order watermark check with a raised order-0 2797 * watermark, because we already know our high-order page 2798 * exists. 
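 *
 * E.g. isolating an order-3 page raises the order-0 check by
 * 1UL << 3 == 8 pages on top of WMARK_MIN, which is what the raised
 * watermark below emulates.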
2799 */ 2800 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2801 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2802 return 0; 2803 } 2804 2805 del_page_from_free_list(page, zone, order, mt); 2806 2807 /* 2808 * Set the pageblock if the isolated page is at least half of a 2809 * pageblock 2810 */ 2811 if (order >= pageblock_order - 1) { 2812 struct page *endpage = page + (1 << order) - 1; 2813 for (; page < endpage; page += pageblock_nr_pages) { 2814 int mt = get_pageblock_migratetype(page); 2815 /* 2816 * Only change normal pageblocks (i.e., they can merge 2817 * with others) 2818 */ 2819 if (migratetype_is_mergeable(mt)) 2820 move_freepages_block(zone, page, mt, 2821 MIGRATE_MOVABLE); 2822 } 2823 } 2824 2825 return 1UL << order; 2826 } 2827 2828 /** 2829 * __putback_isolated_page - Return a now-isolated page back where we got it 2830 * @page: Page that was isolated 2831 * @order: Order of the isolated page 2832 * @mt: The page's pageblock's migratetype 2833 * 2834 * This function is meant to return a page pulled from the free lists via 2835 * __isolate_free_page back to the free lists they were pulled from. 2836 */ 2837 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2838 { 2839 struct zone *zone = page_zone(page); 2840 2841 /* zone lock should be held when this function is called */ 2842 lockdep_assert_held(&zone->lock); 2843 2844 /* Return isolated page to tail of freelist. */ 2845 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2846 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2847 } 2848 2849 /* 2850 * Update NUMA hit/miss statistics 2851 */ 2852 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2853 long nr_account) 2854 { 2855 #ifdef CONFIG_NUMA 2856 enum numa_stat_item local_stat = NUMA_LOCAL; 2857 2858 /* skip numa counters update if numa stats is disabled */ 2859 if (!static_branch_likely(&vm_numa_stat_key)) 2860 return; 2861 2862 if (zone_to_nid(z) != numa_node_id()) 2863 local_stat = NUMA_OTHER; 2864 2865 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2866 __count_numa_events(z, NUMA_HIT, nr_account); 2867 else { 2868 __count_numa_events(z, NUMA_MISS, nr_account); 2869 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2870 } 2871 __count_numa_events(z, local_stat, nr_account); 2872 #endif 2873 } 2874 2875 static __always_inline 2876 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2877 unsigned int order, unsigned int alloc_flags, 2878 int migratetype) 2879 { 2880 struct page *page; 2881 unsigned long flags; 2882 2883 do { 2884 page = NULL; 2885 spin_lock_irqsave(&zone->lock, flags); 2886 if (alloc_flags & ALLOC_HIGHATOMIC) 2887 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2888 if (!page) { 2889 page = __rmqueue(zone, order, migratetype, alloc_flags); 2890 2891 /* 2892 * If the allocation fails, allow OOM handling access 2893 * to HIGHATOMIC reserves as failing now is worse than 2894 * failing a high-order atomic allocation in the 2895 * future. 
2896 */ 2897 if (!page && (alloc_flags & ALLOC_OOM)) 2898 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2899 2900 if (!page) { 2901 spin_unlock_irqrestore(&zone->lock, flags); 2902 return NULL; 2903 } 2904 } 2905 spin_unlock_irqrestore(&zone->lock, flags); 2906 } while (check_new_pages(page, order)); 2907 2908 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2909 zone_statistics(preferred_zone, zone, 1); 2910 2911 return page; 2912 } 2913 2914 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 2915 { 2916 int high, base_batch, batch, max_nr_alloc; 2917 int high_max, high_min; 2918 2919 base_batch = READ_ONCE(pcp->batch); 2920 high_min = READ_ONCE(pcp->high_min); 2921 high_max = READ_ONCE(pcp->high_max); 2922 high = pcp->high = clamp(pcp->high, high_min, high_max); 2923 2924 /* Check for PCP disabled or boot pageset */ 2925 if (unlikely(high < base_batch)) 2926 return 1; 2927 2928 if (order) 2929 batch = base_batch; 2930 else 2931 batch = (base_batch << pcp->alloc_factor); 2932 2933 /* 2934 * If we had a larger pcp->high, we could avoid allocating from 2935 * the zone. 2936 */ 2937 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 2938 high = pcp->high = min(high + batch, high_max); 2939 2940 if (!order) { 2941 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 2942 /* 2943 * Double the number of pages allocated each time there is a 2944 * subsequent allocation of order-0 pages without any freeing. 2945 */ 2946 if (batch <= max_nr_alloc && 2947 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 2948 pcp->alloc_factor++; 2949 batch = min(batch, max_nr_alloc); 2950 } 2951 2952 /* 2953 * Scale batch relative to order if batch implies free pages 2954 * can be stored on the PCP. Batch can be 1 for small zones or 2955 * for boot pagesets which should never store free pages as 2956 * the pages may belong to arbitrary zones. 2957 */ 2958 if (batch > 1) 2959 batch = max(batch >> order, 2); 2960 2961 return batch; 2962 } 2963 2964 /* Remove page from the per-cpu list, caller must protect the list */ 2965 static inline 2966 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 2967 int migratetype, 2968 unsigned int alloc_flags, 2969 struct per_cpu_pages *pcp, 2970 struct list_head *list) 2971 { 2972 struct page *page; 2973 2974 do { 2975 if (list_empty(list)) { 2976 int batch = nr_pcp_alloc(pcp, zone, order); 2977 int alloced; 2978 2979 alloced = rmqueue_bulk(zone, order, 2980 batch, list, 2981 migratetype, alloc_flags); 2982 2983 pcp->count += alloced << order; 2984 if (unlikely(list_empty(list))) 2985 return NULL; 2986 } 2987 2988 page = list_first_entry(list, struct page, pcp_list); 2989 list_del(&page->pcp_list); 2990 pcp->count -= 1 << order; 2991 } while (check_new_pages(page, order)); 2992 2993 return page; 2994 } 2995 2996 /* Lock and remove page from the per-cpu list */ 2997 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2998 struct zone *zone, unsigned int order, 2999 int migratetype, unsigned int alloc_flags) 3000 { 3001 struct per_cpu_pages *pcp; 3002 struct list_head *list; 3003 struct page *page; 3004 unsigned long __maybe_unused UP_flags; 3005 3006 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 3007 pcp_trylock_prepare(UP_flags); 3008 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3009 if (!pcp) { 3010 pcp_trylock_finish(UP_flags); 3011 return NULL; 3012 } 3013 3014 /* 3015 * On allocation, reduce the number of pages that are batch freed.
3016 * See nr_pcp_free() where pcp->free_count is used to scale the 3017 * freeing batch. 3018 */ 3019 pcp->free_count >>= 1; 3020 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3021 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3022 pcp_spin_unlock(pcp); 3023 pcp_trylock_finish(UP_flags); 3024 if (page) { 3025 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3026 zone_statistics(preferred_zone, zone, 1); 3027 } 3028 return page; 3029 } 3030 3031 /* 3032 * Allocate a page from the given zone. 3033 * Use pcplists for THP or "cheap" high-order allocations. 3034 */ 3035 3036 /* 3037 * Do not instrument rmqueue() with KMSAN. This function may call 3038 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 3039 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3040 * may call rmqueue() again, which will result in a deadlock. 3041 */ 3042 __no_sanitize_memory 3043 static inline 3044 struct page *rmqueue(struct zone *preferred_zone, 3045 struct zone *zone, unsigned int order, 3046 gfp_t gfp_flags, unsigned int alloc_flags, 3047 int migratetype) 3048 { 3049 struct page *page; 3050 3051 if (likely(pcp_allowed_order(order))) { 3052 page = rmqueue_pcplist(preferred_zone, zone, order, 3053 migratetype, alloc_flags); 3054 if (likely(page)) 3055 goto out; 3056 } 3057 3058 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3059 migratetype); 3060 3061 out: 3062 /* Separate test+clear to avoid unnecessary atomics */ 3063 if ((alloc_flags & ALLOC_KSWAPD) && 3064 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3065 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3066 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3067 } 3068 3069 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3070 return page; 3071 } 3072 3073 static inline long __zone_watermark_unusable_free(struct zone *z, 3074 unsigned int order, unsigned int alloc_flags) 3075 { 3076 long unusable_free = (1 << order) - 1; 3077 3078 /* 3079 * If the caller does not have rights to reserves below the min 3080 * watermark then subtract the high-atomic reserves. This will 3081 * over-estimate the size of the atomic reserve but it avoids a search. 3082 */ 3083 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3084 unusable_free += z->nr_reserved_highatomic; 3085 3086 #ifdef CONFIG_CMA 3087 /* If allocation can't use CMA areas don't use free CMA pages */ 3088 if (!(alloc_flags & ALLOC_CMA)) 3089 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3090 #endif 3091 3092 return unusable_free; 3093 } 3094 3095 /* 3096 * Return true if free base pages are above 'mark'. For high-order checks it 3097 * will return true if the order-0 watermark is reached and there is at least 3098 * one free page of a suitable size. Checking now avoids taking the zone lock 3099 * to check in the allocation paths if no pages are free. 3100 */ 3101 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3102 int highest_zoneidx, unsigned int alloc_flags, 3103 long free_pages) 3104 { 3105 long min = mark; 3106 int o; 3107 3108 /* free_pages may go negative - that's OK */ 3109 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3110 3111 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3112 /* 3113 * __GFP_HIGH allows access to 50% of the min reserve as well 3114 * as OOM. 3115 */ 3116 if (alloc_flags & ALLOC_MIN_RESERVE) { 3117 min -= min / 2; 3118 3119 /* 3120 * Non-blocking allocations (e.g.
GFP_ATOMIC) can 3121 * access more reserves than just __GFP_HIGH. Other 3122 * non-blocking allocations requests such as GFP_NOWAIT 3123 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3124 * access to the min reserve. 3125 */ 3126 if (alloc_flags & ALLOC_NON_BLOCK) 3127 min -= min / 4; 3128 } 3129 3130 /* 3131 * OOM victims can try even harder than the normal reserve 3132 * users on the grounds that it's definitely going to be in 3133 * the exit path shortly and free memory. Any allocation it 3134 * makes during the free path will be small and short-lived. 3135 */ 3136 if (alloc_flags & ALLOC_OOM) 3137 min -= min / 2; 3138 } 3139 3140 /* 3141 * Check watermarks for an order-0 allocation request. If these 3142 * are not met, then a high-order request also cannot go ahead 3143 * even if a suitable page happened to be free. 3144 */ 3145 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3146 return false; 3147 3148 /* If this is an order-0 request then the watermark is fine */ 3149 if (!order) 3150 return true; 3151 3152 /* For a high-order request, check at least one suitable page is free */ 3153 for (o = order; o < NR_PAGE_ORDERS; o++) { 3154 struct free_area *area = &z->free_area[o]; 3155 int mt; 3156 3157 if (!area->nr_free) 3158 continue; 3159 3160 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3161 if (!free_area_empty(area, mt)) 3162 return true; 3163 } 3164 3165 #ifdef CONFIG_CMA 3166 if ((alloc_flags & ALLOC_CMA) && 3167 !free_area_empty(area, MIGRATE_CMA)) { 3168 return true; 3169 } 3170 #endif 3171 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3172 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3173 return true; 3174 } 3175 } 3176 return false; 3177 } 3178 3179 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3180 int highest_zoneidx, unsigned int alloc_flags) 3181 { 3182 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3183 zone_page_state(z, NR_FREE_PAGES)); 3184 } 3185 3186 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3187 unsigned long mark, int highest_zoneidx, 3188 unsigned int alloc_flags, gfp_t gfp_mask) 3189 { 3190 long free_pages; 3191 3192 free_pages = zone_page_state(z, NR_FREE_PAGES); 3193 3194 /* 3195 * Fast check for order-0 only. If this fails then the reserves 3196 * need to be calculated. 3197 */ 3198 if (!order) { 3199 long usable_free; 3200 long reserved; 3201 3202 usable_free = free_pages; 3203 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3204 3205 /* reserved may over estimate high-atomic reserves. */ 3206 usable_free -= min(usable_free, reserved); 3207 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3208 return true; 3209 } 3210 3211 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3212 free_pages)) 3213 return true; 3214 3215 /* 3216 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3217 * when checking the min watermark. The min watermark is the 3218 * point where boosting is ignored so that kswapd is woken up 3219 * when below the low watermark. 
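 *
 * Numeric sketch: with WMARK_MIN at 1000 pages and a boost of 500, an
 * order-0 ALLOC_MIN_RESERVE request that failed against 1500 is
 * re-checked against the unboosted 1000 below, so boosting alone
 * cannot fail such allocations outright.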
3220 */ 3221 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3222 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3223 mark = z->_watermark[WMARK_MIN]; 3224 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3225 alloc_flags, free_pages); 3226 } 3227 3228 return false; 3229 } 3230 3231 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3232 unsigned long mark, int highest_zoneidx) 3233 { 3234 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3235 3236 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3237 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3238 3239 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3240 free_pages); 3241 } 3242 3243 #ifdef CONFIG_NUMA 3244 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3245 3246 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3247 { 3248 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3249 node_reclaim_distance; 3250 } 3251 #else /* CONFIG_NUMA */ 3252 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3253 { 3254 return true; 3255 } 3256 #endif /* CONFIG_NUMA */ 3257 3258 /* 3259 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3260 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3261 * premature use of a lower zone may cause lowmem pressure problems that 3262 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3263 * probably too small. It only makes sense to spread allocations to avoid 3264 * fragmentation between the Normal and DMA32 zones. 3265 */ 3266 static inline unsigned int 3267 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3268 { 3269 unsigned int alloc_flags; 3270 3271 /* 3272 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3273 * to save a branch. 3274 */ 3275 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3276 3277 #ifdef CONFIG_ZONE_DMA32 3278 if (!zone) 3279 return alloc_flags; 3280 3281 if (zone_idx(zone) != ZONE_NORMAL) 3282 return alloc_flags; 3283 3284 /* 3285 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3286 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3287 * on UMA that if Normal is populated then so is DMA32. 3288 */ 3289 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3290 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3291 return alloc_flags; 3292 3293 alloc_flags |= ALLOC_NOFRAGMENT; 3294 #endif /* CONFIG_ZONE_DMA32 */ 3295 return alloc_flags; 3296 } 3297 3298 /* Must be called after current_gfp_context() which can change gfp_mask */ 3299 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3300 unsigned int alloc_flags) 3301 { 3302 #ifdef CONFIG_CMA 3303 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3304 alloc_flags |= ALLOC_CMA; 3305 #endif 3306 return alloc_flags; 3307 } 3308 3309 /* 3310 * get_page_from_freelist goes through the zonelist trying to allocate 3311 * a page. 3312 */ 3313 static struct page * 3314 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3315 const struct alloc_context *ac) 3316 { 3317 struct zoneref *z; 3318 struct zone *zone; 3319 struct pglist_data *last_pgdat = NULL; 3320 bool last_pgdat_dirty_ok = false; 3321 bool no_fallback; 3322 3323 retry: 3324 /* 3325 * Scan zonelist, looking for a zone with enough free. 3326 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 
3327 */ 3328 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3329 z = ac->preferred_zoneref; 3330 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3331 ac->nodemask) { 3332 struct page *page; 3333 unsigned long mark; 3334 3335 if (cpusets_enabled() && 3336 (alloc_flags & ALLOC_CPUSET) && 3337 !__cpuset_zone_allowed(zone, gfp_mask)) 3338 continue; 3339 /* 3340 * When allocating a page cache page for writing, we 3341 * want to get it from a node that is within its dirty 3342 * limit, such that no single node holds more than its 3343 * proportional share of globally allowed dirty pages. 3344 * The dirty limits take into account the node's 3345 * lowmem reserves and high watermark so that kswapd 3346 * should be able to balance it without having to 3347 * write pages from its LRU list. 3348 * 3349 * XXX: For now, allow allocations to potentially 3350 * exceed the per-node dirty limit in the slowpath 3351 * (spread_dirty_pages unset) before going into reclaim, 3352 * which is important when on a NUMA setup the allowed 3353 * nodes are together not big enough to reach the 3354 * global limit. The proper fix for these situations 3355 * will require awareness of nodes in the 3356 * dirty-throttling and the flusher threads. 3357 */ 3358 if (ac->spread_dirty_pages) { 3359 if (last_pgdat != zone->zone_pgdat) { 3360 last_pgdat = zone->zone_pgdat; 3361 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3362 } 3363 3364 if (!last_pgdat_dirty_ok) 3365 continue; 3366 } 3367 3368 if (no_fallback && nr_online_nodes > 1 && 3369 zone != zonelist_zone(ac->preferred_zoneref)) { 3370 int local_nid; 3371 3372 /* 3373 * If moving to a remote node, retry but allow 3374 * fragmenting fallbacks. Locality is more important 3375 * than fragmentation avoidance. 3376 */ 3377 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3378 if (zone_to_nid(zone) != local_nid) { 3379 alloc_flags &= ~ALLOC_NOFRAGMENT; 3380 goto retry; 3381 } 3382 } 3383 3384 cond_accept_memory(zone, order); 3385 3386 /* 3387 * Detect whether the number of free pages is below high 3388 * watermark. If so, we will decrease pcp->high and free 3389 * PCP pages in free path to reduce the possibility of 3390 * premature page reclaiming. Detection is done here to 3391 * avoid doing that in the hotter free path. 3392 */ 3393 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3394 goto check_alloc_wmark; 3395 3396 mark = high_wmark_pages(zone); 3397 if (zone_watermark_fast(zone, order, mark, 3398 ac->highest_zoneidx, alloc_flags, 3399 gfp_mask)) 3400 goto try_this_zone; 3401 else 3402 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3403 3404 check_alloc_wmark: 3405 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3406 if (!zone_watermark_fast(zone, order, mark, 3407 ac->highest_zoneidx, alloc_flags, 3408 gfp_mask)) { 3409 int ret; 3410 3411 if (cond_accept_memory(zone, order)) 3412 goto try_this_zone; 3413 3414 /* 3415 * Watermark failed for this zone, but see if we can 3416 * grow this zone if it contains deferred pages.
3417 */ 3418 if (deferred_pages_enabled()) { 3419 if (_deferred_grow_zone(zone, order)) 3420 goto try_this_zone; 3421 } 3422 /* Checked here to keep the fast path fast */ 3423 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3424 if (alloc_flags & ALLOC_NO_WATERMARKS) 3425 goto try_this_zone; 3426 3427 if (!node_reclaim_enabled() || 3428 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3429 continue; 3430 3431 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3432 switch (ret) { 3433 case NODE_RECLAIM_NOSCAN: 3434 /* did not scan */ 3435 continue; 3436 case NODE_RECLAIM_FULL: 3437 /* scanned but unreclaimable */ 3438 continue; 3439 default: 3440 /* did we reclaim enough */ 3441 if (zone_watermark_ok(zone, order, mark, 3442 ac->highest_zoneidx, alloc_flags)) 3443 goto try_this_zone; 3444 3445 continue; 3446 } 3447 } 3448 3449 try_this_zone: 3450 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3451 gfp_mask, alloc_flags, ac->migratetype); 3452 if (page) { 3453 prep_new_page(page, order, gfp_mask, alloc_flags); 3454 3455 /* 3456 * If this is a high-order atomic allocation then check 3457 * if the pageblock should be reserved for the future 3458 */ 3459 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3460 reserve_highatomic_pageblock(page, order, zone); 3461 3462 return page; 3463 } else { 3464 if (cond_accept_memory(zone, order)) 3465 goto try_this_zone; 3466 3467 /* Try again if zone has deferred pages */ 3468 if (deferred_pages_enabled()) { 3469 if (_deferred_grow_zone(zone, order)) 3470 goto try_this_zone; 3471 } 3472 } 3473 } 3474 3475 /* 3476 * It's possible on a UMA machine to get through all zones that are 3477 * fragmented. If avoiding fragmentation, reset and try again. 3478 */ 3479 if (no_fallback) { 3480 alloc_flags &= ~ALLOC_NOFRAGMENT; 3481 goto retry; 3482 } 3483 3484 return NULL; 3485 } 3486 3487 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3488 { 3489 unsigned int filter = SHOW_MEM_FILTER_NODES; 3490 3491 /* 3492 * This documents exceptions given to allocations in certain 3493 * contexts that are allowed to allocate outside current's set 3494 * of allowed nodes. 3495 */ 3496 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3497 if (tsk_is_oom_victim(current) || 3498 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3499 filter &= ~SHOW_MEM_FILTER_NODES; 3500 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3501 filter &= ~SHOW_MEM_FILTER_NODES; 3502 3503 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3504 } 3505 3506 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3507 { 3508 struct va_format vaf; 3509 va_list args; 3510 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3511 3512 if ((gfp_mask & __GFP_NOWARN) || 3513 !__ratelimit(&nopage_rs) || 3514 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3515 return; 3516 3517 va_start(args, fmt); 3518 vaf.fmt = fmt; 3519 vaf.va = &args; 3520 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3521 current->comm, &vaf, gfp_mask, &gfp_mask, 3522 nodemask_pr_args(nodemask)); 3523 va_end(args); 3524 3525 cpuset_print_current_mems_allowed(); 3526 pr_cont("\n"); 3527 dump_stack(); 3528 warn_alloc_show_mem(gfp_mask, nodemask); 3529 } 3530 3531 static inline struct page * 3532 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3533 unsigned int alloc_flags, 3534 const struct alloc_context *ac) 3535 { 3536 struct page *page; 3537 3538 page = get_page_from_freelist(gfp_mask, order, 3539 alloc_flags|ALLOC_CPUSET, ac); 3540 /* 3541 * Fall back to ignoring the cpuset restriction if our nodes 3542 * are depleted. 3543 */ 3544 if (!page) 3545 page = get_page_from_freelist(gfp_mask, order, 3546 alloc_flags, ac); 3547 3548 return page; 3549 } 3550 3551 static inline struct page * 3552 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3553 const struct alloc_context *ac, unsigned long *did_some_progress) 3554 { 3555 struct oom_control oc = { 3556 .zonelist = ac->zonelist, 3557 .nodemask = ac->nodemask, 3558 .memcg = NULL, 3559 .gfp_mask = gfp_mask, 3560 .order = order, 3561 }; 3562 struct page *page; 3563 3564 *did_some_progress = 0; 3565 3566 /* 3567 * Acquire the oom lock. If that fails, somebody else is 3568 * making progress for us. 3569 */ 3570 if (!mutex_trylock(&oom_lock)) { 3571 *did_some_progress = 1; 3572 schedule_timeout_uninterruptible(1); 3573 return NULL; 3574 } 3575 3576 /* 3577 * Go through the zonelist yet one more time, keeping a very high 3578 * watermark here; this is only to catch a parallel oom killing, and we 3579 * must fail if we're still under heavy pressure. But make sure that 3580 * this reclaim attempt does not depend on a __GFP_DIRECT_RECLAIM && 3581 * !__GFP_NORETRY allocation, which can never fail while oom_lock is held. 3582 */ 3583 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3584 ~__GFP_DIRECT_RECLAIM, order, 3585 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3586 if (page) 3587 goto out; 3588 3589 /* Coredumps can quickly deplete all memory reserves */ 3590 if (current->flags & PF_DUMPCORE) 3591 goto out; 3592 /* The OOM killer will not help higher-order allocations */ 3593 if (order > PAGE_ALLOC_COSTLY_ORDER) 3594 goto out; 3595 /* 3596 * We have already exhausted all our reclaim opportunities without any 3597 * success so it is time to admit defeat. We will skip the OOM killer 3598 * because it is very likely that the caller has a more reasonable 3599 * fallback than shooting a random task. 3600 * 3601 * The OOM killer may not free memory on a specific node. 3602 */ 3603 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3604 goto out; 3605 /* The OOM killer does not needlessly kill tasks for lowmem */ 3606 if (ac->highest_zoneidx < ZONE_NORMAL) 3607 goto out; 3608 if (pm_suspended_storage()) 3609 goto out; 3610 /* 3611 * XXX: GFP_NOFS allocations should rather fail than rely on 3612 * other requests to make forward progress. 3613 * We are in an unfortunate situation where out_of_memory cannot 3614 * do much for this context but let's try it to at least get 3615 * access to memory reserves if the current task is killed (see 3616 * out_of_memory).
Once filesystems are ready to handle allocation 3617 * failures more gracefully we should just bail out here. 3618 */ 3619 3620 /* Exhausted what can be done so it's blame time */ 3621 if (out_of_memory(&oc) || 3622 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 3623 *did_some_progress = 1; 3624 3625 /* 3626 * Help non-failing allocations by giving them access to memory 3627 * reserves 3628 */ 3629 if (gfp_mask & __GFP_NOFAIL) 3630 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3631 ALLOC_NO_WATERMARKS, ac); 3632 } 3633 out: 3634 mutex_unlock(&oom_lock); 3635 return page; 3636 } 3637 3638 /* 3639 * Maximum number of compaction retries with a progress before the OOM 3640 * killer is considered the only way to move forward. 3641 */ 3642 #define MAX_COMPACT_RETRIES 16 3643 3644 #ifdef CONFIG_COMPACTION 3645 /* Try memory compaction for high-order allocations before reclaim */ 3646 static struct page * 3647 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3648 unsigned int alloc_flags, const struct alloc_context *ac, 3649 enum compact_priority prio, enum compact_result *compact_result) 3650 { 3651 struct page *page = NULL; 3652 unsigned long pflags; 3653 unsigned int noreclaim_flag; 3654 3655 if (!order) 3656 return NULL; 3657 3658 psi_memstall_enter(&pflags); 3659 delayacct_compact_start(); 3660 noreclaim_flag = memalloc_noreclaim_save(); 3661 3662 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3663 prio, &page); 3664 3665 memalloc_noreclaim_restore(noreclaim_flag); 3666 psi_memstall_leave(&pflags); 3667 delayacct_compact_end(); 3668 3669 if (*compact_result == COMPACT_SKIPPED) 3670 return NULL; 3671 /* 3672 * At least in one zone compaction wasn't deferred or skipped, so let's 3673 * count a compaction stall 3674 */ 3675 count_vm_event(COMPACTSTALL); 3676 3677 /* Prep a captured page if available */ 3678 if (page) 3679 prep_new_page(page, order, gfp_mask, alloc_flags); 3680 3681 /* Try to get a page from the freelist if available */ 3682 if (!page) 3683 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3684 3685 if (page) { 3686 struct zone *zone = page_zone(page); 3687 3688 zone->compact_blockskip_flush = false; 3689 compaction_defer_reset(zone, order, true); 3690 count_vm_event(COMPACTSUCCESS); 3691 return page; 3692 } 3693 3694 /* 3695 * It's bad if a compaction run occurs and fails. The most likely reason 3696 * is that pages exist, but not enough to satisfy the watermarks. 3697 */ 3698 count_vm_event(COMPACTFAIL); 3699 3700 cond_resched(); 3701 3702 return NULL; 3703 } 3704 3705 static inline bool 3706 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3707 enum compact_result compact_result, 3708 enum compact_priority *compact_priority, 3709 int *compaction_retries) 3710 { 3711 int max_retries = MAX_COMPACT_RETRIES; 3712 int min_priority; 3713 bool ret = false; 3714 int retries = *compaction_retries; 3715 enum compact_priority priority = *compact_priority; 3716 3717 if (!order) 3718 return false; 3719 3720 if (fatal_signal_pending(current)) 3721 return false; 3722 3723 /* 3724 * Compaction was skipped due to a lack of free order-0 3725 * migration targets. Continue if reclaim can help. 3726 */ 3727 if (compact_result == COMPACT_SKIPPED) { 3728 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3729 goto out; 3730 } 3731 3732 /* 3733 * Compaction managed to coalesce some page blocks, but the 3734 * allocation failed presumably due to a race. Retry some.
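 *
 * E.g. with MAX_COMPACT_RETRIES == 16, a costly request
 * (order > PAGE_ALLOC_COSTLY_ORDER) is limited to 16 / 4 == 4 retries
 * at this point.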
3735 */ 3736 if (compact_result == COMPACT_SUCCESS) { 3737 /* 3738 * !costly requests are much more important than 3739 * __GFP_RETRY_MAYFAIL costly ones because they are de 3740 * facto nofail and invoke OOM killer to move on while 3741 * costly can fail and users are ready to cope with 3742 * that. 1/4 retries is rather arbitrary but we would 3743 * need much more detailed feedback from compaction to 3744 * make a better decision. 3745 */ 3746 if (order > PAGE_ALLOC_COSTLY_ORDER) 3747 max_retries /= 4; 3748 3749 if (++(*compaction_retries) <= max_retries) { 3750 ret = true; 3751 goto out; 3752 } 3753 } 3754 3755 /* 3756 * Compaction failed. Retry with increasing priority. 3757 */ 3758 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3759 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3760 3761 if (*compact_priority > min_priority) { 3762 (*compact_priority)--; 3763 *compaction_retries = 0; 3764 ret = true; 3765 } 3766 out: 3767 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3768 return ret; 3769 } 3770 #else 3771 static inline struct page * 3772 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3773 unsigned int alloc_flags, const struct alloc_context *ac, 3774 enum compact_priority prio, enum compact_result *compact_result) 3775 { 3776 *compact_result = COMPACT_SKIPPED; 3777 return NULL; 3778 } 3779 3780 static inline bool 3781 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3782 enum compact_result compact_result, 3783 enum compact_priority *compact_priority, 3784 int *compaction_retries) 3785 { 3786 struct zone *zone; 3787 struct zoneref *z; 3788 3789 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3790 return false; 3791 3792 /* 3793 * There are setups with compaction disabled which would prefer to loop 3794 * inside the allocator rather than hit the oom killer prematurely. 3795 * Let's give them a good hope and keep retrying while the order-0 3796 * watermarks are OK. 
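 *
 * (Added note: this !CONFIG_COMPACTION variant is deliberately simple - it
 * never retries costly orders, and otherwise retries only while at least
 * one eligible zone still passes its order-0 min watermark in the loop
 * below.)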
3797 */ 3798 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3799 ac->highest_zoneidx, ac->nodemask) { 3800 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3801 ac->highest_zoneidx, alloc_flags)) 3802 return true; 3803 } 3804 return false; 3805 } 3806 #endif /* CONFIG_COMPACTION */ 3807 3808 #ifdef CONFIG_LOCKDEP 3809 static struct lockdep_map __fs_reclaim_map = 3810 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3811 3812 static bool __need_reclaim(gfp_t gfp_mask) 3813 { 3814 /* no reclaim without waiting on it */ 3815 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3816 return false; 3817 3818 /* this guy won't enter reclaim */ 3819 if (current->flags & PF_MEMALLOC) 3820 return false; 3821 3822 if (gfp_mask & __GFP_NOLOCKDEP) 3823 return false; 3824 3825 return true; 3826 } 3827 3828 void __fs_reclaim_acquire(unsigned long ip) 3829 { 3830 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3831 } 3832 3833 void __fs_reclaim_release(unsigned long ip) 3834 { 3835 lock_release(&__fs_reclaim_map, ip); 3836 } 3837 3838 void fs_reclaim_acquire(gfp_t gfp_mask) 3839 { 3840 gfp_mask = current_gfp_context(gfp_mask); 3841 3842 if (__need_reclaim(gfp_mask)) { 3843 if (gfp_mask & __GFP_FS) 3844 __fs_reclaim_acquire(_RET_IP_); 3845 3846 #ifdef CONFIG_MMU_NOTIFIER 3847 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3848 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3849 #endif 3850 3851 } 3852 } 3853 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3854 3855 void fs_reclaim_release(gfp_t gfp_mask) 3856 { 3857 gfp_mask = current_gfp_context(gfp_mask); 3858 3859 if (__need_reclaim(gfp_mask)) { 3860 if (gfp_mask & __GFP_FS) 3861 __fs_reclaim_release(_RET_IP_); 3862 } 3863 } 3864 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3865 #endif 3866 3867 /* 3868 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3869 * have been rebuilt so allocation retries. Reader side does not lock and 3870 * retries the allocation if zonelist changes. Writer side is protected by the 3871 * embedded spin_lock. 
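 *
 * Reader-side sketch (illustrative, condensed from __alloc_pages_slowpath
 * further down; not a separate API):
 *
 *	cookie = zonelist_iter_begin();
 *	... allocation attempts fail ...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;	(the zonelist was rebuilt, start over)
 *
 * With CONFIG_MEMORY_HOTREMOVE disabled both helpers are effectively
 * no-ops, so the common case pays nothing for this protection.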
3872 */ 3873 static DEFINE_SEQLOCK(zonelist_update_seq); 3874 3875 static unsigned int zonelist_iter_begin(void) 3876 { 3877 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3878 return read_seqbegin(&zonelist_update_seq); 3879 3880 return 0; 3881 } 3882 3883 static unsigned int check_retry_zonelist(unsigned int seq) 3884 { 3885 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3886 return read_seqretry(&zonelist_update_seq, seq); 3887 3888 return seq; 3889 } 3890 3891 /* Perform direct synchronous page reclaim */ 3892 static unsigned long 3893 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3894 const struct alloc_context *ac) 3895 { 3896 unsigned int noreclaim_flag; 3897 unsigned long progress; 3898 3899 cond_resched(); 3900 3901 /* We now go into synchronous reclaim */ 3902 cpuset_memory_pressure_bump(); 3903 fs_reclaim_acquire(gfp_mask); 3904 noreclaim_flag = memalloc_noreclaim_save(); 3905 3906 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3907 ac->nodemask); 3908 3909 memalloc_noreclaim_restore(noreclaim_flag); 3910 fs_reclaim_release(gfp_mask); 3911 3912 cond_resched(); 3913 3914 return progress; 3915 } 3916 3917 /* The really slow allocator path where we enter direct reclaim */ 3918 static inline struct page * 3919 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3920 unsigned int alloc_flags, const struct alloc_context *ac, 3921 unsigned long *did_some_progress) 3922 { 3923 struct page *page = NULL; 3924 unsigned long pflags; 3925 bool drained = false; 3926 3927 psi_memstall_enter(&pflags); 3928 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3929 if (unlikely(!(*did_some_progress))) 3930 goto out; 3931 3932 retry: 3933 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3934 3935 /* 3936 * If an allocation failed after direct reclaim, it could be because 3937 * pages are pinned on the per-cpu lists or in high alloc reserves. 3938 * Shrink them and try again 3939 */ 3940 if (!page && !drained) { 3941 unreserve_highatomic_pageblock(ac, false); 3942 drain_all_pages(NULL); 3943 drained = true; 3944 goto retry; 3945 } 3946 out: 3947 psi_memstall_leave(&pflags); 3948 3949 return page; 3950 } 3951 3952 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 3953 const struct alloc_context *ac) 3954 { 3955 struct zoneref *z; 3956 struct zone *zone; 3957 pg_data_t *last_pgdat = NULL; 3958 enum zone_type highest_zoneidx = ac->highest_zoneidx; 3959 3960 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 3961 ac->nodemask) { 3962 if (!managed_zone(zone)) 3963 continue; 3964 if (last_pgdat != zone->zone_pgdat) { 3965 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 3966 last_pgdat = zone->zone_pgdat; 3967 } 3968 } 3969 } 3970 3971 static inline unsigned int 3972 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 3973 { 3974 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3975 3976 /* 3977 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 3978 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3979 * to save two branches. 3980 */ 3981 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 3982 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 3983 3984 /* 3985 * The caller may dip into page reserves a bit more if the caller 3986 * cannot run direct reclaim, or if the caller has realtime scheduling 3987 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3988 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
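 *
 * Illustrative outcomes, derived from the code below (not exhaustive):
 * GFP_ATOMIC (__GFP_HIGH | __GFP_KSWAPD_RECLAIM) ends up with
 * ALLOC_WMARK_MIN | ALLOC_MIN_RESERVE | ALLOC_KSWAPD | ALLOC_NON_BLOCK,
 * plus ALLOC_HIGHATOMIC for order > 0, and has ALLOC_CPUSET dropped;
 * GFP_KERNEL from a regular (non-realtime) task only gains ALLOC_KSWAPD on
 * top of ALLOC_WMARK_MIN | ALLOC_CPUSET. gfp_to_alloc_flags_cma() may
 * additionally set ALLOC_CMA for movable allocations on CONFIG_CMA kernels.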
3989 */ 3990 alloc_flags |= (__force int) 3991 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 3992 3993 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 3994 /* 3995 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3996 * if it can't schedule. 3997 */ 3998 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 3999 alloc_flags |= ALLOC_NON_BLOCK; 4000 4001 if (order > 0) 4002 alloc_flags |= ALLOC_HIGHATOMIC; 4003 } 4004 4005 /* 4006 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4007 * GFP_ATOMIC) rather than fail, see the comment for 4008 * cpuset_node_allowed(). 4009 */ 4010 if (alloc_flags & ALLOC_MIN_RESERVE) 4011 alloc_flags &= ~ALLOC_CPUSET; 4012 } else if (unlikely(rt_task(current)) && in_task()) 4013 alloc_flags |= ALLOC_MIN_RESERVE; 4014 4015 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4016 4017 return alloc_flags; 4018 } 4019 4020 static bool oom_reserves_allowed(struct task_struct *tsk) 4021 { 4022 if (!tsk_is_oom_victim(tsk)) 4023 return false; 4024 4025 /* 4026 * !MMU doesn't have oom reaper so give access to memory reserves 4027 * only to the thread with TIF_MEMDIE set 4028 */ 4029 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4030 return false; 4031 4032 return true; 4033 } 4034 4035 /* 4036 * Distinguish requests which really need access to full memory 4037 * reserves from oom victims which can live with a portion of it 4038 */ 4039 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4040 { 4041 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4042 return 0; 4043 if (gfp_mask & __GFP_MEMALLOC) 4044 return ALLOC_NO_WATERMARKS; 4045 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4046 return ALLOC_NO_WATERMARKS; 4047 if (!in_interrupt()) { 4048 if (current->flags & PF_MEMALLOC) 4049 return ALLOC_NO_WATERMARKS; 4050 else if (oom_reserves_allowed(current)) 4051 return ALLOC_OOM; 4052 } 4053 4054 return 0; 4055 } 4056 4057 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4058 { 4059 return !!__gfp_pfmemalloc_flags(gfp_mask); 4060 } 4061 4062 /* 4063 * Checks whether it makes sense to retry the reclaim to make a forward progress 4064 * for the given allocation request. 4065 * 4066 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4067 * without success, or when we couldn't even meet the watermark if we 4068 * reclaimed all remaining pages on the LRU lists. 4069 * 4070 * Returns true if a retry is viable or false to enter the oom path. 4071 */ 4072 static inline bool 4073 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4074 struct alloc_context *ac, int alloc_flags, 4075 bool did_some_progress, int *no_progress_loops) 4076 { 4077 struct zone *zone; 4078 struct zoneref *z; 4079 bool ret = false; 4080 4081 /* 4082 * Costly allocations might have made a progress but this doesn't mean 4083 * their order will become available due to high fragmentation so 4084 * always increment the no progress counter for them 4085 */ 4086 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4087 *no_progress_loops = 0; 4088 else 4089 (*no_progress_loops)++; 4090 4091 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4092 goto out; 4093 4094 4095 /* 4096 * Keep reclaiming pages while there is a chance this will lead 4097 * somewhere. If none of the target zones can satisfy our allocation 4098 * request even if all reclaimable pages are considered then we are 4099 * screwed and have to go OOM. 
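 *
 * (Illustrative, not part of the original comment: the loop below treats
 * "free + reclaimable" as the best case after a perfect reclaim pass. A
 * zone with e.g. 500 free and 2000 reclaimable pages against a min
 * watermark of 3000 can never satisfy the request, so it does not justify
 * another retry.)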
4100 */ 4101 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4102 ac->highest_zoneidx, ac->nodemask) { 4103 unsigned long available; 4104 unsigned long reclaimable; 4105 unsigned long min_wmark = min_wmark_pages(zone); 4106 bool wmark; 4107 4108 if (cpusets_enabled() && 4109 (alloc_flags & ALLOC_CPUSET) && 4110 !__cpuset_zone_allowed(zone, gfp_mask)) 4111 continue; 4112 4113 available = reclaimable = zone_reclaimable_pages(zone); 4114 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4115 4116 /* 4117 * Would the allocation succeed if we reclaimed all 4118 * reclaimable pages? 4119 */ 4120 wmark = __zone_watermark_ok(zone, order, min_wmark, 4121 ac->highest_zoneidx, alloc_flags, available); 4122 trace_reclaim_retry_zone(z, order, reclaimable, 4123 available, min_wmark, *no_progress_loops, wmark); 4124 if (wmark) { 4125 ret = true; 4126 break; 4127 } 4128 } 4129 4130 /* 4131 * Memory allocation/reclaim might be called from a WQ context and the 4132 * current implementation of the WQ concurrency control doesn't 4133 * recognize that a particular WQ is congested if the worker thread is 4134 * looping without ever sleeping. Therefore we have to do a short sleep 4135 * here rather than calling cond_resched(). 4136 */ 4137 if (current->flags & PF_WQ_WORKER) 4138 schedule_timeout_uninterruptible(1); 4139 else 4140 cond_resched(); 4141 out: 4142 /* Before OOM, exhaust highatomic_reserve */ 4143 if (!ret) 4144 return unreserve_highatomic_pageblock(ac, true); 4145 4146 return ret; 4147 } 4148 4149 static inline bool 4150 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4151 { 4152 /* 4153 * It's possible that cpuset's mems_allowed and the nodemask from 4154 * mempolicy don't intersect. This should be normally dealt with by 4155 * policy_nodemask(), but it's possible to race with cpuset update in 4156 * such a way the check therein was true, and then it became false 4157 * before we got our cpuset_mems_cookie here. 4158 * This assumes that for all allocations, ac->nodemask can come only 4159 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4160 * when it does not intersect with the cpuset restrictions) or the 4161 * caller can deal with a violated nodemask. 4162 */ 4163 if (cpusets_enabled() && ac->nodemask && 4164 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4165 ac->nodemask = NULL; 4166 return true; 4167 } 4168 4169 /* 4170 * When updating a task's mems_allowed or mempolicy nodemask, it is 4171 * possible to race with parallel threads in such a way that our 4172 * allocation can fail while the mask is being updated. If we are about 4173 * to fail, check if the cpuset changed during allocation and if so, 4174 * retry. 
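 *
 * (Added note: read_mems_allowed_retry() below pairs with the
 * read_mems_allowed_begin() call at the restart: label of
 * __alloc_pages_slowpath(), so a concurrent mems_allowed/mempolicy update
 * is detected much like a zonelist rebuild is detected via
 * zonelist_update_seq above.)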
4175 */ 4176 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4177 return true; 4178 4179 return false; 4180 } 4181 4182 static inline struct page * 4183 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4184 struct alloc_context *ac) 4185 { 4186 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4187 bool can_compact = gfp_compaction_allowed(gfp_mask); 4188 bool nofail = gfp_mask & __GFP_NOFAIL; 4189 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4190 struct page *page = NULL; 4191 unsigned int alloc_flags; 4192 unsigned long did_some_progress; 4193 enum compact_priority compact_priority; 4194 enum compact_result compact_result; 4195 int compaction_retries; 4196 int no_progress_loops; 4197 unsigned int cpuset_mems_cookie; 4198 unsigned int zonelist_iter_cookie; 4199 int reserve_flags; 4200 4201 if (unlikely(nofail)) { 4202 /* 4203 * We most definitely don't want callers attempting to 4204 * allocate greater than order-1 page units with __GFP_NOFAIL. 4205 */ 4206 WARN_ON_ONCE(order > 1); 4207 /* 4208 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM, 4209 * otherwise, we may result in lockup. 4210 */ 4211 WARN_ON_ONCE(!can_direct_reclaim); 4212 /* 4213 * PF_MEMALLOC request from this context is rather bizarre 4214 * because we cannot reclaim anything and only can loop waiting 4215 * for somebody to do a work for us. 4216 */ 4217 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4218 } 4219 4220 restart: 4221 compaction_retries = 0; 4222 no_progress_loops = 0; 4223 compact_priority = DEF_COMPACT_PRIORITY; 4224 cpuset_mems_cookie = read_mems_allowed_begin(); 4225 zonelist_iter_cookie = zonelist_iter_begin(); 4226 4227 /* 4228 * The fast path uses conservative alloc_flags to succeed only until 4229 * kswapd needs to be woken up, and to avoid the cost of setting up 4230 * alloc_flags precisely. So we do that now. 4231 */ 4232 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4233 4234 /* 4235 * We need to recalculate the starting point for the zonelist iterator 4236 * because we might have used different nodemask in the fast path, or 4237 * there was a cpuset modification and we are retrying - otherwise we 4238 * could end up iterating over non-eligible zones endlessly. 4239 */ 4240 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4241 ac->highest_zoneidx, ac->nodemask); 4242 if (!zonelist_zone(ac->preferred_zoneref)) 4243 goto nopage; 4244 4245 /* 4246 * Check for insane configurations where the cpuset doesn't contain 4247 * any suitable zone to satisfy the request - e.g. non-movable 4248 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4249 */ 4250 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4251 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4252 ac->highest_zoneidx, 4253 &cpuset_current_mems_allowed); 4254 if (!zonelist_zone(z)) 4255 goto nopage; 4256 } 4257 4258 if (alloc_flags & ALLOC_KSWAPD) 4259 wake_all_kswapds(order, gfp_mask, ac); 4260 4261 /* 4262 * The adjusted alloc_flags might result in immediate success, so try 4263 * that first 4264 */ 4265 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4266 if (page) 4267 goto got_pg; 4268 4269 /* 4270 * For costly allocations, try direct compaction first, as it's likely 4271 * that we have enough base pages and don't need to reclaim. For non- 4272 * movable high-order allocations, do that as well, as compaction will 4273 * try prevent permanent fragmentation by migrating from blocks of the 4274 * same migratetype. 
4275 * Don't try this for allocations that are allowed to ignore 4276 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4277 */ 4278 if (can_direct_reclaim && can_compact && 4279 (costly_order || 4280 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4281 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4282 page = __alloc_pages_direct_compact(gfp_mask, order, 4283 alloc_flags, ac, 4284 INIT_COMPACT_PRIORITY, 4285 &compact_result); 4286 if (page) 4287 goto got_pg; 4288 4289 /* 4290 * Checks for costly allocations with __GFP_NORETRY, which 4291 * includes some THP page fault allocations 4292 */ 4293 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4294 /* 4295 * If allocating entire pageblock(s) and compaction 4296 * failed because all zones are below low watermarks 4297 * or is prohibited because it recently failed at this 4298 * order, fail immediately unless the allocator has 4299 * requested compaction and reclaim retry. 4300 * 4301 * Reclaim is 4302 * - potentially very expensive because zones are far 4303 * below their low watermarks or this is part of very 4304 * bursty high order allocations, 4305 * - not guaranteed to help because isolate_freepages() 4306 * may not iterate over freed pages as part of its 4307 * linear scan, and 4308 * - unlikely to make entire pageblocks free on its 4309 * own. 4310 */ 4311 if (compact_result == COMPACT_SKIPPED || 4312 compact_result == COMPACT_DEFERRED) 4313 goto nopage; 4314 4315 /* 4316 * Looks like reclaim/compaction is worth trying, but 4317 * sync compaction could be very expensive, so keep 4318 * using async compaction. 4319 */ 4320 compact_priority = INIT_COMPACT_PRIORITY; 4321 } 4322 } 4323 4324 retry: 4325 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4326 if (alloc_flags & ALLOC_KSWAPD) 4327 wake_all_kswapds(order, gfp_mask, ac); 4328 4329 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4330 if (reserve_flags) 4331 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4332 (alloc_flags & ALLOC_KSWAPD); 4333 4334 /* 4335 * Reset the nodemask and zonelist iterators if memory policies can be 4336 * ignored. These allocations are high priority and system rather than 4337 * user oriented. 
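 *
 * (Concretely: reserve_flags is ALLOC_NO_WATERMARKS or ALLOC_OOM as
 * returned by __gfp_pfmemalloc_flags() above, and ALLOC_CPUSET may already
 * have been dropped by gfp_to_alloc_flags() for non-blocking __GFP_HIGH
 * requests.)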
4338 */ 4339 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4340 ac->nodemask = NULL; 4341 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4342 ac->highest_zoneidx, ac->nodemask); 4343 } 4344 4345 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4346 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4347 if (page) 4348 goto got_pg; 4349 4350 /* Caller is not willing to reclaim, we can't balance anything */ 4351 if (!can_direct_reclaim) 4352 goto nopage; 4353 4354 /* Avoid recursion of direct reclaim */ 4355 if (current->flags & PF_MEMALLOC) 4356 goto nopage; 4357 4358 /* Try direct reclaim and then allocating */ 4359 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4360 &did_some_progress); 4361 if (page) 4362 goto got_pg; 4363 4364 /* Try direct compaction and then allocating */ 4365 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4366 compact_priority, &compact_result); 4367 if (page) 4368 goto got_pg; 4369 4370 /* Do not loop if specifically requested */ 4371 if (gfp_mask & __GFP_NORETRY) 4372 goto nopage; 4373 4374 /* 4375 * Do not retry costly high order allocations unless they are 4376 * __GFP_RETRY_MAYFAIL and we can compact 4377 */ 4378 if (costly_order && (!can_compact || 4379 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4380 goto nopage; 4381 4382 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4383 did_some_progress > 0, &no_progress_loops)) 4384 goto retry; 4385 4386 /* 4387 * It doesn't make any sense to retry for the compaction if the order-0 4388 * reclaim is not able to make any progress because the current 4389 * implementation of the compaction depends on the sufficient amount 4390 * of free memory (see __compaction_suitable) 4391 */ 4392 if (did_some_progress > 0 && can_compact && 4393 should_compact_retry(ac, order, alloc_flags, 4394 compact_result, &compact_priority, 4395 &compaction_retries)) 4396 goto retry; 4397 4398 4399 /* 4400 * Deal with possible cpuset update races or zonelist updates to avoid 4401 * a unnecessary OOM kill. 4402 */ 4403 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4404 check_retry_zonelist(zonelist_iter_cookie)) 4405 goto restart; 4406 4407 /* Reclaim has failed us, start killing things */ 4408 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4409 if (page) 4410 goto got_pg; 4411 4412 /* Avoid allocations with no watermarks from looping endlessly */ 4413 if (tsk_is_oom_victim(current) && 4414 (alloc_flags & ALLOC_OOM || 4415 (gfp_mask & __GFP_NOMEMALLOC))) 4416 goto nopage; 4417 4418 /* Retry as long as the OOM killer is making progress */ 4419 if (did_some_progress) { 4420 no_progress_loops = 0; 4421 goto retry; 4422 } 4423 4424 nopage: 4425 /* 4426 * Deal with possible cpuset update races or zonelist updates to avoid 4427 * a unnecessary OOM kill. 
4428 */ 4429 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4430 check_retry_zonelist(zonelist_iter_cookie)) 4431 goto restart; 4432 4433 /* 4434 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4435 * we always retry 4436 */ 4437 if (unlikely(nofail)) { 4438 /* 4439 * Lacking direct_reclaim we can't do anything to reclaim memory, 4440 * we disregard these unreasonable nofail requests and still 4441 * return NULL 4442 */ 4443 if (!can_direct_reclaim) 4444 goto fail; 4445 4446 /* 4447 * Help non-failing allocations by giving some access to memory 4448 * reserves normally used for high priority non-blocking 4449 * allocations but do not use ALLOC_NO_WATERMARKS because this 4450 * could deplete whole memory reserves which would just make 4451 * the situation worse. 4452 */ 4453 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4454 if (page) 4455 goto got_pg; 4456 4457 cond_resched(); 4458 goto retry; 4459 } 4460 fail: 4461 warn_alloc(gfp_mask, ac->nodemask, 4462 "page allocation failure: order:%u", order); 4463 got_pg: 4464 return page; 4465 } 4466 4467 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4468 int preferred_nid, nodemask_t *nodemask, 4469 struct alloc_context *ac, gfp_t *alloc_gfp, 4470 unsigned int *alloc_flags) 4471 { 4472 ac->highest_zoneidx = gfp_zone(gfp_mask); 4473 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4474 ac->nodemask = nodemask; 4475 ac->migratetype = gfp_migratetype(gfp_mask); 4476 4477 if (cpusets_enabled()) { 4478 *alloc_gfp |= __GFP_HARDWALL; 4479 /* 4480 * When we are in the interrupt context, it is irrelevant 4481 * to the current task context. It means that any node ok. 4482 */ 4483 if (in_task() && !ac->nodemask) 4484 ac->nodemask = &cpuset_current_mems_allowed; 4485 else 4486 *alloc_flags |= ALLOC_CPUSET; 4487 } 4488 4489 might_alloc(gfp_mask); 4490 4491 if (should_fail_alloc_page(gfp_mask, order)) 4492 return false; 4493 4494 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 4495 4496 /* Dirty zone balancing only done in the fast path */ 4497 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4498 4499 /* 4500 * The preferred zone is used for statistics but crucially it is 4501 * also used as the starting point for the zonelist iterator. It 4502 * may get reset for allocations that ignore memory policies. 4503 */ 4504 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4505 ac->highest_zoneidx, ac->nodemask); 4506 4507 return true; 4508 } 4509 4510 /* 4511 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 4512 * @gfp: GFP flags for the allocation 4513 * @preferred_nid: The preferred NUMA node ID to allocate from 4514 * @nodemask: Set of nodes to allocate from, may be NULL 4515 * @nr_pages: The number of pages desired on the list or array 4516 * @page_list: Optional list to store the allocated pages 4517 * @page_array: Optional array to store the pages 4518 * 4519 * This is a batched version of the page allocator that attempts to 4520 * allocate nr_pages quickly. Pages are added to page_list if page_list 4521 * is not NULL, otherwise it is assumed that the page_array is valid. 4522 * 4523 * For lists, nr_pages is the number of pages that should be allocated. 4524 * 4525 * For arrays, only NULL elements are populated with pages and nr_pages 4526 * is the maximum number of pages that will be stored in the array. 4527 * 4528 * Returns the number of pages on the list or array. 
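 *
 * Minimal illustrative use of the array variant (hypothetical caller, not
 * part of this file; real callers normally go through the wrapper macros
 * in gfp.h, e.g. alloc_pages_bulk_array(), rather than calling the _noprof
 * variant directly):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_noprof(GFP_KERNEL, numa_mem_id(), NULL,
 *					 8, NULL, pages);
 *
 * Afterwards pages[0] .. pages[filled - 1] hold order-0 pages and any
 * remaining slots are still NULL; a later call with the same array skips
 * the already populated slots.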
4529 */ 4530 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 4531 nodemask_t *nodemask, int nr_pages, 4532 struct list_head *page_list, 4533 struct page **page_array) 4534 { 4535 struct page *page; 4536 unsigned long __maybe_unused UP_flags; 4537 struct zone *zone; 4538 struct zoneref *z; 4539 struct per_cpu_pages *pcp; 4540 struct list_head *pcp_list; 4541 struct alloc_context ac; 4542 gfp_t alloc_gfp; 4543 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4544 int nr_populated = 0, nr_account = 0; 4545 4546 /* 4547 * Skip populated array elements to determine if any pages need 4548 * to be allocated before disabling IRQs. 4549 */ 4550 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 4551 nr_populated++; 4552 4553 /* No pages requested? */ 4554 if (unlikely(nr_pages <= 0)) 4555 goto out; 4556 4557 /* Already populated array? */ 4558 if (unlikely(page_array && nr_pages - nr_populated == 0)) 4559 goto out; 4560 4561 /* Bulk allocator does not support memcg accounting. */ 4562 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4563 goto failed; 4564 4565 /* Use the single page allocator for one page. */ 4566 if (nr_pages - nr_populated == 1) 4567 goto failed; 4568 4569 #ifdef CONFIG_PAGE_OWNER 4570 /* 4571 * PAGE_OWNER may recurse into the allocator to allocate space to 4572 * save the stack with pagesets.lock held. Releasing/reacquiring 4573 * removes much of the performance benefit of bulk allocation so 4574 * force the caller to allocate one page at a time as it'll have 4575 * similar performance to added complexity to the bulk allocator. 4576 */ 4577 if (static_branch_unlikely(&page_owner_inited)) 4578 goto failed; 4579 #endif 4580 4581 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4582 gfp &= gfp_allowed_mask; 4583 alloc_gfp = gfp; 4584 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4585 goto out; 4586 gfp = alloc_gfp; 4587 4588 /* Find an allowed local zone that meets the low watermark. */ 4589 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 4590 unsigned long mark; 4591 4592 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 4593 !__cpuset_zone_allowed(zone, gfp)) { 4594 continue; 4595 } 4596 4597 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 4598 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 4599 goto failed; 4600 } 4601 4602 cond_accept_memory(zone, 0); 4603 retry_this_zone: 4604 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4605 if (zone_watermark_fast(zone, 0, mark, 4606 zonelist_zone_idx(ac.preferred_zoneref), 4607 alloc_flags, gfp)) { 4608 break; 4609 } 4610 4611 if (cond_accept_memory(zone, 0)) 4612 goto retry_this_zone; 4613 4614 /* Try again if zone has deferred pages */ 4615 if (deferred_pages_enabled()) { 4616 if (_deferred_grow_zone(zone, 0)) 4617 goto retry_this_zone; 4618 } 4619 } 4620 4621 /* 4622 * If there are no allowed local zones that meets the watermarks then 4623 * try to allocate a single page and reclaim if necessary. 4624 */ 4625 if (unlikely(!zone)) 4626 goto failed; 4627 4628 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
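 * In that case the code below does not spin; it bails out through
 * failed_irq/failed and falls back to the single-page allocator.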
*/ 4629 pcp_trylock_prepare(UP_flags); 4630 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4631 if (!pcp) 4632 goto failed_irq; 4633 4634 /* Attempt the batch allocation */ 4635 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4636 while (nr_populated < nr_pages) { 4637 4638 /* Skip existing pages */ 4639 if (page_array && page_array[nr_populated]) { 4640 nr_populated++; 4641 continue; 4642 } 4643 4644 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4645 pcp, pcp_list); 4646 if (unlikely(!page)) { 4647 /* Try and allocate at least one page */ 4648 if (!nr_account) { 4649 pcp_spin_unlock(pcp); 4650 goto failed_irq; 4651 } 4652 break; 4653 } 4654 nr_account++; 4655 4656 prep_new_page(page, 0, gfp, 0); 4657 if (page_list) 4658 list_add(&page->lru, page_list); 4659 else 4660 page_array[nr_populated] = page; 4661 nr_populated++; 4662 } 4663 4664 pcp_spin_unlock(pcp); 4665 pcp_trylock_finish(UP_flags); 4666 4667 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4668 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 4669 4670 out: 4671 return nr_populated; 4672 4673 failed_irq: 4674 pcp_trylock_finish(UP_flags); 4675 4676 failed: 4677 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 4678 if (page) { 4679 if (page_list) 4680 list_add(&page->lru, page_list); 4681 else 4682 page_array[nr_populated] = page; 4683 nr_populated++; 4684 } 4685 4686 goto out; 4687 } 4688 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 4689 4690 /* 4691 * This is the 'heart' of the zoned buddy allocator. 4692 */ 4693 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 4694 int preferred_nid, nodemask_t *nodemask) 4695 { 4696 struct page *page; 4697 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4698 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4699 struct alloc_context ac = { }; 4700 4701 /* 4702 * There are several places where we assume that the order value is sane 4703 * so bail out early if the request is out of bound. 4704 */ 4705 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 4706 return NULL; 4707 4708 gfp &= gfp_allowed_mask; 4709 /* 4710 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4711 * resp. GFP_NOIO which has to be inherited for all allocation requests 4712 * from a particular context which has been marked by 4713 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4714 * movable zones are not used during allocation. 4715 */ 4716 gfp = current_gfp_context(gfp); 4717 alloc_gfp = gfp; 4718 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4719 &alloc_gfp, &alloc_flags)) 4720 return NULL; 4721 4722 /* 4723 * Forbid the first pass from falling back to types that fragment 4724 * memory until all local zones are considered. 4725 */ 4726 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 4727 4728 /* First allocation attempt */ 4729 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4730 if (likely(page)) 4731 goto out; 4732 4733 alloc_gfp = gfp; 4734 ac.spread_dirty_pages = false; 4735 4736 /* 4737 * Restore the original nodemask if it was potentially replaced with 4738 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
4739 */ 4740 ac.nodemask = nodemask; 4741 4742 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4743 4744 out: 4745 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4746 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4747 __free_pages(page, order); 4748 page = NULL; 4749 } 4750 4751 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4752 kmsan_alloc_page(page, order, alloc_gfp); 4753 4754 return page; 4755 } 4756 EXPORT_SYMBOL(__alloc_pages_noprof); 4757 4758 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 4759 nodemask_t *nodemask) 4760 { 4761 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 4762 preferred_nid, nodemask); 4763 return page_rmappable_folio(page); 4764 } 4765 EXPORT_SYMBOL(__folio_alloc_noprof); 4766 4767 /* 4768 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4769 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4770 * you need to access high mem. 4771 */ 4772 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 4773 { 4774 struct page *page; 4775 4776 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 4777 if (!page) 4778 return 0; 4779 return (unsigned long) page_address(page); 4780 } 4781 EXPORT_SYMBOL(get_free_pages_noprof); 4782 4783 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 4784 { 4785 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 4786 } 4787 EXPORT_SYMBOL(get_zeroed_page_noprof); 4788 4789 /** 4790 * __free_pages - Free pages allocated with alloc_pages(). 4791 * @page: The page pointer returned from alloc_pages(). 4792 * @order: The order of the allocation. 4793 * 4794 * This function can free multi-page allocations that are not compound 4795 * pages. It does not check that the @order passed in matches that of 4796 * the allocation, so it is easy to leak memory. Freeing more memory 4797 * than was allocated will probably emit a warning. 4798 * 4799 * If the last reference to this page is speculative, it will be released 4800 * by put_page() which only frees the first page of a non-compound 4801 * allocation. To prevent the remaining pages from being leaked, we free 4802 * the subsequent pages here. If you want to use the page's reference 4803 * count to decide when to free the allocation, you should allocate a 4804 * compound page, and use put_page() instead of __free_pages(). 4805 * 4806 * Context: May be called in interrupt context or while holding a normal 4807 * spinlock, but not in NMI context or while holding a raw spinlock. 4808 */ 4809 void __free_pages(struct page *page, unsigned int order) 4810 { 4811 /* get PageHead before we drop reference */ 4812 int head = PageHead(page); 4813 struct alloc_tag *tag = pgalloc_tag_get(page); 4814 4815 if (put_page_testzero(page)) 4816 free_unref_page(page, order); 4817 else if (!head) { 4818 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 4819 while (order-- > 0) 4820 free_unref_page(page + (1 << order), order); 4821 } 4822 } 4823 EXPORT_SYMBOL(__free_pages); 4824 4825 void free_pages(unsigned long addr, unsigned int order) 4826 { 4827 if (addr != 0) { 4828 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4829 __free_pages(virt_to_page((void *)addr), order); 4830 } 4831 } 4832 4833 EXPORT_SYMBOL(free_pages); 4834 4835 /* 4836 * Page Fragment: 4837 * An arbitrary-length arbitrary-offset area of memory which resides 4838 * within a 0 or higher order page. 
Multiple fragments within that page 4839 * are individually refcounted, in the page's reference counter. 4840 * 4841 * The page_frag functions below provide a simple allocation framework for 4842 * page fragments. This is used by the network stack and network device 4843 * drivers to provide a backing region of memory for use as either an 4844 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 4845 */ 4846 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4847 gfp_t gfp_mask) 4848 { 4849 struct page *page = NULL; 4850 gfp_t gfp = gfp_mask; 4851 4852 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4853 gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | 4854 __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC; 4855 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4856 PAGE_FRAG_CACHE_MAX_ORDER); 4857 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4858 #endif 4859 if (unlikely(!page)) 4860 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4861 4862 nc->va = page ? page_address(page) : NULL; 4863 4864 return page; 4865 } 4866 4867 void page_frag_cache_drain(struct page_frag_cache *nc) 4868 { 4869 if (!nc->va) 4870 return; 4871 4872 __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias); 4873 nc->va = NULL; 4874 } 4875 EXPORT_SYMBOL(page_frag_cache_drain); 4876 4877 void __page_frag_cache_drain(struct page *page, unsigned int count) 4878 { 4879 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4880 4881 if (page_ref_sub_and_test(page, count)) 4882 free_unref_page(page, compound_order(page)); 4883 } 4884 EXPORT_SYMBOL(__page_frag_cache_drain); 4885 4886 void *__page_frag_alloc_align(struct page_frag_cache *nc, 4887 unsigned int fragsz, gfp_t gfp_mask, 4888 unsigned int align_mask) 4889 { 4890 unsigned int size = PAGE_SIZE; 4891 struct page *page; 4892 int offset; 4893 4894 if (unlikely(!nc->va)) { 4895 refill: 4896 page = __page_frag_cache_refill(nc, gfp_mask); 4897 if (!page) 4898 return NULL; 4899 4900 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4901 /* if size can vary use size else just use PAGE_SIZE */ 4902 size = nc->size; 4903 #endif 4904 /* Even if we own the page, we do not use atomic_set(). 4905 * This would break get_page_unless_zero() users. 4906 */ 4907 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 4908 4909 /* reset page count bias and offset to start of new frag */ 4910 nc->pfmemalloc = page_is_pfmemalloc(page); 4911 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4912 nc->offset = size; 4913 } 4914 4915 offset = nc->offset - fragsz; 4916 if (unlikely(offset < 0)) { 4917 page = virt_to_page(nc->va); 4918 4919 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 4920 goto refill; 4921 4922 if (unlikely(nc->pfmemalloc)) { 4923 free_unref_page(page, compound_order(page)); 4924 goto refill; 4925 } 4926 4927 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4928 /* if size can vary use size else just use PAGE_SIZE */ 4929 size = nc->size; 4930 #endif 4931 /* OK, page count is 0, we can safely set it */ 4932 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 4933 4934 /* reset page count bias and offset to start of new frag */ 4935 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4936 offset = size - fragsz; 4937 if (unlikely(offset < 0)) { 4938 /* 4939 * The caller is trying to allocate a fragment 4940 * with fragsz > PAGE_SIZE but the cache isn't big 4941 * enough to satisfy the request, this may 4942 * happen in low memory conditions. 
4943 * We don't release the cache page because 4944 * it could make memory pressure worse 4945 * so we simply return NULL here. 4946 */ 4947 return NULL; 4948 } 4949 } 4950 4951 nc->pagecnt_bias--; 4952 offset &= align_mask; 4953 nc->offset = offset; 4954 4955 return nc->va + offset; 4956 } 4957 EXPORT_SYMBOL(__page_frag_alloc_align); 4958 4959 /* 4960 * Frees a page fragment allocated out of either a compound or order 0 page. 4961 */ 4962 void page_frag_free(void *addr) 4963 { 4964 struct page *page = virt_to_head_page(addr); 4965 4966 if (unlikely(put_page_testzero(page))) 4967 free_unref_page(page, compound_order(page)); 4968 } 4969 EXPORT_SYMBOL(page_frag_free); 4970 4971 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4972 size_t size) 4973 { 4974 if (addr) { 4975 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 4976 struct page *page = virt_to_page((void *)addr); 4977 struct page *last = page + nr; 4978 4979 split_page_owner(page, order, 0); 4980 pgalloc_tag_split(page, 1 << order); 4981 split_page_memcg(page, order, 0); 4982 while (page < --last) 4983 set_page_refcounted(last); 4984 4985 last = page + (1UL << order); 4986 for (page += nr; page < last; page++) 4987 __free_pages_ok(page, 0, FPI_TO_TAIL); 4988 } 4989 return (void *)addr; 4990 } 4991 4992 /** 4993 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 4994 * @size: the number of bytes to allocate 4995 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4996 * 4997 * This function is similar to alloc_pages(), except that it allocates the 4998 * minimum number of pages to satisfy the request. alloc_pages() can only 4999 * allocate memory in power-of-two pages. 5000 * 5001 * This function is also limited by MAX_PAGE_ORDER. 5002 * 5003 * Memory allocated by this function must be released by free_pages_exact(). 5004 * 5005 * Return: pointer to the allocated area or %NULL in case of error. 5006 */ 5007 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 5008 { 5009 unsigned int order = get_order(size); 5010 unsigned long addr; 5011 5012 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5013 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5014 5015 addr = get_free_pages_noprof(gfp_mask, order); 5016 return make_alloc_exact(addr, order, size); 5017 } 5018 EXPORT_SYMBOL(alloc_pages_exact_noprof); 5019 5020 /** 5021 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5022 * pages on a node. 5023 * @nid: the preferred node ID where memory should be allocated 5024 * @size: the number of bytes to allocate 5025 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5026 * 5027 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5028 * back. 5029 * 5030 * Return: pointer to the allocated area or %NULL in case of error. 5031 */ 5032 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5033 { 5034 unsigned int order = get_order(size); 5035 struct page *p; 5036 5037 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5038 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5039 5040 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5041 if (!p) 5042 return NULL; 5043 return make_alloc_exact((unsigned long)page_address(p), order, size); 5044 } 5045 5046 /** 5047 * free_pages_exact - release memory allocated via alloc_pages_exact() 5048 * @virt: the value returned by alloc_pages_exact. 5049 * @size: size of allocation, same value as passed to alloc_pages_exact(). 
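 *
 * Passing a different size than was allocated would free too little or too
 * much, because the loop below simply releases PAGE_ALIGN(@size) bytes page
 * by page.
 *
 * Worked example (illustrative, assumes 4K pages): alloc_pages_exact(SZ_64K +
 * 1, GFP_KERNEL) rounds the request up to an order-5 block (128K), keeps the
 * first 17 pages (68K) for the caller and frees the tail; the matching
 * free_pages_exact(ptr, SZ_64K + 1) then releases exactly those 17 pages.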
5050 * 5051 * Release the memory allocated by a previous call to alloc_pages_exact. 5052 */ 5053 void free_pages_exact(void *virt, size_t size) 5054 { 5055 unsigned long addr = (unsigned long)virt; 5056 unsigned long end = addr + PAGE_ALIGN(size); 5057 5058 while (addr < end) { 5059 free_page(addr); 5060 addr += PAGE_SIZE; 5061 } 5062 } 5063 EXPORT_SYMBOL(free_pages_exact); 5064 5065 /** 5066 * nr_free_zone_pages - count number of pages beyond high watermark 5067 * @offset: The zone index of the highest zone 5068 * 5069 * nr_free_zone_pages() counts the number of pages which are beyond the 5070 * high watermark within all zones at or below a given zone index. For each 5071 * zone, the number of pages is calculated as: 5072 * 5073 * nr_free_zone_pages = managed_pages - high_pages 5074 * 5075 * Return: number of pages beyond high watermark. 5076 */ 5077 static unsigned long nr_free_zone_pages(int offset) 5078 { 5079 struct zoneref *z; 5080 struct zone *zone; 5081 5082 /* Just pick one node, since fallback list is circular */ 5083 unsigned long sum = 0; 5084 5085 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5086 5087 for_each_zone_zonelist(zone, z, zonelist, offset) { 5088 unsigned long size = zone_managed_pages(zone); 5089 unsigned long high = high_wmark_pages(zone); 5090 if (size > high) 5091 sum += size - high; 5092 } 5093 5094 return sum; 5095 } 5096 5097 /** 5098 * nr_free_buffer_pages - count number of pages beyond high watermark 5099 * 5100 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5101 * watermark within ZONE_DMA and ZONE_NORMAL. 5102 * 5103 * Return: number of pages beyond high watermark within ZONE_DMA and 5104 * ZONE_NORMAL. 5105 */ 5106 unsigned long nr_free_buffer_pages(void) 5107 { 5108 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5109 } 5110 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5111 5112 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5113 { 5114 zoneref->zone = zone; 5115 zoneref->zone_idx = zone_idx(zone); 5116 } 5117 5118 /* 5119 * Builds allocation fallback zone lists. 5120 * 5121 * Add all populated zones of a node to the zonelist. 5122 */ 5123 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5124 { 5125 struct zone *zone; 5126 enum zone_type zone_type = MAX_NR_ZONES; 5127 int nr_zones = 0; 5128 5129 do { 5130 zone_type--; 5131 zone = pgdat->node_zones + zone_type; 5132 if (populated_zone(zone)) { 5133 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5134 check_highest_zone(zone_type); 5135 } 5136 } while (zone_type); 5137 5138 return nr_zones; 5139 } 5140 5141 #ifdef CONFIG_NUMA 5142 5143 static int __parse_numa_zonelist_order(char *s) 5144 { 5145 /* 5146 * We used to support different zonelists modes but they turned 5147 * out to be just not useful. 
Let's keep the warning in place 5148 * if somebody still use the cmd line parameter so that we do 5149 * not fail it silently 5150 */ 5151 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5152 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5153 return -EINVAL; 5154 } 5155 return 0; 5156 } 5157 5158 static char numa_zonelist_order[] = "Node"; 5159 #define NUMA_ZONELIST_ORDER_LEN 16 5160 /* 5161 * sysctl handler for numa_zonelist_order 5162 */ 5163 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5164 void *buffer, size_t *length, loff_t *ppos) 5165 { 5166 if (write) 5167 return __parse_numa_zonelist_order(buffer); 5168 return proc_dostring(table, write, buffer, length, ppos); 5169 } 5170 5171 static int node_load[MAX_NUMNODES]; 5172 5173 /** 5174 * find_next_best_node - find the next node that should appear in a given node's fallback list 5175 * @node: node whose fallback list we're appending 5176 * @used_node_mask: nodemask_t of already used nodes 5177 * 5178 * We use a number of factors to determine which is the next node that should 5179 * appear on a given node's fallback list. The node should not have appeared 5180 * already in @node's fallback list, and it should be the next closest node 5181 * according to the distance array (which contains arbitrary distance values 5182 * from each node to each node in the system), and should also prefer nodes 5183 * with no CPUs, since presumably they'll have very little allocation pressure 5184 * on them otherwise. 5185 * 5186 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5187 */ 5188 int find_next_best_node(int node, nodemask_t *used_node_mask) 5189 { 5190 int n, val; 5191 int min_val = INT_MAX; 5192 int best_node = NUMA_NO_NODE; 5193 5194 /* 5195 * Use the local node if we haven't already, but for memoryless local 5196 * node, we should skip it and fall back to other nodes. 5197 */ 5198 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5199 node_set(node, *used_node_mask); 5200 return node; 5201 } 5202 5203 for_each_node_state(n, N_MEMORY) { 5204 5205 /* Don't want a node to appear more than once */ 5206 if (node_isset(n, *used_node_mask)) 5207 continue; 5208 5209 /* Use the distance array to find the distance */ 5210 val = node_distance(node, n); 5211 5212 /* Penalize nodes under us ("prefer the next node") */ 5213 val += (n < node); 5214 5215 /* Give preference to headless and unused nodes */ 5216 if (!cpumask_empty(cpumask_of_node(n))) 5217 val += PENALTY_FOR_NODE_WITH_CPUS; 5218 5219 /* Slight preference for less loaded node */ 5220 val *= MAX_NUMNODES; 5221 val += node_load[n]; 5222 5223 if (val < min_val) { 5224 min_val = val; 5225 best_node = n; 5226 } 5227 } 5228 5229 if (best_node >= 0) 5230 node_set(best_node, *used_node_mask); 5231 5232 return best_node; 5233 } 5234 5235 5236 /* 5237 * Build zonelists ordered by node and zones within node. 5238 * This results in maximum locality--normal zone overflows into local 5239 * DMA zone, if any--but risks exhausting DMA zone. 
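 *
 * Hypothetical illustration (the real layout depends on the machine): on a
 * two-node system whose nodes both have Normal and DMA32 memory, node 0's
 * fallback list built here would be roughly
 *
 *	node0-Normal, node0-DMA32, node1-Normal, node1-DMA32
 *
 * i.e. every zone of the nearer node is preferred over any zone of the
 * node returned next by find_next_best_node().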
5240 */ 5241 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5242 unsigned nr_nodes) 5243 { 5244 struct zoneref *zonerefs; 5245 int i; 5246 5247 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5248 5249 for (i = 0; i < nr_nodes; i++) { 5250 int nr_zones; 5251 5252 pg_data_t *node = NODE_DATA(node_order[i]); 5253 5254 nr_zones = build_zonerefs_node(node, zonerefs); 5255 zonerefs += nr_zones; 5256 } 5257 zonerefs->zone = NULL; 5258 zonerefs->zone_idx = 0; 5259 } 5260 5261 /* 5262 * Build __GFP_THISNODE zonelists 5263 */ 5264 static void build_thisnode_zonelists(pg_data_t *pgdat) 5265 { 5266 struct zoneref *zonerefs; 5267 int nr_zones; 5268 5269 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5270 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5271 zonerefs += nr_zones; 5272 zonerefs->zone = NULL; 5273 zonerefs->zone_idx = 0; 5274 } 5275 5276 /* 5277 * Build zonelists ordered by zone and nodes within zones. 5278 * This results in conserving DMA zone[s] until all Normal memory is 5279 * exhausted, but results in overflowing to remote node while memory 5280 * may still exist in local DMA zone. 5281 */ 5282 5283 static void build_zonelists(pg_data_t *pgdat) 5284 { 5285 static int node_order[MAX_NUMNODES]; 5286 int node, nr_nodes = 0; 5287 nodemask_t used_mask = NODE_MASK_NONE; 5288 int local_node, prev_node; 5289 5290 /* NUMA-aware ordering of nodes */ 5291 local_node = pgdat->node_id; 5292 prev_node = local_node; 5293 5294 memset(node_order, 0, sizeof(node_order)); 5295 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5296 /* 5297 * We don't want to pressure a particular node. 5298 * So adding penalty to the first node in same 5299 * distance group to make it round-robin. 5300 */ 5301 if (node_distance(local_node, node) != 5302 node_distance(local_node, prev_node)) 5303 node_load[node] += 1; 5304 5305 node_order[nr_nodes++] = node; 5306 prev_node = node; 5307 } 5308 5309 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5310 build_thisnode_zonelists(pgdat); 5311 pr_info("Fallback order for Node %d: ", local_node); 5312 for (node = 0; node < nr_nodes; node++) 5313 pr_cont("%d ", node_order[node]); 5314 pr_cont("\n"); 5315 } 5316 5317 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5318 /* 5319 * Return node id of node used for "local" allocations. 5320 * I.e., first node id of first zone in arg node's generic zonelist. 5321 * Used for initializing percpu 'numa_mem', which is used primarily 5322 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5323 */ 5324 int local_memory_node(int node) 5325 { 5326 struct zoneref *z; 5327 5328 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5329 gfp_zone(GFP_KERNEL), 5330 NULL); 5331 return zonelist_node_idx(z); 5332 } 5333 #endif 5334 5335 static void setup_min_unmapped_ratio(void); 5336 static void setup_min_slab_ratio(void); 5337 #else /* CONFIG_NUMA */ 5338 5339 static void build_zonelists(pg_data_t *pgdat) 5340 { 5341 struct zoneref *zonerefs; 5342 int nr_zones; 5343 5344 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5345 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5346 zonerefs += nr_zones; 5347 5348 zonerefs->zone = NULL; 5349 zonerefs->zone_idx = 0; 5350 } 5351 5352 #endif /* CONFIG_NUMA */ 5353 5354 /* 5355 * Boot pageset table. One per cpu which is going to be used for all 5356 * zones and all nodes. 
The parameters will be set in such a way 5357 * that an item put on a list will immediately be handed over to 5358 * the buddy list. This is safe since pageset manipulation is done 5359 * with interrupts disabled. 5360 * 5361 * The boot_pagesets must be kept even after bootup is complete for 5362 * unused processors and/or zones. They do play a role for bootstrapping 5363 * hotplugged processors. 5364 * 5365 * zoneinfo_show() and maybe other functions do 5366 * not check if the processor is online before following the pageset pointer. 5367 * Other parts of the kernel may not check if the zone is available. 5368 */ 5369 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5370 /* These effectively disable the pcplists in the boot pageset completely */ 5371 #define BOOT_PAGESET_HIGH 0 5372 #define BOOT_PAGESET_BATCH 1 5373 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5374 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5375 5376 static void __build_all_zonelists(void *data) 5377 { 5378 int nid; 5379 int __maybe_unused cpu; 5380 pg_data_t *self = data; 5381 unsigned long flags; 5382 5383 /* 5384 * The zonelist_update_seq must be acquired with irqsave because the 5385 * reader can be invoked from IRQ with GFP_ATOMIC. 5386 */ 5387 write_seqlock_irqsave(&zonelist_update_seq, flags); 5388 /* 5389 * Also disable synchronous printk() to prevent any printk() from 5390 * trying to hold port->lock, for 5391 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5392 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5393 */ 5394 printk_deferred_enter(); 5395 5396 #ifdef CONFIG_NUMA 5397 memset(node_load, 0, sizeof(node_load)); 5398 #endif 5399 5400 /* 5401 * This node is hotadded and no memory is yet present. So just 5402 * building zonelists is fine - no need to touch other nodes. 5403 */ 5404 if (self && !node_online(self->node_id)) { 5405 build_zonelists(self); 5406 } else { 5407 /* 5408 * All possible nodes have pgdat preallocated 5409 * in free_area_init 5410 */ 5411 for_each_node(nid) { 5412 pg_data_t *pgdat = NODE_DATA(nid); 5413 5414 build_zonelists(pgdat); 5415 } 5416 5417 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5418 /* 5419 * We now know the "local memory node" for each node-- 5420 * i.e., the node of the first zone in the generic zonelist. 5421 * Set up numa_mem percpu variable for on-line cpus. During 5422 * boot, only the boot cpu should be on-line; we'll init the 5423 * secondary cpus' numa_mem as they come on-line. During 5424 * node/memory hotplug, we'll fixup all on-line cpus. 5425 */ 5426 for_each_online_cpu(cpu) 5427 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5428 #endif 5429 } 5430 5431 printk_deferred_exit(); 5432 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5433 } 5434 5435 static noinline void __init 5436 build_all_zonelists_init(void) 5437 { 5438 int cpu; 5439 5440 __build_all_zonelists(NULL); 5441 5442 /* 5443 * Initialize the boot_pagesets that are going to be used 5444 * for bootstrapping processors. The real pagesets for 5445 * each zone will be allocated later when the per cpu 5446 * allocator is available. 5447 * 5448 * boot_pagesets are used also for bootstrapping offline 5449 * cpus if the system is already booted because the pagesets 5450 * are needed to initialize allocators on a specific cpu too. 5451 * F.e. 
the percpu allocator needs the page allocator which 5452 * needs the percpu allocator in order to allocate its pagesets 5453 * (a chicken-egg dilemma). 5454 */ 5455 for_each_possible_cpu(cpu) 5456 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5457 5458 mminit_verify_zonelist(); 5459 cpuset_init_current_mems_allowed(); 5460 } 5461 5462 /* 5463 * unless system_state == SYSTEM_BOOTING. 5464 * 5465 * __ref due to call of __init annotated helper build_all_zonelists_init 5466 * [protected by SYSTEM_BOOTING]. 5467 */ 5468 void __ref build_all_zonelists(pg_data_t *pgdat) 5469 { 5470 unsigned long vm_total_pages; 5471 5472 if (system_state == SYSTEM_BOOTING) { 5473 build_all_zonelists_init(); 5474 } else { 5475 __build_all_zonelists(pgdat); 5476 /* cpuset refresh routine should be here */ 5477 } 5478 /* Get the number of free pages beyond high watermark in all zones. */ 5479 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5480 /* 5481 * Disable grouping by mobility if the number of pages in the 5482 * system is too low to allow the mechanism to work. It would be 5483 * more accurate, but expensive to check per-zone. This check is 5484 * made on memory-hotadd so a system can start with mobility 5485 * disabled and enable it later 5486 */ 5487 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5488 page_group_by_mobility_disabled = 1; 5489 else 5490 page_group_by_mobility_disabled = 0; 5491 5492 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5493 nr_online_nodes, 5494 page_group_by_mobility_disabled ? "off" : "on", 5495 vm_total_pages); 5496 #ifdef CONFIG_NUMA 5497 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5498 #endif 5499 } 5500 5501 static int zone_batchsize(struct zone *zone) 5502 { 5503 #ifdef CONFIG_MMU 5504 int batch; 5505 5506 /* 5507 * The number of pages to batch allocate is either ~0.1% 5508 * of the zone or 1MB, whichever is smaller. The batch 5509 * size is striking a balance between allocation latency 5510 * and zone lock contention. 5511 */ 5512 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5513 batch /= 4; /* We effectively *= 4 below */ 5514 if (batch < 1) 5515 batch = 1; 5516 5517 /* 5518 * Clamp the batch to a 2^n - 1 value. Having a power 5519 * of 2 value was found to be more likely to have 5520 * suboptimal cache aliasing properties in some cases. 5521 * 5522 * For example if 2 tasks are alternately allocating 5523 * batches of pages, one task can end up with a lot 5524 * of pages of one half of the possible page colors 5525 * and the other with pages of the other colors. 5526 */ 5527 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5528 5529 return batch; 5530 5531 #else 5532 /* The deferral and batching of frees should be suppressed under NOMMU 5533 * conditions. 5534 * 5535 * The problem is that NOMMU needs to be able to allocate large chunks 5536 * of contiguous memory as there's no hardware page translation to 5537 * assemble apparent contiguous memory from discontiguous pages. 5538 * 5539 * Queueing large contiguous runs of pages for batching, however, 5540 * causes the pages to actually be freed in smaller chunks. As there 5541 * can be a significant delay between the individual batches being 5542 * recycled, this leads to the once large chunks of space being 5543 * fragmented and becoming unavailable for high-order allocations. 
5544 */ 5545 return 0; 5546 #endif 5547 } 5548 5549 static int percpu_pagelist_high_fraction; 5550 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5551 int high_fraction) 5552 { 5553 #ifdef CONFIG_MMU 5554 int high; 5555 int nr_split_cpus; 5556 unsigned long total_pages; 5557 5558 if (!high_fraction) { 5559 /* 5560 * By default, the high value of the pcp is based on the zone 5561 * low watermark so that if they are full then background 5562 * reclaim will not be started prematurely. 5563 */ 5564 total_pages = low_wmark_pages(zone); 5565 } else { 5566 /* 5567 * If percpu_pagelist_high_fraction is configured, the high 5568 * value is based on a fraction of the managed pages in the 5569 * zone. 5570 */ 5571 total_pages = zone_managed_pages(zone) / high_fraction; 5572 } 5573 5574 /* 5575 * Split the high value across all online CPUs local to the zone. Note 5576 * that early in boot CPUs may not be online yet, and that during 5577 * CPU hotplug the cpumask may not yet be updated when a CPU is being 5578 * onlined. For memory nodes that have no CPUs, split the high value 5579 * across all online CPUs to mitigate the risk that reclaim is triggered 5580 * prematurely due to pages stored on pcp lists. 5581 */ 5582 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5583 if (!nr_split_cpus) 5584 nr_split_cpus = num_online_cpus(); 5585 high = total_pages / nr_split_cpus; 5586 5587 /* 5588 * Ensure high is at least batch*4. The multiple is based on the 5589 * historical relationship between high and batch. 5590 */ 5591 high = max(high, batch << 2); 5592 5593 return high; 5594 #else 5595 return 0; 5596 #endif 5597 } 5598 5599 /* 5600 * pcp->high and pcp->batch values are related and generally batch is lower 5601 * than high. They are also related to pcp->count such that count is lower 5602 * than high, and as soon as it reaches high, the pcplist is flushed. 5603 * 5604 * However, guaranteeing these relations at all times would require e.g. write 5605 * barriers here but also careful usage of read barriers at the read side, and 5606 * thus be prone to error and bad for performance. So the update only prevents 5607 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5608 * should ensure they can cope with those fields changing asynchronously, and 5609 * fully trust only the pcp->count field on the local CPU with interrupts 5610 * disabled. 5611 * 5612 * The pcp_batch_high_lock mutex must be held when calling this function 5613 * outside of boot time (or some other assurance that no concurrent updaters 5614 * exist). 5615 */ 5616 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5617 unsigned long high_max, unsigned long batch) 5618 { 5619 WRITE_ONCE(pcp->batch, batch); 5620 WRITE_ONCE(pcp->high_min, high_min); 5621 WRITE_ONCE(pcp->high_max, high_max); 5622 } 5623 5624 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5625 { 5626 int pindex; 5627 5628 memset(pcp, 0, sizeof(*pcp)); 5629 memset(pzstats, 0, sizeof(*pzstats)); 5630 5631 spin_lock_init(&pcp->lock); 5632 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5633 INIT_LIST_HEAD(&pcp->lists[pindex]); 5634 5635 /* 5636 * Set batch and high values safe for a boot pageset. A true percpu 5637 * pageset's initialization will update them subsequently. Here we don't 5638 * need to be as careful as pageset_update() since nobody can access the 5639 * pageset yet.
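 * For example, with the boot values set just below (high_min == high_max == 0,
 * batch == 1), any page freed onto this pageset immediately exceeds the high
 * limit and is handed straight to the buddy list, which is what effectively
 * disables the boot pcplists.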
5640 */ 5641 pcp->high_min = BOOT_PAGESET_HIGH; 5642 pcp->high_max = BOOT_PAGESET_HIGH; 5643 pcp->batch = BOOT_PAGESET_BATCH; 5644 pcp->free_count = 0; 5645 } 5646 5647 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 5648 unsigned long high_max, unsigned long batch) 5649 { 5650 struct per_cpu_pages *pcp; 5651 int cpu; 5652 5653 for_each_possible_cpu(cpu) { 5654 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5655 pageset_update(pcp, high_min, high_max, batch); 5656 } 5657 } 5658 5659 /* 5660 * Calculate and set new high and batch values for all per-cpu pagesets of a 5661 * zone based on the zone's size. 5662 */ 5663 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5664 { 5665 int new_high_min, new_high_max, new_batch; 5666 5667 new_batch = max(1, zone_batchsize(zone)); 5668 if (percpu_pagelist_high_fraction) { 5669 new_high_min = zone_highsize(zone, new_batch, cpu_online, 5670 percpu_pagelist_high_fraction); 5671 /* 5672 * PCP high is tuned manually; disable auto-tuning by 5673 * setting both high_min and high_max to the manual value. 5674 */ 5675 new_high_max = new_high_min; 5676 } else { 5677 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 5678 new_high_max = zone_highsize(zone, new_batch, cpu_online, 5679 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 5680 } 5681 5682 if (zone->pageset_high_min == new_high_min && 5683 zone->pageset_high_max == new_high_max && 5684 zone->pageset_batch == new_batch) 5685 return; 5686 5687 zone->pageset_high_min = new_high_min; 5688 zone->pageset_high_max = new_high_max; 5689 zone->pageset_batch = new_batch; 5690 5691 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 5692 new_batch); 5693 } 5694 5695 void __meminit setup_zone_pageset(struct zone *zone) 5696 { 5697 int cpu; 5698 5699 /* Size may be 0 on !SMP && !NUMA */ 5700 if (sizeof(struct per_cpu_zonestat) > 0) 5701 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5702 5703 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5704 for_each_possible_cpu(cpu) { 5705 struct per_cpu_pages *pcp; 5706 struct per_cpu_zonestat *pzstats; 5707 5708 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5709 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5710 per_cpu_pages_init(pcp, pzstats); 5711 } 5712 5713 zone_set_pageset_high_and_batch(zone, 0); 5714 } 5715 5716 /* 5717 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5718 * page high values need to be recalculated. 5719 */ 5720 static void zone_pcp_update(struct zone *zone, int cpu_online) 5721 { 5722 mutex_lock(&pcp_batch_high_lock); 5723 zone_set_pageset_high_and_batch(zone, cpu_online); 5724 mutex_unlock(&pcp_batch_high_lock); 5725 } 5726 5727 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 5728 { 5729 struct per_cpu_pages *pcp; 5730 struct cpu_cacheinfo *cci; 5731 5732 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5733 cci = get_cpu_cacheinfo(cpu); 5734 /* 5735 * If the CPU's data cache slice is large enough, "pcp->batch" 5736 * pages can be kept on the PCP before it is drained when high-order 5737 * pages are freed back to back without intervening allocations. 5738 * This can reduce zone lock contention without hurting 5739 * the sharing of cache-hot pages.
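 * Illustrative example (assumed numbers): with a 1 MiB per-CPU data cache
 * slice and 4 KiB pages, per_cpu_data_slice_size >> PAGE_SHIFT is 256, so
 * PCPF_FREE_HIGH_BATCH is set below only while 3 * pcp->batch is less than
 * 256, i.e. for batch values up to 85.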
5740 */ 5741 spin_lock(&pcp->lock); 5742 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 5743 pcp->flags |= PCPF_FREE_HIGH_BATCH; 5744 else 5745 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 5746 spin_unlock(&pcp->lock); 5747 } 5748 5749 void setup_pcp_cacheinfo(unsigned int cpu) 5750 { 5751 struct zone *zone; 5752 5753 for_each_populated_zone(zone) 5754 zone_pcp_update_cacheinfo(zone, cpu); 5755 } 5756 5757 /* 5758 * Allocate per cpu pagesets and initialize them. 5759 * Before this call only boot pagesets were available. 5760 */ 5761 void __init setup_per_cpu_pageset(void) 5762 { 5763 struct pglist_data *pgdat; 5764 struct zone *zone; 5765 int __maybe_unused cpu; 5766 5767 for_each_populated_zone(zone) 5768 setup_zone_pageset(zone); 5769 5770 #ifdef CONFIG_NUMA 5771 /* 5772 * Unpopulated zones continue using the boot pagesets. 5773 * The numa stats for these pagesets need to be reset. 5774 * Otherwise, they will end up skewing the stats of 5775 * the nodes these zones are associated with. 5776 */ 5777 for_each_possible_cpu(cpu) { 5778 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5779 memset(pzstats->vm_numa_event, 0, 5780 sizeof(pzstats->vm_numa_event)); 5781 } 5782 #endif 5783 5784 for_each_online_pgdat(pgdat) 5785 pgdat->per_cpu_nodestats = 5786 alloc_percpu(struct per_cpu_nodestat); 5787 } 5788 5789 __meminit void zone_pcp_init(struct zone *zone) 5790 { 5791 /* 5792 * per cpu subsystem is not up at this point. The following code 5793 * relies on the ability of the linker to provide the 5794 * offset of a (static) per cpu variable into the per cpu area. 5795 */ 5796 zone->per_cpu_pageset = &boot_pageset; 5797 zone->per_cpu_zonestats = &boot_zonestats; 5798 zone->pageset_high_min = BOOT_PAGESET_HIGH; 5799 zone->pageset_high_max = BOOT_PAGESET_HIGH; 5800 zone->pageset_batch = BOOT_PAGESET_BATCH; 5801 5802 if (populated_zone(zone)) 5803 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5804 zone->present_pages, zone_batchsize(zone)); 5805 } 5806 5807 void adjust_managed_page_count(struct page *page, long count) 5808 { 5809 atomic_long_add(count, &page_zone(page)->managed_pages); 5810 totalram_pages_add(count); 5811 } 5812 EXPORT_SYMBOL(adjust_managed_page_count); 5813 5814 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5815 { 5816 void *pos; 5817 unsigned long pages = 0; 5818 5819 start = (void *)PAGE_ALIGN((unsigned long)start); 5820 end = (void *)((unsigned long)end & PAGE_MASK); 5821 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5822 struct page *page = virt_to_page(pos); 5823 void *direct_map_addr; 5824 5825 /* 5826 * 'direct_map_addr' might be different from 'pos' 5827 * because some architectures' virt_to_page() 5828 * work with aliases. Getting the direct map 5829 * address ensures that we get a _writeable_ 5830 * alias for the memset(). 5831 */ 5832 direct_map_addr = page_address(page); 5833 /* 5834 * Perform a kasan-unchecked memset() since this memory 5835 * has not been initialized. 
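 * (Poison values outside 0..0xFF skip the memset below and leave the
 * page contents untouched.)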
5836 */ 5837 direct_map_addr = kasan_reset_tag(direct_map_addr); 5838 if ((unsigned int)poison <= 0xFF) 5839 memset(direct_map_addr, poison, PAGE_SIZE); 5840 5841 free_reserved_page(page); 5842 } 5843 5844 if (pages && s) 5845 pr_info("Freeing %s memory: %luK\n", s, K(pages)); 5846 5847 return pages; 5848 } 5849 5850 void free_reserved_page(struct page *page) 5851 { 5852 clear_page_tag_ref(page); 5853 ClearPageReserved(page); 5854 init_page_count(page); 5855 __free_page(page); 5856 adjust_managed_page_count(page, 1); 5857 } 5858 EXPORT_SYMBOL(free_reserved_page); 5859 5860 static int page_alloc_cpu_dead(unsigned int cpu) 5861 { 5862 struct zone *zone; 5863 5864 lru_add_drain_cpu(cpu); 5865 mlock_drain_remote(cpu); 5866 drain_pages(cpu); 5867 5868 /* 5869 * Spill the event counters of the dead processor 5870 * into the current processor's event counters. 5871 * This artificially elevates the count of the current 5872 * processor. 5873 */ 5874 vm_events_fold_cpu(cpu); 5875 5876 /* 5877 * Zero the differential counters of the dead processor 5878 * so that the vm statistics are consistent. 5879 * 5880 * This is only okay since the processor is dead and cannot 5881 * race with what we are doing. 5882 */ 5883 cpu_vm_stats_fold(cpu); 5884 5885 for_each_populated_zone(zone) 5886 zone_pcp_update(zone, 0); 5887 5888 return 0; 5889 } 5890 5891 static int page_alloc_cpu_online(unsigned int cpu) 5892 { 5893 struct zone *zone; 5894 5895 for_each_populated_zone(zone) 5896 zone_pcp_update(zone, 1); 5897 return 0; 5898 } 5899 5900 void __init page_alloc_init_cpuhp(void) 5901 { 5902 int ret; 5903 5904 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 5905 "mm/page_alloc:pcp", 5906 page_alloc_cpu_online, 5907 page_alloc_cpu_dead); 5908 WARN_ON(ret < 0); 5909 } 5910 5911 /* 5912 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5913 * or min_free_kbytes changes. 5914 */ 5915 static void calculate_totalreserve_pages(void) 5916 { 5917 struct pglist_data *pgdat; 5918 unsigned long reserve_pages = 0; 5919 enum zone_type i, j; 5920 5921 for_each_online_pgdat(pgdat) { 5922 5923 pgdat->totalreserve_pages = 0; 5924 5925 for (i = 0; i < MAX_NR_ZONES; i++) { 5926 struct zone *zone = pgdat->node_zones + i; 5927 long max = 0; 5928 unsigned long managed_pages = zone_managed_pages(zone); 5929 5930 /* Find the valid and maximum lowmem_reserve in the zone */ 5931 for (j = i; j < MAX_NR_ZONES; j++) { 5932 if (zone->lowmem_reserve[j] > max) 5933 max = zone->lowmem_reserve[j]; 5934 } 5935 5936 /* we treat the high watermark as reserved pages. */ 5937 max += high_wmark_pages(zone); 5938 5939 if (max > managed_pages) 5940 max = managed_pages; 5941 5942 pgdat->totalreserve_pages += max; 5943 5944 reserve_pages += max; 5945 } 5946 } 5947 totalreserve_pages = reserve_pages; 5948 } 5949 5950 /* 5951 * setup_per_zone_lowmem_reserve - called whenever 5952 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 5953 * has a correct lowmem_reserve value, so an adequate number of 5954 * pages are left in the zone after a successful __alloc_pages().
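 * Worked example (illustrative numbers): with sysctl_lowmem_reserve_ratio[i]
 * == 256 and 1048576 managed pages (4 GiB of 4 KiB pages) in the zones above
 * zone i, zone i keeps 1048576 / 256 = 4096 pages in reserve against
 * allocations that could have been satisfied from the higher zones.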
5955 */ 5956 static void setup_per_zone_lowmem_reserve(void) 5957 { 5958 struct pglist_data *pgdat; 5959 enum zone_type i, j; 5960 5961 for_each_online_pgdat(pgdat) { 5962 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 5963 struct zone *zone = &pgdat->node_zones[i]; 5964 int ratio = sysctl_lowmem_reserve_ratio[i]; 5965 bool clear = !ratio || !zone_managed_pages(zone); 5966 unsigned long managed_pages = 0; 5967 5968 for (j = i + 1; j < MAX_NR_ZONES; j++) { 5969 struct zone *upper_zone = &pgdat->node_zones[j]; 5970 bool empty = !zone_managed_pages(upper_zone); 5971 5972 managed_pages += zone_managed_pages(upper_zone); 5973 5974 if (clear || empty) 5975 zone->lowmem_reserve[j] = 0; 5976 else 5977 zone->lowmem_reserve[j] = managed_pages / ratio; 5978 } 5979 } 5980 } 5981 5982 /* update totalreserve_pages */ 5983 calculate_totalreserve_pages(); 5984 } 5985 5986 static void __setup_per_zone_wmarks(void) 5987 { 5988 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5989 unsigned long lowmem_pages = 0; 5990 struct zone *zone; 5991 unsigned long flags; 5992 5993 /* Calculate the total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 5994 for_each_zone(zone) { 5995 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 5996 lowmem_pages += zone_managed_pages(zone); 5997 } 5998 5999 for_each_zone(zone) { 6000 u64 tmp; 6001 6002 spin_lock_irqsave(&zone->lock, flags); 6003 tmp = (u64)pages_min * zone_managed_pages(zone); 6004 tmp = div64_ul(tmp, lowmem_pages); 6005 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 6006 /* 6007 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6008 * need pages from the highmem and movable zones, so cap 6009 * pages_min to a small value here. 6010 * 6011 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN) 6012 * deltas control async page reclaim, and so should 6013 * not be capped for highmem and movable zones. 6014 */ 6015 unsigned long min_pages; 6016 6017 min_pages = zone_managed_pages(zone) / 1024; 6018 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6019 zone->_watermark[WMARK_MIN] = min_pages; 6020 } else { 6021 /* 6022 * If it's a lowmem zone, reserve a number of pages 6023 * proportionate to the zone's size. 6024 */ 6025 zone->_watermark[WMARK_MIN] = tmp; 6026 } 6027 6028 /* 6029 * Set the kswapd watermarks distance according to the 6030 * scale factor in proportion to available memory, but 6031 * ensure a minimum size on small systems. For example, * watermark_scale_factor == 10 keeps these deltas at 0.1% of the zone's managed pages. 6032 */ 6033 tmp = max_t(u64, tmp >> 2, 6034 mult_frac(zone_managed_pages(zone), 6035 watermark_scale_factor, 10000)); 6036 6037 zone->watermark_boost = 0; 6038 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6039 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 6040 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 6041 6042 spin_unlock_irqrestore(&zone->lock, flags); 6043 } 6044 6045 /* update totalreserve_pages */ 6046 calculate_totalreserve_pages(); 6047 } 6048 6049 /** 6050 * setup_per_zone_wmarks - called when min_free_kbytes changes 6051 * or when memory is hot-{added|removed} 6052 * 6053 * Ensures that the watermark[min,low,high] values for each zone are set 6054 * correctly with respect to min_free_kbytes. 6055 */ 6056 void setup_per_zone_wmarks(void) 6057 { 6058 struct zone *zone; 6059 static DEFINE_SPINLOCK(lock); 6060 6061 spin_lock(&lock); 6062 __setup_per_zone_wmarks(); 6063 spin_unlock(&lock); 6064 6065 /* 6066 * The watermark values have changed, so update the pcpu batch 6067 * and high limits or the limits may be inappropriate.
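 * (pcp->high is derived from the zone's low watermark via zone_highsize(),
 * so the pcp limits must be recomputed whenever the watermarks move.)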
6068 */ 6069 for_each_zone(zone) 6070 zone_pcp_update(zone, 0); 6071 } 6072 6073 /* 6074 * Initialise min_free_kbytes. 6075 * 6076 * For small machines we want it small (128k min). For large machines 6077 * we want it large (256MB max). But it is not linear, because network 6078 * bandwidth does not increase linearly with machine size. We use 6079 * 6080 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6081 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6082 * 6083 * which yields 6084 * 6085 * 16MB: 512k 6086 * 32MB: 724k 6087 * 64MB: 1024k 6088 * 128MB: 1448k 6089 * 256MB: 2048k 6090 * 512MB: 2896k 6091 * 1024MB: 4096k 6092 * 2048MB: 5792k 6093 * 4096MB: 8192k 6094 * 8192MB: 11584k 6095 * 16384MB: 16384k 6096 */ 6097 void calculate_min_free_kbytes(void) 6098 { 6099 unsigned long lowmem_kbytes; 6100 int new_min_free_kbytes; 6101 6102 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6103 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6104 6105 if (new_min_free_kbytes > user_min_free_kbytes) 6106 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6107 else 6108 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6109 new_min_free_kbytes, user_min_free_kbytes); 6110 6111 } 6112 6113 int __meminit init_per_zone_wmark_min(void) 6114 { 6115 calculate_min_free_kbytes(); 6116 setup_per_zone_wmarks(); 6117 refresh_zone_stat_thresholds(); 6118 setup_per_zone_lowmem_reserve(); 6119 6120 #ifdef CONFIG_NUMA 6121 setup_min_unmapped_ratio(); 6122 setup_min_slab_ratio(); 6123 #endif 6124 6125 khugepaged_min_free_kbytes_update(); 6126 6127 return 0; 6128 } 6129 postcore_initcall(init_per_zone_wmark_min) 6130 6131 /* 6132 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax() 6133 * so that we can record the user-defined value and update the per-zone 6134 * watermarks whenever min_free_kbytes changes.
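 * E.g. 'sysctl -w vm.min_free_kbytes=65536' lands here with write == 1,
 * records user_min_free_kbytes and triggers setup_per_zone_wmarks().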
6135 */ 6136 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6137 void *buffer, size_t *length, loff_t *ppos) 6138 { 6139 int rc; 6140 6141 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6142 if (rc) 6143 return rc; 6144 6145 if (write) { 6146 user_min_free_kbytes = min_free_kbytes; 6147 setup_per_zone_wmarks(); 6148 } 6149 return 0; 6150 } 6151 6152 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6153 void *buffer, size_t *length, loff_t *ppos) 6154 { 6155 int rc; 6156 6157 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6158 if (rc) 6159 return rc; 6160 6161 if (write) 6162 setup_per_zone_wmarks(); 6163 6164 return 0; 6165 } 6166 6167 #ifdef CONFIG_NUMA 6168 static void setup_min_unmapped_ratio(void) 6169 { 6170 pg_data_t *pgdat; 6171 struct zone *zone; 6172 6173 for_each_online_pgdat(pgdat) 6174 pgdat->min_unmapped_pages = 0; 6175 6176 for_each_zone(zone) 6177 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6178 sysctl_min_unmapped_ratio) / 100; 6179 } 6180 6181 6182 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6183 void *buffer, size_t *length, loff_t *ppos) 6184 { 6185 int rc; 6186 6187 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6188 if (rc) 6189 return rc; 6190 6191 setup_min_unmapped_ratio(); 6192 6193 return 0; 6194 } 6195 6196 static void setup_min_slab_ratio(void) 6197 { 6198 pg_data_t *pgdat; 6199 struct zone *zone; 6200 6201 for_each_online_pgdat(pgdat) 6202 pgdat->min_slab_pages = 0; 6203 6204 for_each_zone(zone) 6205 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6206 sysctl_min_slab_ratio) / 100; 6207 } 6208 6209 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6210 void *buffer, size_t *length, loff_t *ppos) 6211 { 6212 int rc; 6213 6214 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6215 if (rc) 6216 return rc; 6217 6218 setup_min_slab_ratio(); 6219 6220 return 0; 6221 } 6222 #endif 6223 6224 /* 6225 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6226 * proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve() 6227 * whenever sysctl_lowmem_reserve_ratio changes. 6228 * 6229 * The reserve ratio has no relation to the minimum watermarks; it is 6230 * only meaningful in relation to the zone sizes established at 6231 * boot time. 6232 */ 6233 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6234 int write, void *buffer, size_t *length, loff_t *ppos) 6235 { 6236 int i; 6237 6238 proc_dointvec_minmax(table, write, buffer, length, ppos); 6239 6240 for (i = 0; i < MAX_NR_ZONES; i++) { 6241 if (sysctl_lowmem_reserve_ratio[i] < 1) 6242 sysctl_lowmem_reserve_ratio[i] = 0; 6243 } 6244 6245 setup_per_zone_lowmem_reserve(); 6246 return 0; 6247 } 6248 6249 /* 6250 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6251 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6252 * pagelist can have before it gets flushed back to the buddy allocator.
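 * For example, writing the minimum allowed value 8 to a zone with 1048576
 * managed pages gives a zone-wide high of 1048576 / 8 = 131072 pages, which
 * zone_highsize() then splits across the CPUs local to the zone.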
6253 */ 6254 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6255 int write, void *buffer, size_t *length, loff_t *ppos) 6256 { 6257 struct zone *zone; 6258 int old_percpu_pagelist_high_fraction; 6259 int ret; 6260 6261 mutex_lock(&pcp_batch_high_lock); 6262 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6263 6264 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6265 if (!write || ret < 0) 6266 goto out; 6267 6268 /* Sanity checking to avoid pcp imbalance */ 6269 if (percpu_pagelist_high_fraction && 6270 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6271 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6272 ret = -EINVAL; 6273 goto out; 6274 } 6275 6276 /* No change? */ 6277 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6278 goto out; 6279 6280 for_each_populated_zone(zone) 6281 zone_set_pageset_high_and_batch(zone, 0); 6282 out: 6283 mutex_unlock(&pcp_batch_high_lock); 6284 return ret; 6285 } 6286 6287 static struct ctl_table page_alloc_sysctl_table[] = { 6288 { 6289 .procname = "min_free_kbytes", 6290 .data = &min_free_kbytes, 6291 .maxlen = sizeof(min_free_kbytes), 6292 .mode = 0644, 6293 .proc_handler = min_free_kbytes_sysctl_handler, 6294 .extra1 = SYSCTL_ZERO, 6295 }, 6296 { 6297 .procname = "watermark_boost_factor", 6298 .data = &watermark_boost_factor, 6299 .maxlen = sizeof(watermark_boost_factor), 6300 .mode = 0644, 6301 .proc_handler = proc_dointvec_minmax, 6302 .extra1 = SYSCTL_ZERO, 6303 }, 6304 { 6305 .procname = "watermark_scale_factor", 6306 .data = &watermark_scale_factor, 6307 .maxlen = sizeof(watermark_scale_factor), 6308 .mode = 0644, 6309 .proc_handler = watermark_scale_factor_sysctl_handler, 6310 .extra1 = SYSCTL_ONE, 6311 .extra2 = SYSCTL_THREE_THOUSAND, 6312 }, 6313 { 6314 .procname = "percpu_pagelist_high_fraction", 6315 .data = &percpu_pagelist_high_fraction, 6316 .maxlen = sizeof(percpu_pagelist_high_fraction), 6317 .mode = 0644, 6318 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6319 .extra1 = SYSCTL_ZERO, 6320 }, 6321 { 6322 .procname = "lowmem_reserve_ratio", 6323 .data = &sysctl_lowmem_reserve_ratio, 6324 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6325 .mode = 0644, 6326 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6327 }, 6328 #ifdef CONFIG_NUMA 6329 { 6330 .procname = "numa_zonelist_order", 6331 .data = &numa_zonelist_order, 6332 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6333 .mode = 0644, 6334 .proc_handler = numa_zonelist_order_handler, 6335 }, 6336 { 6337 .procname = "min_unmapped_ratio", 6338 .data = &sysctl_min_unmapped_ratio, 6339 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6340 .mode = 0644, 6341 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6342 .extra1 = SYSCTL_ZERO, 6343 .extra2 = SYSCTL_ONE_HUNDRED, 6344 }, 6345 { 6346 .procname = "min_slab_ratio", 6347 .data = &sysctl_min_slab_ratio, 6348 .maxlen = sizeof(sysctl_min_slab_ratio), 6349 .mode = 0644, 6350 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6351 .extra1 = SYSCTL_ZERO, 6352 .extra2 = SYSCTL_ONE_HUNDRED, 6353 }, 6354 #endif 6355 }; 6356 6357 void __init page_alloc_sysctl_init(void) 6358 { 6359 register_sysctl_init("vm", page_alloc_sysctl_table); 6360 } 6361 6362 #ifdef CONFIG_CONTIG_ALLOC 6363 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6364 static void alloc_contig_dump_pages(struct list_head *page_list) 6365 { 6366 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6367 6368 if 
(DYNAMIC_DEBUG_BRANCH(descriptor)) { 6369 struct page *page; 6370 6371 dump_stack(); 6372 list_for_each_entry(page, page_list, lru) 6373 dump_page(page, "migration failure"); 6374 } 6375 } 6376 6377 /* 6378 * [start, end) must belong to a single zone. 6379 * @migratetype: the migratetype of the range, reported by 6380 * trace_mm_alloc_contig_migrate_range_info. 6381 */ 6382 int __alloc_contig_migrate_range(struct compact_control *cc, 6383 unsigned long start, unsigned long end, 6384 int migratetype) 6385 { 6386 /* This function is based on compact_zone() from compaction.c. */ 6387 unsigned int nr_reclaimed; 6388 unsigned long pfn = start; 6389 unsigned int tries = 0; 6390 int ret = 0; 6391 struct migration_target_control mtc = { 6392 .nid = zone_to_nid(cc->zone), 6393 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 6394 .reason = MR_CONTIG_RANGE, 6395 }; 6396 struct page *page; 6397 unsigned long total_mapped = 0; 6398 unsigned long total_migrated = 0; 6399 unsigned long total_reclaimed = 0; 6400 6401 lru_cache_disable(); 6402 6403 while (pfn < end || !list_empty(&cc->migratepages)) { 6404 if (fatal_signal_pending(current)) { 6405 ret = -EINTR; 6406 break; 6407 } 6408 6409 if (list_empty(&cc->migratepages)) { 6410 cc->nr_migratepages = 0; 6411 ret = isolate_migratepages_range(cc, pfn, end); 6412 if (ret && ret != -EAGAIN) 6413 break; 6414 pfn = cc->migrate_pfn; 6415 tries = 0; 6416 } else if (++tries == 5) { 6417 ret = -EBUSY; 6418 break; 6419 } 6420 6421 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6422 &cc->migratepages); 6423 cc->nr_migratepages -= nr_reclaimed; 6424 6425 if (trace_mm_alloc_contig_migrate_range_info_enabled()) { 6426 total_reclaimed += nr_reclaimed; 6427 list_for_each_entry(page, &cc->migratepages, lru) { 6428 struct folio *folio = page_folio(page); 6429 6430 total_mapped += folio_mapped(folio) * 6431 folio_nr_pages(folio); 6432 } 6433 } 6434 6435 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6436 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6437 6438 if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret) 6439 total_migrated += cc->nr_migratepages; 6440 6441 /* 6442 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6443 * to retry this error, so do the same here. 6444 */ 6445 if (ret == -ENOMEM) 6446 break; 6447 } 6448 6449 lru_cache_enable(); 6450 if (ret < 0) { 6451 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6452 alloc_contig_dump_pages(&cc->migratepages); 6453 putback_movable_pages(&cc->migratepages); 6454 } 6455 6456 trace_mm_alloc_contig_migrate_range_info(start, end, migratetype, 6457 total_migrated, 6458 total_reclaimed, 6459 total_mapped); 6460 return (ret < 0) ? ret : 0; 6461 } 6462 6463 static void split_free_pages(struct list_head *list) 6464 { 6465 int order; 6466 6467 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6468 struct page *page, *next; 6469 int nr_pages = 1 << order; 6470 6471 list_for_each_entry_safe(page, next, &list[order], lru) { 6472 int i; 6473 6474 post_alloc_hook(page, order, __GFP_MOVABLE); 6475 if (!order) 6476 continue; 6477 6478 split_page(page, order); 6479 6480 /* Add all subpages to the order-0 head, in sequence.
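 * e.g. an order-2 entry is split into four order-0 pages, which are
 * appended in ascending pfn order.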
*/ 6481 list_del(&page->lru); 6482 for (i = 0; i < nr_pages; i++) 6483 list_add_tail(&page[i].lru, &list[0]); 6484 } 6485 } 6486 } 6487 6488 /** 6489 * alloc_contig_range() -- tries to allocate given range of pages 6490 * @start: start PFN to allocate 6491 * @end: one-past-the-last PFN to allocate 6492 * @migratetype: migratetype of the underlying pageblocks (either 6493 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6494 * in range must have the same migratetype and it must 6495 * be either of the two. 6496 * @gfp_mask: GFP mask to use during compaction 6497 * 6498 * The PFN range does not have to be pageblock aligned. The PFN range must 6499 * belong to a single zone. 6500 * 6501 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6502 * pageblocks in the range. Once isolated, the pageblocks should not 6503 * be modified by others. 6504 * 6505 * Return: zero on success or negative error code. On success all 6506 * pages whose PFN is in [start, end) are allocated for the caller and 6507 * need to be freed with free_contig_range(). 6508 */ 6509 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 6510 unsigned migratetype, gfp_t gfp_mask) 6511 { 6512 unsigned long outer_start, outer_end; 6513 int ret = 0; 6514 6515 struct compact_control cc = { 6516 .nr_migratepages = 0, 6517 .order = -1, 6518 .zone = page_zone(pfn_to_page(start)), 6519 .mode = MIGRATE_SYNC, 6520 .ignore_skip_hint = true, 6521 .no_set_skip_hint = true, 6522 .gfp_mask = current_gfp_context(gfp_mask), 6523 .alloc_contig = true, 6524 }; 6525 INIT_LIST_HEAD(&cc.migratepages); 6526 6527 /* 6528 * What we do here is we mark all pageblocks in range as 6529 * MIGRATE_ISOLATE. Because pageblock and max order pages may 6530 * have different sizes, and due to the way the page allocator 6531 * works, start_isolate_page_range() has special handling for this. 6532 * 6533 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6534 * migrate the pages from an unaligned range (i.e. pages that 6535 * we are interested in). This will put all the pages in 6536 * range back to page allocator as MIGRATE_ISOLATE. 6537 * 6538 * When this is done, we take the pages in range from page 6539 * allocator removing them from the buddy system. This way 6540 * page allocator will never consider using them. 6541 * 6542 * This lets us mark the pageblocks back as 6543 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6544 * aligned range but not in the unaligned, original range are 6545 * put back to page allocator so that buddy can use them. 6546 */ 6547 6548 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); 6549 if (ret) 6550 goto done; 6551 6552 drain_all_pages(cc.zone); 6553 6554 /* 6555 * In case of -EBUSY, we'd like to know which page causes the problem. 6556 * So, just fall through. test_pages_isolated() has a tracepoint 6557 * which will report the busy page. 6558 * 6559 * It is possible that busy pages could become available before 6560 * the call to test_pages_isolated, and the range will actually be 6561 * allocated. So, if we fall through be sure to clear ret so that 6562 * -EBUSY is not accidentally used or returned to caller. 6563 */ 6564 ret = __alloc_contig_migrate_range(&cc, start, end, migratetype); 6565 if (ret && ret != -EBUSY) 6566 goto done; 6567 ret = 0; 6568 6569 /* 6570 * Pages from [start, end) are within pageblock_nr_pages 6571 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6572 * more, all pages in [start, end) are free in the page allocator.
6573 * What we are going to do is to allocate all pages from 6574 * [start, end) (that is remove them from page allocator). 6575 * 6576 * The only problem is that pages at the beginning and at the 6577 * end of the range of interest may not be aligned with the pages 6578 * that the page allocator holds, i.e. they can be part of higher 6579 * order pages. Because of this, we reserve the bigger range and 6580 * once this is done free the pages we are not interested in. 6581 * 6582 * We don't have to hold zone->lock here because the pages are 6583 * isolated thus they won't get removed from buddy. 6584 */ 6585 outer_start = find_large_buddy(start); 6586 6587 /* Make sure the range is really isolated. */ 6588 if (test_pages_isolated(outer_start, end, 0)) { 6589 ret = -EBUSY; 6590 goto done; 6591 } 6592 6593 /* Grab isolated pages from freelists. */ 6594 outer_end = isolate_freepages_range(&cc, outer_start, end); 6595 if (!outer_end) { 6596 ret = -EBUSY; 6597 goto done; 6598 } 6599 6600 if (!(gfp_mask & __GFP_COMP)) { 6601 split_free_pages(cc.freepages); 6602 6603 /* Free head and tail (if any) */ 6604 if (start != outer_start) 6605 free_contig_range(outer_start, start - outer_start); 6606 if (end != outer_end) 6607 free_contig_range(end, outer_end - end); 6608 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 6609 struct page *head = pfn_to_page(start); 6610 int order = ilog2(end - start); 6611 6612 check_new_pages(head, order); 6613 prep_new_page(head, order, gfp_mask, 0); 6614 } else { 6615 ret = -EINVAL; 6616 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 6617 start, end, outer_start, outer_end); 6618 } 6619 done: 6620 undo_isolate_page_range(start, end, migratetype); 6621 return ret; 6622 } 6623 EXPORT_SYMBOL(alloc_contig_range_noprof); 6624 6625 static int __alloc_contig_pages(unsigned long start_pfn, 6626 unsigned long nr_pages, gfp_t gfp_mask) 6627 { 6628 unsigned long end_pfn = start_pfn + nr_pages; 6629 6630 return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE, 6631 gfp_mask); 6632 } 6633 6634 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6635 unsigned long nr_pages) 6636 { 6637 unsigned long i, end_pfn = start_pfn + nr_pages; 6638 struct page *page; 6639 6640 for (i = start_pfn; i < end_pfn; i++) { 6641 page = pfn_to_online_page(i); 6642 if (!page) 6643 return false; 6644 6645 if (page_zone(page) != z) 6646 return false; 6647 6648 if (PageReserved(page)) 6649 return false; 6650 6651 if (PageHuge(page)) 6652 return false; 6653 } 6654 return true; 6655 } 6656 6657 static bool zone_spans_last_pfn(const struct zone *zone, 6658 unsigned long start_pfn, unsigned long nr_pages) 6659 { 6660 unsigned long last_pfn = start_pfn + nr_pages - 1; 6661 6662 return zone_spans_pfn(zone, last_pfn); 6663 } 6664 6665 /** 6666 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6667 * @nr_pages: Number of contiguous pages to allocate 6668 * @gfp_mask: GFP mask to limit search and used during compaction 6669 * @nid: Target node 6670 * @nodemask: Mask for other possible nodes 6671 * 6672 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6673 * on an applicable zonelist to find a contiguous pfn range which can then be 6674 * tried for allocation with alloc_contig_range(). This routine is intended 6675 * for allocation requests which cannot be fulfilled with the buddy allocator. 6676 * 6677 * The allocated memory is always aligned to a page boundary.
If nr_pages is a 6678 * power of two, then the allocated range is also guaranteed to be aligned to the same 6679 * nr_pages (e.g. a 1GB request would be aligned to 1GB). 6680 * 6681 * Allocated pages can be freed with free_contig_range() or by manually calling 6682 * __free_page() on each allocated page. 6683 * 6684 * Return: pointer to contiguous pages on success, or NULL if not successful. 6685 */ 6686 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 6687 int nid, nodemask_t *nodemask) 6688 { 6689 unsigned long ret, pfn, flags; 6690 struct zonelist *zonelist; 6691 struct zone *zone; 6692 struct zoneref *z; 6693 6694 zonelist = node_zonelist(nid, gfp_mask); 6695 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6696 gfp_zone(gfp_mask), nodemask) { 6697 spin_lock_irqsave(&zone->lock, flags); 6698 6699 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6700 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6701 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6702 /* 6703 * We release the zone lock here because 6704 * alloc_contig_range() will also lock the zone 6705 * at some point. If there's an allocation 6706 * spinning on this lock, it may win the race 6707 * and cause alloc_contig_range() to fail... 6708 */ 6709 spin_unlock_irqrestore(&zone->lock, flags); 6710 ret = __alloc_contig_pages(pfn, nr_pages, 6711 gfp_mask); 6712 if (!ret) 6713 return pfn_to_page(pfn); 6714 spin_lock_irqsave(&zone->lock, flags); 6715 } 6716 pfn += nr_pages; 6717 } 6718 spin_unlock_irqrestore(&zone->lock, flags); 6719 } 6720 return NULL; 6721 } 6722 #endif /* CONFIG_CONTIG_ALLOC */ 6723 6724 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6725 { 6726 unsigned long count = 0; 6727 struct folio *folio = pfn_folio(pfn); 6728 6729 if (folio_test_large(folio)) { 6730 int expected = folio_nr_pages(folio); 6731 6732 if (nr_pages == expected) 6733 folio_put(folio); 6734 else 6735 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n", 6736 pfn, nr_pages, expected); 6737 return; 6738 } 6739 6740 for (; nr_pages--; pfn++) { 6741 struct page *page = pfn_to_page(pfn); 6742 6743 count += page_count(page) != 1; 6744 __free_page(page); 6745 } 6746 WARN(count != 0, "%lu pages are still in use!\n", count); 6747 } 6748 EXPORT_SYMBOL(free_contig_range); 6749 6750 /* 6751 * Effectively disable pcplists for the zone by setting the high limit to 0 6752 * and draining all cpus. A concurrent page freeing on another CPU that's about 6753 * to put a page on the pcplist will either finish before the drain and the page 6754 * will be drained, or observe the new high limit and skip the pcplist. 6755 * 6756 * Must be paired with a call to zone_pcp_enable().
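 * Typical usage (sketch):
 *
 *	zone_pcp_disable(zone);
 *	... operate on pages that must not linger on pcplists ...
 *	zone_pcp_enable(zone);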
6757 */ 6758 void zone_pcp_disable(struct zone *zone) 6759 { 6760 mutex_lock(&pcp_batch_high_lock); 6761 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 6762 __drain_all_pages(zone, true); 6763 } 6764 6765 void zone_pcp_enable(struct zone *zone) 6766 { 6767 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 6768 zone->pageset_high_max, zone->pageset_batch); 6769 mutex_unlock(&pcp_batch_high_lock); 6770 } 6771 6772 void zone_pcp_reset(struct zone *zone) 6773 { 6774 int cpu; 6775 struct per_cpu_zonestat *pzstats; 6776 6777 if (zone->per_cpu_pageset != &boot_pageset) { 6778 for_each_online_cpu(cpu) { 6779 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6780 drain_zonestat(zone, pzstats); 6781 } 6782 free_percpu(zone->per_cpu_pageset); 6783 zone->per_cpu_pageset = &boot_pageset; 6784 if (zone->per_cpu_zonestats != &boot_zonestats) { 6785 free_percpu(zone->per_cpu_zonestats); 6786 zone->per_cpu_zonestats = &boot_zonestats; 6787 } 6788 } 6789 } 6790 6791 #ifdef CONFIG_MEMORY_HOTREMOVE 6792 /* 6793 * All pages in the range must be in a single zone, must not contain holes, 6794 * must span full sections, and must be isolated before calling this function. 6795 * 6796 * Returns the number of managed (non-PageOffline()) pages in the range: the 6797 * number of pages for which memory offlining code must adjust managed page 6798 * counters using adjust_managed_page_count(). 6799 */ 6800 unsigned long __offline_isolated_pages(unsigned long start_pfn, 6801 unsigned long end_pfn) 6802 { 6803 unsigned long already_offline = 0, flags; 6804 unsigned long pfn = start_pfn; 6805 struct page *page; 6806 struct zone *zone; 6807 unsigned int order; 6808 6809 offline_mem_sections(pfn, end_pfn); 6810 zone = page_zone(pfn_to_page(pfn)); 6811 spin_lock_irqsave(&zone->lock, flags); 6812 while (pfn < end_pfn) { 6813 page = pfn_to_page(pfn); 6814 /* 6815 * An HWPoisoned page may not be in the buddy system, and its 6816 * page_count() is not 0, so simply skip it. 6817 */ 6818 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6819 pfn++; 6820 continue; 6821 } 6822 /* 6823 * At this point all remaining PageOffline() pages have a 6824 * reference count of 0 and can simply be skipped. 6825 */ 6826 if (PageOffline(page)) { 6827 BUG_ON(page_count(page)); 6828 BUG_ON(PageBuddy(page)); 6829 already_offline++; 6830 pfn++; 6831 continue; 6832 } 6833 6834 BUG_ON(page_count(page)); 6835 BUG_ON(!PageBuddy(page)); 6836 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 6837 order = buddy_order(page); 6838 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 6839 pfn += (1 << order); 6840 } 6841 spin_unlock_irqrestore(&zone->lock, flags); 6842 6843 return end_pfn - start_pfn - already_offline; 6844 } 6845 #endif 6846 6847 /* 6848 * This function returns a stable result only if called under zone lock.
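 * Without the lock, the buddy order of a candidate head can change
 * concurrently, which is why the loop below reads it with
 * buddy_order_unsafe() and lockless callers must treat the result as a hint.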
6849 */ 6850 bool is_free_buddy_page(const struct page *page) 6851 { 6852 unsigned long pfn = page_to_pfn(page); 6853 unsigned int order; 6854 6855 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6856 const struct page *head = page - (pfn & ((1 << order) - 1)); 6857 6858 if (PageBuddy(head) && 6859 buddy_order_unsafe(head) >= order) 6860 break; 6861 } 6862 6863 return order <= MAX_PAGE_ORDER; 6864 } 6865 EXPORT_SYMBOL(is_free_buddy_page); 6866 6867 #ifdef CONFIG_MEMORY_FAILURE 6868 static inline void add_to_free_list(struct page *page, struct zone *zone, 6869 unsigned int order, int migratetype, 6870 bool tail) 6871 { 6872 __add_to_free_list(page, zone, order, migratetype, tail); 6873 account_freepages(zone, 1 << order, migratetype); 6874 } 6875 6876 /* 6877 * Break down a higher-order page into sub-pages, and keep our target out of 6878 * the buddy allocator. 6879 */ 6880 static void break_down_buddy_pages(struct zone *zone, struct page *page, 6881 struct page *target, int low, int high, 6882 int migratetype) 6883 { 6884 unsigned long size = 1 << high; 6885 struct page *current_buddy; 6886 6887 while (high > low) { 6888 high--; 6889 size >>= 1; 6890 6891 if (target >= &page[size]) { 6892 current_buddy = page; 6893 page = page + size; 6894 } else { 6895 current_buddy = page + size; 6896 } 6897 6898 if (set_page_guard(zone, current_buddy, high)) 6899 continue; 6900 6901 add_to_free_list(current_buddy, zone, high, migratetype, false); 6902 set_buddy_order(current_buddy, high); 6903 } 6904 } 6905 6906 /* 6907 * Take a page that will be marked as poisoned off the buddy allocator. 6908 */ 6909 bool take_page_off_buddy(struct page *page) 6910 { 6911 struct zone *zone = page_zone(page); 6912 unsigned long pfn = page_to_pfn(page); 6913 unsigned long flags; 6914 unsigned int order; 6915 bool ret = false; 6916 6917 spin_lock_irqsave(&zone->lock, flags); 6918 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6919 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6920 int page_order = buddy_order(page_head); 6921 6922 if (PageBuddy(page_head) && page_order >= order) { 6923 unsigned long pfn_head = page_to_pfn(page_head); 6924 int migratetype = get_pfnblock_migratetype(page_head, 6925 pfn_head); 6926 6927 del_page_from_free_list(page_head, zone, page_order, 6928 migratetype); 6929 break_down_buddy_pages(zone, page_head, page, 0, 6930 page_order, migratetype); 6931 SetPageHWPoisonTakenOff(page); 6932 ret = true; 6933 break; 6934 } 6935 if (page_count(page_head) > 0) 6936 break; 6937 } 6938 spin_unlock_irqrestore(&zone->lock, flags); 6939 return ret; 6940 } 6941 6942 /* 6943 * Cancel the takeoff done by take_page_off_buddy().
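 * i.e. drop the last reference and, if it hits zero, clear the taken-off
 * marker and feed the page back to the free lists.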
6944 */ 6945 bool put_page_back_buddy(struct page *page) 6946 { 6947 struct zone *zone = page_zone(page); 6948 unsigned long flags; 6949 bool ret = false; 6950 6951 spin_lock_irqsave(&zone->lock, flags); 6952 if (put_page_testzero(page)) { 6953 unsigned long pfn = page_to_pfn(page); 6954 int migratetype = get_pfnblock_migratetype(page, pfn); 6955 6956 ClearPageHWPoisonTakenOff(page); 6957 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 6958 if (TestClearPageHWPoison(page)) { 6959 ret = true; 6960 } 6961 } 6962 spin_unlock_irqrestore(&zone->lock, flags); 6963 6964 return ret; 6965 } 6966 #endif 6967 6968 #ifdef CONFIG_ZONE_DMA 6969 bool has_managed_dma(void) 6970 { 6971 struct pglist_data *pgdat; 6972 6973 for_each_online_pgdat(pgdat) { 6974 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 6975 6976 if (managed_zone(zone)) 6977 return true; 6978 } 6979 return false; 6980 } 6981 #endif /* CONFIG_ZONE_DMA */ 6982 6983 #ifdef CONFIG_UNACCEPTED_MEMORY 6984 6985 /* Counts number of zones with unaccepted pages. */ 6986 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 6987 6988 static bool lazy_accept = true; 6989 6990 static int __init accept_memory_parse(char *p) 6991 { 6992 if (!strcmp(p, "lazy")) { 6993 lazy_accept = true; 6994 return 0; 6995 } else if (!strcmp(p, "eager")) { 6996 lazy_accept = false; 6997 return 0; 6998 } else { 6999 return -EINVAL; 7000 } 7001 } 7002 early_param("accept_memory", accept_memory_parse); 7003 7004 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7005 { 7006 phys_addr_t start = page_to_phys(page); 7007 7008 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 7009 } 7010 7011 static void __accept_page(struct zone *zone, unsigned long *flags, 7012 struct page *page) 7013 { 7014 bool last; 7015 7016 list_del(&page->lru); 7017 last = list_empty(&zone->unaccepted_pages); 7018 7019 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7020 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7021 __ClearPageUnaccepted(page); 7022 spin_unlock_irqrestore(&zone->lock, *flags); 7023 7024 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 7025 7026 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 7027 7028 if (last) 7029 static_branch_dec(&zones_with_unaccepted_pages); 7030 } 7031 7032 void accept_page(struct page *page) 7033 { 7034 struct zone *zone = page_zone(page); 7035 unsigned long flags; 7036 7037 spin_lock_irqsave(&zone->lock, flags); 7038 if (!PageUnaccepted(page)) { 7039 spin_unlock_irqrestore(&zone->lock, flags); 7040 return; 7041 } 7042 7043 /* Unlocks zone->lock */ 7044 __accept_page(zone, &flags, page); 7045 } 7046 7047 static bool try_to_accept_memory_one(struct zone *zone) 7048 { 7049 unsigned long flags; 7050 struct page *page; 7051 7052 spin_lock_irqsave(&zone->lock, flags); 7053 page = list_first_entry_or_null(&zone->unaccepted_pages, 7054 struct page, lru); 7055 if (!page) { 7056 spin_unlock_irqrestore(&zone->lock, flags); 7057 return false; 7058 } 7059 7060 /* Unlocks zone->lock */ 7061 __accept_page(zone, &flags, page); 7062 7063 return true; 7064 } 7065 7066 static bool cond_accept_memory(struct zone *zone, unsigned int order) 7067 { 7068 long to_accept; 7069 bool ret = false; 7070 7071 if (!has_unaccepted_memory()) 7072 return false; 7073 7074 if (list_empty(&zone->unaccepted_pages)) 7075 return false; 7076 7077 /* How much to accept to get to promo watermark? 
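 * That is, to_accept = promo watermark minus the usable free pages that are
 * already accepted (free - unusable - NR_UNACCEPTED); each successful
 * acceptance below covers MAX_ORDER_NR_PAGES.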
*/ 7078 to_accept = promo_wmark_pages(zone) - 7079 (zone_page_state(zone, NR_FREE_PAGES) - 7080 __zone_watermark_unusable_free(zone, order, 0) - 7081 zone_page_state(zone, NR_UNACCEPTED)); 7082 7083 while (to_accept > 0) { 7084 if (!try_to_accept_memory_one(zone)) 7085 break; 7086 ret = true; 7087 to_accept -= MAX_ORDER_NR_PAGES; 7088 } 7089 7090 return ret; 7091 } 7092 7093 static inline bool has_unaccepted_memory(void) 7094 { 7095 return static_branch_unlikely(&zones_with_unaccepted_pages); 7096 } 7097 7098 static bool __free_unaccepted(struct page *page) 7099 { 7100 struct zone *zone = page_zone(page); 7101 unsigned long flags; 7102 bool first = false; 7103 7104 if (!lazy_accept) 7105 return false; 7106 7107 spin_lock_irqsave(&zone->lock, flags); 7108 first = list_empty(&zone->unaccepted_pages); 7109 list_add_tail(&page->lru, &zone->unaccepted_pages); 7110 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7111 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7112 __SetPageUnaccepted(page); 7113 spin_unlock_irqrestore(&zone->lock, flags); 7114 7115 if (first) 7116 static_branch_inc(&zones_with_unaccepted_pages); 7117 7118 return true; 7119 } 7120 7121 #else 7122 7123 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7124 { 7125 return false; 7126 } 7127 7128 static bool cond_accept_memory(struct zone *zone, unsigned int order) 7129 { 7130 return false; 7131 } 7132 7133 static inline bool has_unaccepted_memory(void) 7134 { 7135 return false; 7136 } 7137 7138 static bool __free_unaccepted(struct page *page) 7139 { 7140 BUILD_BUG(); 7141 return false; 7142 } 7143 7144 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7145