1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/kasan.h> 26 #include <linux/kmsan.h> 27 #include <linux/module.h> 28 #include <linux/suspend.h> 29 #include <linux/ratelimit.h> 30 #include <linux/oom.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/pagevec.h> 36 #include <linux/memory_hotplug.h> 37 #include <linux/nodemask.h> 38 #include <linux/vmstat.h> 39 #include <linux/fault-inject.h> 40 #include <linux/compaction.h> 41 #include <trace/events/kmem.h> 42 #include <trace/events/oom.h> 43 #include <linux/prefetch.h> 44 #include <linux/mm_inline.h> 45 #include <linux/mmu_notifier.h> 46 #include <linux/migrate.h> 47 #include <linux/sched/mm.h> 48 #include <linux/page_owner.h> 49 #include <linux/page_table_check.h> 50 #include <linux/memcontrol.h> 51 #include <linux/ftrace.h> 52 #include <linux/lockdep.h> 53 #include <linux/psi.h> 54 #include <linux/khugepaged.h> 55 #include <linux/delayacct.h> 56 #include <linux/cacheinfo.h> 57 #include <linux/pgalloc_tag.h> 58 #include <asm/div64.h> 59 #include "internal.h" 60 #include "shuffle.h" 61 #include "page_reporting.h" 62 63 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 64 typedef int __bitwise fpi_t; 65 66 /* No special request */ 67 #define FPI_NONE ((__force fpi_t)0) 68 69 /* 70 * Skip free page reporting notification for the (possibly merged) page. 71 * This does not hinder free page reporting from grabbing the page, 72 * reporting it and marking it "reported" - it only skips notifying 73 * the free page reporting infrastructure about a newly freed page. For 74 * example, used when temporarily pulling a page from a freelist and 75 * putting it back unmodified. 76 */ 77 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 78 79 /* 80 * Place the (possibly merged) page to the tail of the freelist. Will ignore 81 * page shuffling (relevant code - e.g., memory onlining - is expected to 82 * shuffle the whole zone). 83 * 84 * Note: No code should rely on this flag for correctness - it's purely 85 * to allow for optimizations when handing back either fresh pages 86 * (memory onlining) or untouched pages (page isolation, free page 87 * reporting). 88 */ 89 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 90 91 /* Free the page without taking locks. Rely on trylock only. 
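 *
 * Illustrative flow (a summary of free_one_page() further down in this
 * file, not an additional API):
 *
 *   free_one_page(zone, page, pfn, order, FPI_TRYLOCK);
 *     - spin_trylock_irqsave(&zone->lock, ...) succeeds -> freed normally
 *     - trylock fails -> the page is parked on zone->trylock_free_pages
 *       and freed later once a non-trylock free takes zone->lock.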
*/ 92 #define FPI_TRYLOCK ((__force fpi_t)BIT(2)) 93 94 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 95 static DEFINE_MUTEX(pcp_batch_high_lock); 96 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 97 98 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 99 /* 100 * On SMP, spin_trylock is sufficient protection. 101 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 102 */ 103 #define pcp_trylock_prepare(flags) do { } while (0) 104 #define pcp_trylock_finish(flag) do { } while (0) 105 #else 106 107 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ 108 #define pcp_trylock_prepare(flags) local_irq_save(flags) 109 #define pcp_trylock_finish(flags) local_irq_restore(flags) 110 #endif 111 112 /* 113 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid 114 * a migration causing the wrong PCP to be locked and remote memory being 115 * potentially allocated, pin the task to the CPU for the lookup+lock. 116 * preempt_disable is used on !RT because it is faster than migrate_disable. 117 * migrate_disable is used on RT because otherwise RT spinlock usage is 118 * interfered with and a high priority task cannot preempt the allocator. 119 */ 120 #ifndef CONFIG_PREEMPT_RT 121 #define pcpu_task_pin() preempt_disable() 122 #define pcpu_task_unpin() preempt_enable() 123 #else 124 #define pcpu_task_pin() migrate_disable() 125 #define pcpu_task_unpin() migrate_enable() 126 #endif 127 128 /* 129 * Generic helper to lookup and a per-cpu variable with an embedded spinlock. 130 * Return value should be used with equivalent unlock helper. 131 */ 132 #define pcpu_spin_lock(type, member, ptr) \ 133 ({ \ 134 type *_ret; \ 135 pcpu_task_pin(); \ 136 _ret = this_cpu_ptr(ptr); \ 137 spin_lock(&_ret->member); \ 138 _ret; \ 139 }) 140 141 #define pcpu_spin_trylock(type, member, ptr) \ 142 ({ \ 143 type *_ret; \ 144 pcpu_task_pin(); \ 145 _ret = this_cpu_ptr(ptr); \ 146 if (!spin_trylock(&_ret->member)) { \ 147 pcpu_task_unpin(); \ 148 _ret = NULL; \ 149 } \ 150 _ret; \ 151 }) 152 153 #define pcpu_spin_unlock(member, ptr) \ 154 ({ \ 155 spin_unlock(&ptr->member); \ 156 pcpu_task_unpin(); \ 157 }) 158 159 /* struct per_cpu_pages specific helpers. */ 160 #define pcp_spin_lock(ptr) \ 161 pcpu_spin_lock(struct per_cpu_pages, lock, ptr) 162 163 #define pcp_spin_trylock(ptr) \ 164 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr) 165 166 #define pcp_spin_unlock(ptr) \ 167 pcpu_spin_unlock(lock, ptr) 168 169 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 170 DEFINE_PER_CPU(int, numa_node); 171 EXPORT_PER_CPU_SYMBOL(numa_node); 172 #endif 173 174 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 175 176 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 177 /* 178 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 179 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 180 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 181 * defined in <linux/topology.h>. 182 */ 183 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 184 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 185 #endif 186 187 static DEFINE_MUTEX(pcpu_drain_mutex); 188 189 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 190 volatile unsigned long latent_entropy __latent_entropy; 191 EXPORT_SYMBOL(latent_entropy); 192 #endif 193 194 /* 195 * Array of node states. 
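 *
 * (Illustrative aside on the pcp lock helpers defined above; the real
 * callers appear further down in this file, and this is only a usage
 * sketch, not code taken from them:
 *
 *   pcp_trylock_prepare(UP_flags);
 *   pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *   if (pcp) {
 *           ... operate on this CPU's per_cpu_pages ...
 *           pcp_spin_unlock(pcp);
 *   }
 *   pcp_trylock_finish(UP_flags);
 *
 * pcp_spin_trylock() returns NULL and drops the task pin on contention,
 * so every caller needs a fallback path.)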
196 */ 197 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 198 [N_POSSIBLE] = NODE_MASK_ALL, 199 [N_ONLINE] = { { [0] = 1UL } }, 200 #ifndef CONFIG_NUMA 201 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 202 #ifdef CONFIG_HIGHMEM 203 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 204 #endif 205 [N_MEMORY] = { { [0] = 1UL } }, 206 [N_CPU] = { { [0] = 1UL } }, 207 #endif /* NUMA */ 208 }; 209 EXPORT_SYMBOL(node_states); 210 211 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 212 213 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 214 unsigned int pageblock_order __read_mostly; 215 #endif 216 217 static void __free_pages_ok(struct page *page, unsigned int order, 218 fpi_t fpi_flags); 219 220 /* 221 * results with 256, 32 in the lowmem_reserve sysctl: 222 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 223 * 1G machine -> (16M dma, 784M normal, 224M high) 224 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 225 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 226 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 227 * 228 * TBD: should special case ZONE_DMA32 machines here - in those we normally 229 * don't need any ZONE_NORMAL reservation 230 */ 231 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 232 #ifdef CONFIG_ZONE_DMA 233 [ZONE_DMA] = 256, 234 #endif 235 #ifdef CONFIG_ZONE_DMA32 236 [ZONE_DMA32] = 256, 237 #endif 238 [ZONE_NORMAL] = 32, 239 #ifdef CONFIG_HIGHMEM 240 [ZONE_HIGHMEM] = 0, 241 #endif 242 [ZONE_MOVABLE] = 0, 243 }; 244 245 char * const zone_names[MAX_NR_ZONES] = { 246 #ifdef CONFIG_ZONE_DMA 247 "DMA", 248 #endif 249 #ifdef CONFIG_ZONE_DMA32 250 "DMA32", 251 #endif 252 "Normal", 253 #ifdef CONFIG_HIGHMEM 254 "HighMem", 255 #endif 256 "Movable", 257 #ifdef CONFIG_ZONE_DEVICE 258 "Device", 259 #endif 260 }; 261 262 const char * const migratetype_names[MIGRATE_TYPES] = { 263 "Unmovable", 264 "Movable", 265 "Reclaimable", 266 "HighAtomic", 267 #ifdef CONFIG_CMA 268 "CMA", 269 #endif 270 #ifdef CONFIG_MEMORY_ISOLATION 271 "Isolate", 272 #endif 273 }; 274 275 int min_free_kbytes = 1024; 276 int user_min_free_kbytes = -1; 277 static int watermark_boost_factor __read_mostly = 15000; 278 static int watermark_scale_factor = 10; 279 int defrag_mode; 280 281 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 282 int movable_zone; 283 EXPORT_SYMBOL(movable_zone); 284 285 #if MAX_NUMNODES > 1 286 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 287 unsigned int nr_online_nodes __read_mostly = 1; 288 EXPORT_SYMBOL(nr_node_ids); 289 EXPORT_SYMBOL(nr_online_nodes); 290 #endif 291 292 static bool page_contains_unaccepted(struct page *page, unsigned int order); 293 static bool cond_accept_memory(struct zone *zone, unsigned int order, 294 int alloc_flags); 295 static bool __free_unaccepted(struct page *page); 296 297 int page_group_by_mobility_disabled __read_mostly; 298 299 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 300 /* 301 * During boot we initialize deferred pages on-demand, as needed, but once 302 * page_alloc_init_late() has finished, the deferred pages are all initialized, 303 * and we can permanently disable that path. 304 */ 305 DEFINE_STATIC_KEY_TRUE(deferred_pages); 306 307 static inline bool deferred_pages_enabled(void) 308 { 309 return static_branch_unlikely(&deferred_pages); 310 } 311 312 /* 313 * deferred_grow_zone() is __init, but it is called from 314 * get_page_from_freelist() during early boot until deferred_pages permanently 315 * disables this call. 
 * This is why we use a __ref wrapper: it avoids the section mismatch
 * warning while still allowing deferred_grow_zone()'s __init body to be
 * freed after boot.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
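	 *
	 * Worked example for the indexing above (illustrative numbers,
	 * assuming pageblock_order == 9, NR_PAGEBLOCK_BITS == 4 and 64-bit
	 * longs): a section-relative pfn of 0x2600 lies in pageblock 0x13,
	 * giving bitidx = 0x13 * 4 = 76, word_bitidx = 76 / 64 = 1, and an
	 * in-word offset of 76 % 64 = 12 for the shift below.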
379 */ 380 word = READ_ONCE(bitmap[word_bitidx]); 381 return (word >> bitidx) & mask; 382 } 383 384 static __always_inline int get_pfnblock_migratetype(const struct page *page, 385 unsigned long pfn) 386 { 387 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 388 } 389 390 /** 391 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 392 * @page: The page within the block of interest 393 * @flags: The flags to set 394 * @pfn: The target page frame number 395 * @mask: mask of bits that the caller is interested in 396 */ 397 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 398 unsigned long pfn, 399 unsigned long mask) 400 { 401 unsigned long *bitmap; 402 unsigned long bitidx, word_bitidx; 403 unsigned long word; 404 405 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 406 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 407 408 bitmap = get_pageblock_bitmap(page, pfn); 409 bitidx = pfn_to_bitidx(page, pfn); 410 word_bitidx = bitidx / BITS_PER_LONG; 411 bitidx &= (BITS_PER_LONG-1); 412 413 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 414 415 mask <<= bitidx; 416 flags <<= bitidx; 417 418 word = READ_ONCE(bitmap[word_bitidx]); 419 do { 420 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); 421 } 422 423 void set_pageblock_migratetype(struct page *page, int migratetype) 424 { 425 if (unlikely(page_group_by_mobility_disabled && 426 migratetype < MIGRATE_PCPTYPES)) 427 migratetype = MIGRATE_UNMOVABLE; 428 429 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 430 page_to_pfn(page), MIGRATETYPE_MASK); 431 } 432 433 #ifdef CONFIG_DEBUG_VM 434 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 435 { 436 int ret; 437 unsigned seq; 438 unsigned long pfn = page_to_pfn(page); 439 unsigned long sp, start_pfn; 440 441 do { 442 seq = zone_span_seqbegin(zone); 443 start_pfn = zone->zone_start_pfn; 444 sp = zone->spanned_pages; 445 ret = !zone_spans_pfn(zone, pfn); 446 } while (zone_span_seqretry(zone, seq)); 447 448 if (ret) 449 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 450 pfn, zone_to_nid(zone), zone->name, 451 start_pfn, start_pfn + sp); 452 453 return ret; 454 } 455 456 /* 457 * Temporary debugging check for pages not lying within a given zone. 458 */ 459 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 460 { 461 if (page_outside_zone_boundaries(zone, page)) 462 return true; 463 if (zone != page_zone(page)) 464 return true; 465 466 return false; 467 } 468 #else 469 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 470 { 471 return false; 472 } 473 #endif 474 475 static void bad_page(struct page *page, const char *reason) 476 { 477 static unsigned long resume; 478 static unsigned long nr_shown; 479 static unsigned long nr_unshown; 480 481 /* 482 * Allow a burst of 60 reports, then keep quiet for that minute; 483 * or allow a steady drip of one report per second. 
484 */ 485 if (nr_shown == 60) { 486 if (time_before(jiffies, resume)) { 487 nr_unshown++; 488 goto out; 489 } 490 if (nr_unshown) { 491 pr_alert( 492 "BUG: Bad page state: %lu messages suppressed\n", 493 nr_unshown); 494 nr_unshown = 0; 495 } 496 nr_shown = 0; 497 } 498 if (nr_shown++ == 0) 499 resume = jiffies + 60 * HZ; 500 501 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 502 current->comm, page_to_pfn(page)); 503 dump_page(page, reason); 504 505 print_modules(); 506 dump_stack(); 507 out: 508 /* Leave bad fields for debug, except PageBuddy could make trouble */ 509 if (PageBuddy(page)) 510 __ClearPageBuddy(page); 511 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 512 } 513 514 static inline unsigned int order_to_pindex(int migratetype, int order) 515 { 516 517 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 518 bool movable; 519 if (order > PAGE_ALLOC_COSTLY_ORDER) { 520 VM_BUG_ON(order != HPAGE_PMD_ORDER); 521 522 movable = migratetype == MIGRATE_MOVABLE; 523 524 return NR_LOWORDER_PCP_LISTS + movable; 525 } 526 #else 527 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 528 #endif 529 530 return (MIGRATE_PCPTYPES * order) + migratetype; 531 } 532 533 static inline int pindex_to_order(unsigned int pindex) 534 { 535 int order = pindex / MIGRATE_PCPTYPES; 536 537 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 538 if (pindex >= NR_LOWORDER_PCP_LISTS) 539 order = HPAGE_PMD_ORDER; 540 #else 541 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 542 #endif 543 544 return order; 545 } 546 547 static inline bool pcp_allowed_order(unsigned int order) 548 { 549 if (order <= PAGE_ALLOC_COSTLY_ORDER) 550 return true; 551 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 552 if (order == HPAGE_PMD_ORDER) 553 return true; 554 #endif 555 return false; 556 } 557 558 /* 559 * Higher-order pages are called "compound pages". They are structured thusly: 560 * 561 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 562 * 563 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 564 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 565 * 566 * The first tail page's ->compound_order holds the order of allocation. 567 * This usage means that zero-order pages may not be compound. 568 */ 569 570 void prep_compound_page(struct page *page, unsigned int order) 571 { 572 int i; 573 int nr_pages = 1 << order; 574 575 __SetPageHead(page); 576 for (i = 1; i < nr_pages; i++) 577 prep_compound_tail(page, i); 578 579 prep_compound_head(page, order); 580 } 581 582 static inline void set_buddy_order(struct page *page, unsigned int order) 583 { 584 set_page_private(page, order); 585 __SetPageBuddy(page); 586 } 587 588 #ifdef CONFIG_COMPACTION 589 static inline struct capture_control *task_capc(struct zone *zone) 590 { 591 struct capture_control *capc = current->capture_control; 592 593 return unlikely(capc) && 594 !(current->flags & PF_KTHREAD) && 595 !capc->page && 596 capc->cc->zone == zone ? capc : NULL; 597 } 598 599 static inline bool 600 compaction_capture(struct capture_control *capc, struct page *page, 601 int order, int migratetype) 602 { 603 if (!capc || order != capc->cc->order) 604 return false; 605 606 /* Do not accidentally pollute CMA or isolated regions*/ 607 if (is_migrate_cma(migratetype) || 608 is_migrate_isolate(migratetype)) 609 return false; 610 611 /* 612 * Do not let lower order allocations pollute a movable pageblock 613 * unless compaction is also requesting movable pages. 
614 * This might let an unmovable request use a reclaimable pageblock 615 * and vice-versa but no more than normal fallback logic which can 616 * have trouble finding a high-order free page. 617 */ 618 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 619 capc->cc->migratetype != MIGRATE_MOVABLE) 620 return false; 621 622 if (migratetype != capc->cc->migratetype) 623 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, 624 capc->cc->migratetype, migratetype); 625 626 capc->page = page; 627 return true; 628 } 629 630 #else 631 static inline struct capture_control *task_capc(struct zone *zone) 632 { 633 return NULL; 634 } 635 636 static inline bool 637 compaction_capture(struct capture_control *capc, struct page *page, 638 int order, int migratetype) 639 { 640 return false; 641 } 642 #endif /* CONFIG_COMPACTION */ 643 644 static inline void account_freepages(struct zone *zone, int nr_pages, 645 int migratetype) 646 { 647 lockdep_assert_held(&zone->lock); 648 649 if (is_migrate_isolate(migratetype)) 650 return; 651 652 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 653 654 if (is_migrate_cma(migratetype)) 655 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 656 else if (is_migrate_highatomic(migratetype)) 657 WRITE_ONCE(zone->nr_free_highatomic, 658 zone->nr_free_highatomic + nr_pages); 659 } 660 661 /* Used for pages not on another list */ 662 static inline void __add_to_free_list(struct page *page, struct zone *zone, 663 unsigned int order, int migratetype, 664 bool tail) 665 { 666 struct free_area *area = &zone->free_area[order]; 667 int nr_pages = 1 << order; 668 669 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 670 "page type is %lu, passed migratetype is %d (nr=%d)\n", 671 get_pageblock_migratetype(page), migratetype, nr_pages); 672 673 if (tail) 674 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 675 else 676 list_add(&page->buddy_list, &area->free_list[migratetype]); 677 area->nr_free++; 678 679 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 680 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 681 } 682 683 /* 684 * Used for pages which are on another list. Move the pages to the tail 685 * of the list - so the moved pages won't immediately be considered for 686 * allocation again (e.g., optimization for memory onlining). 
687 */ 688 static inline void move_to_free_list(struct page *page, struct zone *zone, 689 unsigned int order, int old_mt, int new_mt) 690 { 691 struct free_area *area = &zone->free_area[order]; 692 int nr_pages = 1 << order; 693 694 /* Free page moving can fail, so it happens before the type update */ 695 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, 696 "page type is %lu, passed migratetype is %d (nr=%d)\n", 697 get_pageblock_migratetype(page), old_mt, nr_pages); 698 699 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); 700 701 account_freepages(zone, -nr_pages, old_mt); 702 account_freepages(zone, nr_pages, new_mt); 703 704 if (order >= pageblock_order && 705 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { 706 if (!is_migrate_isolate(old_mt)) 707 nr_pages = -nr_pages; 708 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 709 } 710 } 711 712 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, 713 unsigned int order, int migratetype) 714 { 715 int nr_pages = 1 << order; 716 717 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 718 "page type is %lu, passed migratetype is %d (nr=%d)\n", 719 get_pageblock_migratetype(page), migratetype, nr_pages); 720 721 /* clear reported state and update reported page count */ 722 if (page_reported(page)) 723 __ClearPageReported(page); 724 725 list_del(&page->buddy_list); 726 __ClearPageBuddy(page); 727 set_page_private(page, 0); 728 zone->free_area[order].nr_free--; 729 730 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 731 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); 732 } 733 734 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 735 unsigned int order, int migratetype) 736 { 737 __del_page_from_free_list(page, zone, order, migratetype); 738 account_freepages(zone, -(1 << order), migratetype); 739 } 740 741 static inline struct page *get_page_from_free_area(struct free_area *area, 742 int migratetype) 743 { 744 return list_first_entry_or_null(&area->free_list[migratetype], 745 struct page, buddy_list); 746 } 747 748 /* 749 * If this is less than the 2nd largest possible page, check if the buddy 750 * of the next-higher order is free. If it is, it's possible 751 * that pages are being freed that will coalesce soon. In case, 752 * that is happening, add the free page to the tail of the list 753 * so it's less likely to be used soon and more likely to be merged 754 * as a 2-level higher order page 755 */ 756 static inline bool 757 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 758 struct page *page, unsigned int order) 759 { 760 unsigned long higher_page_pfn; 761 struct page *higher_page; 762 763 if (order >= MAX_PAGE_ORDER - 1) 764 return false; 765 766 higher_page_pfn = buddy_pfn & pfn; 767 higher_page = page + (higher_page_pfn - pfn); 768 769 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 770 NULL) != NULL; 771 } 772 773 /* 774 * Freeing function for a buddy system allocator. 775 * 776 * The concept of a buddy system is to maintain direct-mapped table 777 * (containing bit values) for memory blocks of various "orders". 778 * The bottom level table contains the map for the smallest allocatable 779 * units of memory (here, pages), and each level above it describes 780 * pairs of units from the levels below, hence, "buddies". 
781 * At a high level, all that happens here is marking the table entry 782 * at the bottom level available, and propagating the changes upward 783 * as necessary, plus some accounting needed to play nicely with other 784 * parts of the VM system. 785 * At each level, we keep a list of pages, which are heads of continuous 786 * free pages of length of (1 << order) and marked with PageBuddy. 787 * Page's order is recorded in page_private(page) field. 788 * So when we are allocating or freeing one, we can derive the state of the 789 * other. That is, if we allocate a small block, and both were 790 * free, the remainder of the region must be split into blocks. 791 * If a block is freed, and its buddy is also free, then this 792 * triggers coalescing into a block of larger size. 793 * 794 * -- nyc 795 */ 796 797 static inline void __free_one_page(struct page *page, 798 unsigned long pfn, 799 struct zone *zone, unsigned int order, 800 int migratetype, fpi_t fpi_flags) 801 { 802 struct capture_control *capc = task_capc(zone); 803 unsigned long buddy_pfn = 0; 804 unsigned long combined_pfn; 805 struct page *buddy; 806 bool to_tail; 807 808 VM_BUG_ON(!zone_is_initialized(zone)); 809 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 810 811 VM_BUG_ON(migratetype == -1); 812 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 813 VM_BUG_ON_PAGE(bad_range(zone, page), page); 814 815 account_freepages(zone, 1 << order, migratetype); 816 817 while (order < MAX_PAGE_ORDER) { 818 int buddy_mt = migratetype; 819 820 if (compaction_capture(capc, page, order, migratetype)) { 821 account_freepages(zone, -(1 << order), migratetype); 822 return; 823 } 824 825 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 826 if (!buddy) 827 goto done_merging; 828 829 if (unlikely(order >= pageblock_order)) { 830 /* 831 * We want to prevent merge between freepages on pageblock 832 * without fallbacks and normal pageblock. Without this, 833 * pageblock isolation could cause incorrect freepage or CMA 834 * accounting or HIGHATOMIC accounting. 835 */ 836 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 837 838 if (migratetype != buddy_mt && 839 (!migratetype_is_mergeable(migratetype) || 840 !migratetype_is_mergeable(buddy_mt))) 841 goto done_merging; 842 } 843 844 /* 845 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 846 * merge with it and move up one order. 847 */ 848 if (page_is_guard(buddy)) 849 clear_page_guard(zone, buddy, order); 850 else 851 __del_page_from_free_list(buddy, zone, order, buddy_mt); 852 853 if (unlikely(buddy_mt != migratetype)) { 854 /* 855 * Match buddy type. This ensures that an 856 * expand() down the line puts the sub-blocks 857 * on the right freelists. 858 */ 859 set_pageblock_migratetype(buddy, migratetype); 860 } 861 862 combined_pfn = buddy_pfn & pfn; 863 page = page + (combined_pfn - pfn); 864 pfn = combined_pfn; 865 order++; 866 } 867 868 done_merging: 869 set_buddy_order(page, order); 870 871 if (fpi_flags & FPI_TO_TAIL) 872 to_tail = true; 873 else if (is_shuffle_order(order)) 874 to_tail = shuffle_pick_tail(); 875 else 876 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 877 878 __add_to_free_list(page, zone, order, migratetype, to_tail); 879 880 /* Notify page reporting subsystem of freed page */ 881 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 882 page_reporting_notify_free(order); 883 } 884 885 /* 886 * A bad page could be due to a number of fields. Instead of multiple branches, 887 * try and check multiple fields with one check. 
The caller must do a detailed 888 * check if necessary. 889 */ 890 static inline bool page_expected_state(struct page *page, 891 unsigned long check_flags) 892 { 893 if (unlikely(atomic_read(&page->_mapcount) != -1)) 894 return false; 895 896 if (unlikely((unsigned long)page->mapping | 897 page_ref_count(page) | 898 #ifdef CONFIG_MEMCG 899 page->memcg_data | 900 #endif 901 #ifdef CONFIG_PAGE_POOL 902 ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) | 903 #endif 904 (page->flags & check_flags))) 905 return false; 906 907 return true; 908 } 909 910 static const char *page_bad_reason(struct page *page, unsigned long flags) 911 { 912 const char *bad_reason = NULL; 913 914 if (unlikely(atomic_read(&page->_mapcount) != -1)) 915 bad_reason = "nonzero mapcount"; 916 if (unlikely(page->mapping != NULL)) 917 bad_reason = "non-NULL mapping"; 918 if (unlikely(page_ref_count(page) != 0)) 919 bad_reason = "nonzero _refcount"; 920 if (unlikely(page->flags & flags)) { 921 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 922 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 923 else 924 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 925 } 926 #ifdef CONFIG_MEMCG 927 if (unlikely(page->memcg_data)) 928 bad_reason = "page still charged to cgroup"; 929 #endif 930 #ifdef CONFIG_PAGE_POOL 931 if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE)) 932 bad_reason = "page_pool leak"; 933 #endif 934 return bad_reason; 935 } 936 937 static inline bool free_page_is_bad(struct page *page) 938 { 939 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 940 return false; 941 942 /* Something has gone sideways, find it */ 943 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 944 return true; 945 } 946 947 static inline bool is_check_pages_enabled(void) 948 { 949 return static_branch_unlikely(&check_pages_enabled); 950 } 951 952 static int free_tail_page_prepare(struct page *head_page, struct page *page) 953 { 954 struct folio *folio = (struct folio *)head_page; 955 int ret = 1; 956 957 /* 958 * We rely page->lru.next never has bit 0 set, unless the page 959 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
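 * (As an aside, with the default POISON_POINTER_DELTA this means
 * LIST_POISON1 is 0xdead000000000100 on x86-64: bit 0 is clear, so a
 * poisoned ->lru.next can never masquerade as a compound_head value with
 * the PageTail bit set.)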
960 */ 961 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 962 963 if (!is_check_pages_enabled()) { 964 ret = 0; 965 goto out; 966 } 967 switch (page - head_page) { 968 case 1: 969 /* the first tail page: these may be in place of ->mapping */ 970 if (unlikely(folio_large_mapcount(folio))) { 971 bad_page(page, "nonzero large_mapcount"); 972 goto out; 973 } 974 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && 975 unlikely(atomic_read(&folio->_nr_pages_mapped))) { 976 bad_page(page, "nonzero nr_pages_mapped"); 977 goto out; 978 } 979 if (IS_ENABLED(CONFIG_MM_ID)) { 980 if (unlikely(folio->_mm_id_mapcount[0] != -1)) { 981 bad_page(page, "nonzero mm mapcount 0"); 982 goto out; 983 } 984 if (unlikely(folio->_mm_id_mapcount[1] != -1)) { 985 bad_page(page, "nonzero mm mapcount 1"); 986 goto out; 987 } 988 } 989 if (IS_ENABLED(CONFIG_64BIT)) { 990 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 991 bad_page(page, "nonzero entire_mapcount"); 992 goto out; 993 } 994 if (unlikely(atomic_read(&folio->_pincount))) { 995 bad_page(page, "nonzero pincount"); 996 goto out; 997 } 998 } 999 break; 1000 case 2: 1001 /* the second tail page: deferred_list overlaps ->mapping */ 1002 if (unlikely(!list_empty(&folio->_deferred_list))) { 1003 bad_page(page, "on deferred list"); 1004 goto out; 1005 } 1006 if (!IS_ENABLED(CONFIG_64BIT)) { 1007 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1008 bad_page(page, "nonzero entire_mapcount"); 1009 goto out; 1010 } 1011 if (unlikely(atomic_read(&folio->_pincount))) { 1012 bad_page(page, "nonzero pincount"); 1013 goto out; 1014 } 1015 } 1016 break; 1017 case 3: 1018 /* the third tail page: hugetlb specifics overlap ->mappings */ 1019 if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) 1020 break; 1021 fallthrough; 1022 default: 1023 if (page->mapping != TAIL_MAPPING) { 1024 bad_page(page, "corrupted mapping in tail page"); 1025 goto out; 1026 } 1027 break; 1028 } 1029 if (unlikely(!PageTail(page))) { 1030 bad_page(page, "PageTail not set"); 1031 goto out; 1032 } 1033 if (unlikely(compound_head(page) != head_page)) { 1034 bad_page(page, "compound_head not consistent"); 1035 goto out; 1036 } 1037 ret = 0; 1038 out: 1039 page->mapping = NULL; 1040 clear_compound_head(page); 1041 return ret; 1042 } 1043 1044 /* 1045 * Skip KASAN memory poisoning when either: 1046 * 1047 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1048 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1049 * using page tags instead (see below). 1050 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1051 * that error detection is disabled for accesses via the page address. 1052 * 1053 * Pages will have match-all tags in the following circumstances: 1054 * 1055 * 1. Pages are being initialized for the first time, including during deferred 1056 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1057 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1058 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1059 * 3. The allocation was excluded from being checked due to sampling, 1060 * see the call to kasan_unpoison_pages. 1061 * 1062 * Poisoning pages during deferred memory init will greatly lengthen the 1063 * process and cause problem in large memory systems as the deferred pages 1064 * initialization is done with interrupt disabled. 
1065 * 1066 * Assuming that there will be no reference to those newly initialized 1067 * pages before they are ever allocated, this should have no effect on 1068 * KASAN memory tracking as the poison will be properly inserted at page 1069 * allocation time. The only corner case is when pages are allocated by 1070 * on-demand allocation and then freed again before the deferred pages 1071 * initialization is done, but this is not likely to happen. 1072 */ 1073 static inline bool should_skip_kasan_poison(struct page *page) 1074 { 1075 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1076 return deferred_pages_enabled(); 1077 1078 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1079 } 1080 1081 static void kernel_init_pages(struct page *page, int numpages) 1082 { 1083 int i; 1084 1085 /* s390's use of memset() could override KASAN redzones. */ 1086 kasan_disable_current(); 1087 for (i = 0; i < numpages; i++) 1088 clear_highpage_kasan_tagged(page + i); 1089 kasan_enable_current(); 1090 } 1091 1092 #ifdef CONFIG_MEM_ALLOC_PROFILING 1093 1094 /* Should be called only if mem_alloc_profiling_enabled() */ 1095 void __clear_page_tag_ref(struct page *page) 1096 { 1097 union pgtag_ref_handle handle; 1098 union codetag_ref ref; 1099 1100 if (get_page_tag_ref(page, &ref, &handle)) { 1101 set_codetag_empty(&ref); 1102 update_page_tag_ref(handle, &ref); 1103 put_page_tag_ref(handle); 1104 } 1105 } 1106 1107 /* Should be called only if mem_alloc_profiling_enabled() */ 1108 static noinline 1109 void __pgalloc_tag_add(struct page *page, struct task_struct *task, 1110 unsigned int nr) 1111 { 1112 union pgtag_ref_handle handle; 1113 union codetag_ref ref; 1114 1115 if (get_page_tag_ref(page, &ref, &handle)) { 1116 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); 1117 update_page_tag_ref(handle, &ref); 1118 put_page_tag_ref(handle); 1119 } 1120 } 1121 1122 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1123 unsigned int nr) 1124 { 1125 if (mem_alloc_profiling_enabled()) 1126 __pgalloc_tag_add(page, task, nr); 1127 } 1128 1129 /* Should be called only if mem_alloc_profiling_enabled() */ 1130 static noinline 1131 void __pgalloc_tag_sub(struct page *page, unsigned int nr) 1132 { 1133 union pgtag_ref_handle handle; 1134 union codetag_ref ref; 1135 1136 if (get_page_tag_ref(page, &ref, &handle)) { 1137 alloc_tag_sub(&ref, PAGE_SIZE * nr); 1138 update_page_tag_ref(handle, &ref); 1139 put_page_tag_ref(handle); 1140 } 1141 } 1142 1143 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) 1144 { 1145 if (mem_alloc_profiling_enabled()) 1146 __pgalloc_tag_sub(page, nr); 1147 } 1148 1149 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */ 1150 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) 1151 { 1152 if (tag) 1153 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1154 } 1155 1156 #else /* CONFIG_MEM_ALLOC_PROFILING */ 1157 1158 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1159 unsigned int nr) {} 1160 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1161 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} 1162 1163 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1164 1165 __always_inline bool free_pages_prepare(struct page *page, 1166 unsigned int order) 1167 { 1168 int bad = 0; 1169 bool skip_kasan_poison = should_skip_kasan_poison(page); 1170 bool init = want_init_on_free(); 1171 bool compound = PageCompound(page); 1172 struct folio *folio = 
page_folio(page); 1173 1174 VM_BUG_ON_PAGE(PageTail(page), page); 1175 1176 trace_mm_page_free(page, order); 1177 kmsan_free_page(page, order); 1178 1179 if (memcg_kmem_online() && PageMemcgKmem(page)) 1180 __memcg_kmem_uncharge_page(page, order); 1181 1182 /* 1183 * In rare cases, when truncation or holepunching raced with 1184 * munlock after VM_LOCKED was cleared, Mlocked may still be 1185 * found set here. This does not indicate a problem, unless 1186 * "unevictable_pgs_cleared" appears worryingly large. 1187 */ 1188 if (unlikely(folio_test_mlocked(folio))) { 1189 long nr_pages = folio_nr_pages(folio); 1190 1191 __folio_clear_mlocked(folio); 1192 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1193 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1194 } 1195 1196 if (unlikely(PageHWPoison(page)) && !order) { 1197 /* Do not let hwpoison pages hit pcplists/buddy */ 1198 reset_page_owner(page, order); 1199 page_table_check_free(page, order); 1200 pgalloc_tag_sub(page, 1 << order); 1201 1202 /* 1203 * The page is isolated and accounted for. 1204 * Mark the codetag as empty to avoid accounting error 1205 * when the page is freed by unpoison_memory(). 1206 */ 1207 clear_page_tag_ref(page); 1208 return false; 1209 } 1210 1211 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1212 1213 /* 1214 * Check tail pages before head page information is cleared to 1215 * avoid checking PageCompound for order-0 pages. 1216 */ 1217 if (unlikely(order)) { 1218 int i; 1219 1220 if (compound) { 1221 page[1].flags &= ~PAGE_FLAGS_SECOND; 1222 #ifdef NR_PAGES_IN_LARGE_FOLIO 1223 folio->_nr_pages = 0; 1224 #endif 1225 } 1226 for (i = 1; i < (1 << order); i++) { 1227 if (compound) 1228 bad += free_tail_page_prepare(page, page + i); 1229 if (is_check_pages_enabled()) { 1230 if (free_page_is_bad(page + i)) { 1231 bad++; 1232 continue; 1233 } 1234 } 1235 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1236 } 1237 } 1238 if (PageMappingFlags(page)) { 1239 if (PageAnon(page)) 1240 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1241 page->mapping = NULL; 1242 } 1243 if (is_check_pages_enabled()) { 1244 if (free_page_is_bad(page)) 1245 bad++; 1246 if (bad) 1247 return false; 1248 } 1249 1250 page_cpupid_reset_last(page); 1251 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1252 reset_page_owner(page, order); 1253 page_table_check_free(page, order); 1254 pgalloc_tag_sub(page, 1 << order); 1255 1256 if (!PageHighMem(page)) { 1257 debug_check_no_locks_freed(page_address(page), 1258 PAGE_SIZE << order); 1259 debug_check_no_obj_freed(page_address(page), 1260 PAGE_SIZE << order); 1261 } 1262 1263 kernel_poison_pages(page, 1 << order); 1264 1265 /* 1266 * As memory initialization might be integrated into KASAN, 1267 * KASAN poisoning and memory initialization code must be 1268 * kept together to avoid discrepancies in behavior. 1269 * 1270 * With hardware tag-based KASAN, memory tags must be set before the 1271 * page becomes unavailable via debug_pagealloc or arch_free_page. 1272 */ 1273 if (!skip_kasan_poison) { 1274 kasan_poison_pages(page, order, init); 1275 1276 /* Memory is already initialized if KASAN did it internally. */ 1277 if (kasan_has_integrated_init()) 1278 init = false; 1279 } 1280 if (init) 1281 kernel_init_pages(page, 1 << order); 1282 1283 /* 1284 * arch_free_page() can make the page's contents inaccessible. s390 1285 * does this. So nothing which can access the page's contents should 1286 * happen after this. 
1287 */ 1288 arch_free_page(page, order); 1289 1290 debug_pagealloc_unmap_pages(page, 1 << order); 1291 1292 return true; 1293 } 1294 1295 /* 1296 * Frees a number of pages from the PCP lists 1297 * Assumes all pages on list are in same zone. 1298 * count is the number of pages to free. 1299 */ 1300 static void free_pcppages_bulk(struct zone *zone, int count, 1301 struct per_cpu_pages *pcp, 1302 int pindex) 1303 { 1304 unsigned long flags; 1305 unsigned int order; 1306 struct page *page; 1307 1308 /* 1309 * Ensure proper count is passed which otherwise would stuck in the 1310 * below while (list_empty(list)) loop. 1311 */ 1312 count = min(pcp->count, count); 1313 1314 /* Ensure requested pindex is drained first. */ 1315 pindex = pindex - 1; 1316 1317 spin_lock_irqsave(&zone->lock, flags); 1318 1319 while (count > 0) { 1320 struct list_head *list; 1321 int nr_pages; 1322 1323 /* Remove pages from lists in a round-robin fashion. */ 1324 do { 1325 if (++pindex > NR_PCP_LISTS - 1) 1326 pindex = 0; 1327 list = &pcp->lists[pindex]; 1328 } while (list_empty(list)); 1329 1330 order = pindex_to_order(pindex); 1331 nr_pages = 1 << order; 1332 do { 1333 unsigned long pfn; 1334 int mt; 1335 1336 page = list_last_entry(list, struct page, pcp_list); 1337 pfn = page_to_pfn(page); 1338 mt = get_pfnblock_migratetype(page, pfn); 1339 1340 /* must delete to avoid corrupting pcp list */ 1341 list_del(&page->pcp_list); 1342 count -= nr_pages; 1343 pcp->count -= nr_pages; 1344 1345 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); 1346 trace_mm_page_pcpu_drain(page, order, mt); 1347 } while (count > 0 && !list_empty(list)); 1348 } 1349 1350 spin_unlock_irqrestore(&zone->lock, flags); 1351 } 1352 1353 /* Split a multi-block free page into its individual pageblocks. */ 1354 static void split_large_buddy(struct zone *zone, struct page *page, 1355 unsigned long pfn, int order, fpi_t fpi) 1356 { 1357 unsigned long end = pfn + (1 << order); 1358 1359 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); 1360 /* Caller removed page from freelist, buddy info cleared! */ 1361 VM_WARN_ON_ONCE(PageBuddy(page)); 1362 1363 if (order > pageblock_order) 1364 order = pageblock_order; 1365 1366 do { 1367 int mt = get_pfnblock_migratetype(page, pfn); 1368 1369 __free_one_page(page, pfn, zone, order, mt, fpi); 1370 pfn += 1 << order; 1371 if (pfn == end) 1372 break; 1373 page = pfn_to_page(pfn); 1374 } while (1); 1375 } 1376 1377 static void add_page_to_zone_llist(struct zone *zone, struct page *page, 1378 unsigned int order) 1379 { 1380 /* Remember the order */ 1381 page->order = order; 1382 /* Add the page to the free list */ 1383 llist_add(&page->pcp_llist, &zone->trylock_free_pages); 1384 } 1385 1386 static void free_one_page(struct zone *zone, struct page *page, 1387 unsigned long pfn, unsigned int order, 1388 fpi_t fpi_flags) 1389 { 1390 struct llist_head *llhead; 1391 unsigned long flags; 1392 1393 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 1394 if (!spin_trylock_irqsave(&zone->lock, flags)) { 1395 add_page_to_zone_llist(zone, page, order); 1396 return; 1397 } 1398 } else { 1399 spin_lock_irqsave(&zone->lock, flags); 1400 } 1401 1402 /* The lock succeeded. Process deferred pages. 
*/ 1403 llhead = &zone->trylock_free_pages; 1404 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) { 1405 struct llist_node *llnode; 1406 struct page *p, *tmp; 1407 1408 llnode = llist_del_all(llhead); 1409 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) { 1410 unsigned int p_order = p->order; 1411 1412 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags); 1413 __count_vm_events(PGFREE, 1 << p_order); 1414 } 1415 } 1416 split_large_buddy(zone, page, pfn, order, fpi_flags); 1417 spin_unlock_irqrestore(&zone->lock, flags); 1418 1419 __count_vm_events(PGFREE, 1 << order); 1420 } 1421 1422 static void __free_pages_ok(struct page *page, unsigned int order, 1423 fpi_t fpi_flags) 1424 { 1425 unsigned long pfn = page_to_pfn(page); 1426 struct zone *zone = page_zone(page); 1427 1428 if (free_pages_prepare(page, order)) 1429 free_one_page(zone, page, pfn, order, fpi_flags); 1430 } 1431 1432 void __meminit __free_pages_core(struct page *page, unsigned int order, 1433 enum meminit_context context) 1434 { 1435 unsigned int nr_pages = 1 << order; 1436 struct page *p = page; 1437 unsigned int loop; 1438 1439 /* 1440 * When initializing the memmap, __init_single_page() sets the refcount 1441 * of all pages to 1 ("allocated"/"not free"). We have to set the 1442 * refcount of all involved pages to 0. 1443 * 1444 * Note that hotplugged memory pages are initialized to PageOffline(). 1445 * Pages freed from memblock might be marked as reserved. 1446 */ 1447 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1448 unlikely(context == MEMINIT_HOTPLUG)) { 1449 for (loop = 0; loop < nr_pages; loop++, p++) { 1450 VM_WARN_ON_ONCE(PageReserved(p)); 1451 __ClearPageOffline(p); 1452 set_page_count(p, 0); 1453 } 1454 1455 adjust_managed_page_count(page, nr_pages); 1456 } else { 1457 for (loop = 0; loop < nr_pages; loop++, p++) { 1458 __ClearPageReserved(p); 1459 set_page_count(p, 0); 1460 } 1461 1462 /* memblock adjusts totalram_pages() manually. */ 1463 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1464 } 1465 1466 if (page_contains_unaccepted(page, order)) { 1467 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1468 return; 1469 1470 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1471 } 1472 1473 /* 1474 * Bypass PCP and place fresh pages right to the tail, primarily 1475 * relevant for memory onlining. 1476 */ 1477 __free_pages_ok(page, order, FPI_TO_TAIL); 1478 } 1479 1480 /* 1481 * Check that the whole (or subset of) a pageblock given by the interval of 1482 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1483 * with the migration of free compaction scanner. 1484 * 1485 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1486 * 1487 * It's possible on some configurations to have a setup like node0 node1 node0 1488 * i.e. it's possible that all pages within a zones range of pages do not 1489 * belong to a single zone. We assume that a border between node0 and node1 1490 * can occur within a single pageblock, but not a node0 node1 node0 1491 * interleaving within a single pageblock. It is therefore sufficient to check 1492 * the first and last page of a pageblock and avoid checking each individual 1493 * page in a pageblock. 1494 * 1495 * Note: the function may return non-NULL struct page even for a page block 1496 * which contains a memory hole (i.e. there is no physical memory for a subset 1497 * of the pfn range). 
For example, if the pageblock order is MAX_PAGE_ORDER, which 1498 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1499 * even though the start pfn is online and valid. This should be safe most of 1500 * the time because struct pages are still initialized via init_unavailable_range() 1501 * and pfn walkers shouldn't touch any physical memory range for which they do 1502 * not recognize any specific metadata in struct pages. 1503 */ 1504 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1505 unsigned long end_pfn, struct zone *zone) 1506 { 1507 struct page *start_page; 1508 struct page *end_page; 1509 1510 /* end_pfn is one past the range we are checking */ 1511 end_pfn--; 1512 1513 if (!pfn_valid(end_pfn)) 1514 return NULL; 1515 1516 start_page = pfn_to_online_page(start_pfn); 1517 if (!start_page) 1518 return NULL; 1519 1520 if (page_zone(start_page) != zone) 1521 return NULL; 1522 1523 end_page = pfn_to_page(end_pfn); 1524 1525 /* This gives a shorter code than deriving page_zone(end_page) */ 1526 if (page_zone_id(start_page) != page_zone_id(end_page)) 1527 return NULL; 1528 1529 return start_page; 1530 } 1531 1532 /* 1533 * The order of subdivision here is critical for the IO subsystem. 1534 * Please do not alter this order without good reasons and regression 1535 * testing. Specifically, as large blocks of memory are subdivided, 1536 * the order in which smaller blocks are delivered depends on the order 1537 * they're subdivided in this function. This is the primary factor 1538 * influencing the order in which pages are delivered to the IO 1539 * subsystem according to empirical testing, and this is also justified 1540 * by considering the behavior of a buddy system containing a single 1541 * large block of memory acted on by a series of small allocations. 1542 * This behavior is a critical factor in sglist merging's success. 1543 * 1544 * -- nyc 1545 */ 1546 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1547 int high, int migratetype) 1548 { 1549 unsigned int size = 1 << high; 1550 unsigned int nr_added = 0; 1551 1552 while (high > low) { 1553 high--; 1554 size >>= 1; 1555 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1556 1557 /* 1558 * Mark as guard pages (or page), that will allow to 1559 * merge back to allocator when buddy will be freed. 
1560 * Corresponding page table entries will not be touched, 1561 * pages will stay not present in virtual address space 1562 */ 1563 if (set_page_guard(zone, &page[size], high)) 1564 continue; 1565 1566 __add_to_free_list(&page[size], zone, high, migratetype, false); 1567 set_buddy_order(&page[size], high); 1568 nr_added += size; 1569 } 1570 1571 return nr_added; 1572 } 1573 1574 static __always_inline void page_del_and_expand(struct zone *zone, 1575 struct page *page, int low, 1576 int high, int migratetype) 1577 { 1578 int nr_pages = 1 << high; 1579 1580 __del_page_from_free_list(page, zone, high, migratetype); 1581 nr_pages -= expand(zone, page, low, high, migratetype); 1582 account_freepages(zone, -nr_pages, migratetype); 1583 } 1584 1585 static void check_new_page_bad(struct page *page) 1586 { 1587 if (unlikely(PageHWPoison(page))) { 1588 /* Don't complain about hwpoisoned pages */ 1589 if (PageBuddy(page)) 1590 __ClearPageBuddy(page); 1591 return; 1592 } 1593 1594 bad_page(page, 1595 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1596 } 1597 1598 /* 1599 * This page is about to be returned from the page allocator 1600 */ 1601 static bool check_new_page(struct page *page) 1602 { 1603 if (likely(page_expected_state(page, 1604 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1605 return false; 1606 1607 check_new_page_bad(page); 1608 return true; 1609 } 1610 1611 static inline bool check_new_pages(struct page *page, unsigned int order) 1612 { 1613 if (is_check_pages_enabled()) { 1614 for (int i = 0; i < (1 << order); i++) { 1615 struct page *p = page + i; 1616 1617 if (check_new_page(p)) 1618 return true; 1619 } 1620 } 1621 1622 return false; 1623 } 1624 1625 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1626 { 1627 /* Don't skip if a software KASAN mode is enabled. */ 1628 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1629 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1630 return false; 1631 1632 /* Skip, if hardware tag-based KASAN is not enabled. */ 1633 if (!kasan_hw_tags_enabled()) 1634 return true; 1635 1636 /* 1637 * With hardware tag-based KASAN enabled, skip if this has been 1638 * requested via __GFP_SKIP_KASAN. 1639 */ 1640 return flags & __GFP_SKIP_KASAN; 1641 } 1642 1643 static inline bool should_skip_init(gfp_t flags) 1644 { 1645 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1646 if (!kasan_hw_tags_enabled()) 1647 return false; 1648 1649 /* For hardware tag-based KASAN, skip if requested. */ 1650 return (flags & __GFP_SKIP_ZERO); 1651 } 1652 1653 inline void post_alloc_hook(struct page *page, unsigned int order, 1654 gfp_t gfp_flags) 1655 { 1656 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1657 !should_skip_init(gfp_flags); 1658 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1659 int i; 1660 1661 set_page_private(page, 0); 1662 1663 arch_alloc_page(page, order); 1664 debug_pagealloc_map_pages(page, 1 << order); 1665 1666 /* 1667 * Page unpoisoning must happen before memory initialization. 1668 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1669 * allocations and the page unpoisoning code will complain. 1670 */ 1671 kernel_unpoison_pages(page, 1 << order); 1672 1673 /* 1674 * As memory initialization might be integrated into KASAN, 1675 * KASAN unpoisoning and memory initializion code must be 1676 * kept together to avoid discrepancies in behavior. 1677 */ 1678 1679 /* 1680 * If memory tags should be zeroed 1681 * (which happens only when memory should be initialized as well). 
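	 * The typical user is hardware tag-based KASAN with arm64 MTE
	 * (background context, not stated in this file): with something
	 * like __GFP_ZERO | __GFP_ZEROTAGS, tag_clear_highpage() below
	 * initialises the data and the memory tags in one pass, and the
	 * later kernel_init_pages() call is skipped because init is
	 * cleared again.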
1682 */ 1683 if (zero_tags) { 1684 /* Initialize both memory and memory tags. */ 1685 for (i = 0; i != 1 << order; ++i) 1686 tag_clear_highpage(page + i); 1687 1688 /* Take note that memory was initialized by the loop above. */ 1689 init = false; 1690 } 1691 if (!should_skip_kasan_unpoison(gfp_flags) && 1692 kasan_unpoison_pages(page, order, init)) { 1693 /* Take note that memory was initialized by KASAN. */ 1694 if (kasan_has_integrated_init()) 1695 init = false; 1696 } else { 1697 /* 1698 * If memory tags have not been set by KASAN, reset the page 1699 * tags to ensure page_address() dereferencing does not fault. 1700 */ 1701 for (i = 0; i != 1 << order; ++i) 1702 page_kasan_tag_reset(page + i); 1703 } 1704 /* If memory is still not initialized, initialize it now. */ 1705 if (init) 1706 kernel_init_pages(page, 1 << order); 1707 1708 set_page_owner(page, order, gfp_flags); 1709 page_table_check_alloc(page, order); 1710 pgalloc_tag_add(page, current, 1 << order); 1711 } 1712 1713 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1714 unsigned int alloc_flags) 1715 { 1716 post_alloc_hook(page, order, gfp_flags); 1717 1718 if (order && (gfp_flags & __GFP_COMP)) 1719 prep_compound_page(page, order); 1720 1721 /* 1722 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1723 * allocate the page. The expectation is that the caller is taking 1724 * steps that will free more memory. The caller should avoid the page 1725 * being used for !PFMEMALLOC purposes. 1726 */ 1727 if (alloc_flags & ALLOC_NO_WATERMARKS) 1728 set_page_pfmemalloc(page); 1729 else 1730 clear_page_pfmemalloc(page); 1731 } 1732 1733 /* 1734 * Go through the free lists for the given migratetype and remove 1735 * the smallest available page from the freelists 1736 */ 1737 static __always_inline 1738 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1739 int migratetype) 1740 { 1741 unsigned int current_order; 1742 struct free_area *area; 1743 struct page *page; 1744 1745 /* Find a page of the appropriate size in the preferred list */ 1746 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1747 area = &(zone->free_area[current_order]); 1748 page = get_page_from_free_area(area, migratetype); 1749 if (!page) 1750 continue; 1751 1752 page_del_and_expand(zone, page, order, current_order, 1753 migratetype); 1754 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1755 pcp_allowed_order(order) && 1756 migratetype < MIGRATE_PCPTYPES); 1757 return page; 1758 } 1759 1760 return NULL; 1761 } 1762 1763 1764 /* 1765 * This array describes the order lists are fallen back to when 1766 * the free lists for the desirable migrate type are depleted 1767 * 1768 * The other migratetypes do not have fallbacks. 1769 */ 1770 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1771 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1772 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1773 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1774 }; 1775 1776 #ifdef CONFIG_CMA 1777 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1778 unsigned int order) 1779 { 1780 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1781 } 1782 #else 1783 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1784 unsigned int order) { return NULL; } 1785 #endif 1786 1787 /* 1788 * Change the type of a block and move all its free pages to that 1789 * type's freelist. 
1790 */ 1791 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1792 int old_mt, int new_mt) 1793 { 1794 struct page *page; 1795 unsigned long pfn, end_pfn; 1796 unsigned int order; 1797 int pages_moved = 0; 1798 1799 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1800 end_pfn = pageblock_end_pfn(start_pfn); 1801 1802 for (pfn = start_pfn; pfn < end_pfn;) { 1803 page = pfn_to_page(pfn); 1804 if (!PageBuddy(page)) { 1805 pfn++; 1806 continue; 1807 } 1808 1809 /* Make sure we are not inadvertently changing nodes */ 1810 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1811 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1812 1813 order = buddy_order(page); 1814 1815 move_to_free_list(page, zone, order, old_mt, new_mt); 1816 1817 pfn += 1 << order; 1818 pages_moved += 1 << order; 1819 } 1820 1821 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 1822 1823 return pages_moved; 1824 } 1825 1826 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 1827 unsigned long *start_pfn, 1828 int *num_free, int *num_movable) 1829 { 1830 unsigned long pfn, start, end; 1831 1832 pfn = page_to_pfn(page); 1833 start = pageblock_start_pfn(pfn); 1834 end = pageblock_end_pfn(pfn); 1835 1836 /* 1837 * The caller only has the lock for @zone, don't touch ranges 1838 * that straddle into other zones. While we could move part of 1839 * the range that's inside the zone, this call is usually 1840 * accompanied by other operations such as migratetype updates 1841 * which also should be locked. 1842 */ 1843 if (!zone_spans_pfn(zone, start)) 1844 return false; 1845 if (!zone_spans_pfn(zone, end - 1)) 1846 return false; 1847 1848 *start_pfn = start; 1849 1850 if (num_free) { 1851 *num_free = 0; 1852 *num_movable = 0; 1853 for (pfn = start; pfn < end;) { 1854 page = pfn_to_page(pfn); 1855 if (PageBuddy(page)) { 1856 int nr = 1 << buddy_order(page); 1857 1858 *num_free += nr; 1859 pfn += nr; 1860 continue; 1861 } 1862 /* 1863 * We assume that pages that could be isolated for 1864 * migration are movable. But we don't actually try 1865 * isolating, as that would be expensive. 1866 */ 1867 if (PageLRU(page) || __PageMovable(page)) 1868 (*num_movable)++; 1869 pfn++; 1870 } 1871 } 1872 1873 return true; 1874 } 1875 1876 static int move_freepages_block(struct zone *zone, struct page *page, 1877 int old_mt, int new_mt) 1878 { 1879 unsigned long start_pfn; 1880 1881 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1882 return -1; 1883 1884 return __move_freepages_block(zone, start_pfn, old_mt, new_mt); 1885 } 1886 1887 #ifdef CONFIG_MEMORY_ISOLATION 1888 /* Look for a buddy that straddles start_pfn */ 1889 static unsigned long find_large_buddy(unsigned long start_pfn) 1890 { 1891 int order = 0; 1892 struct page *page; 1893 unsigned long pfn = start_pfn; 1894 1895 while (!PageBuddy(page = pfn_to_page(pfn))) { 1896 /* Nothing found */ 1897 if (++order > MAX_PAGE_ORDER) 1898 return start_pfn; 1899 pfn &= ~0UL << order; 1900 } 1901 1902 /* 1903 * Found a preceding buddy, but does it straddle? 
1904 */ 1905 if (pfn + (1 << buddy_order(page)) > start_pfn) 1906 return pfn; 1907 1908 /* Nothing found */ 1909 return start_pfn; 1910 } 1911 1912 /** 1913 * move_freepages_block_isolate - move free pages in block for page isolation 1914 * @zone: the zone 1915 * @page: the pageblock page 1916 * @migratetype: migratetype to set on the pageblock 1917 * 1918 * This is similar to move_freepages_block(), but handles the special 1919 * case encountered in page isolation, where the block of interest 1920 * might be part of a larger buddy spanning multiple pageblocks. 1921 * 1922 * Unlike the regular page allocator path, which moves pages while 1923 * stealing buddies off the freelist, page isolation is interested in 1924 * arbitrary pfn ranges that may have overlapping buddies on both ends. 1925 * 1926 * This function handles that. Straddling buddies are split into 1927 * individual pageblocks. Only the block of interest is moved. 1928 * 1929 * Returns %true if pages could be moved, %false otherwise. 1930 */ 1931 bool move_freepages_block_isolate(struct zone *zone, struct page *page, 1932 int migratetype) 1933 { 1934 unsigned long start_pfn, pfn; 1935 1936 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 1937 return false; 1938 1939 /* No splits needed if buddies can't span multiple blocks */ 1940 if (pageblock_order == MAX_PAGE_ORDER) 1941 goto move; 1942 1943 /* We're a tail block in a larger buddy */ 1944 pfn = find_large_buddy(start_pfn); 1945 if (pfn != start_pfn) { 1946 struct page *buddy = pfn_to_page(pfn); 1947 int order = buddy_order(buddy); 1948 1949 del_page_from_free_list(buddy, zone, order, 1950 get_pfnblock_migratetype(buddy, pfn)); 1951 set_pageblock_migratetype(page, migratetype); 1952 split_large_buddy(zone, buddy, pfn, order, FPI_NONE); 1953 return true; 1954 } 1955 1956 /* We're the starting block of a larger buddy */ 1957 if (PageBuddy(page) && buddy_order(page) > pageblock_order) { 1958 int order = buddy_order(page); 1959 1960 del_page_from_free_list(page, zone, order, 1961 get_pfnblock_migratetype(page, pfn)); 1962 set_pageblock_migratetype(page, migratetype); 1963 split_large_buddy(zone, page, pfn, order, FPI_NONE); 1964 return true; 1965 } 1966 move: 1967 __move_freepages_block(zone, start_pfn, 1968 get_pfnblock_migratetype(page, start_pfn), 1969 migratetype); 1970 return true; 1971 } 1972 #endif /* CONFIG_MEMORY_ISOLATION */ 1973 1974 static void change_pageblock_range(struct page *pageblock_page, 1975 int start_order, int migratetype) 1976 { 1977 int nr_pageblocks = 1 << (start_order - pageblock_order); 1978 1979 while (nr_pageblocks--) { 1980 set_pageblock_migratetype(pageblock_page, migratetype); 1981 pageblock_page += pageblock_nr_pages; 1982 } 1983 } 1984 1985 static inline bool boost_watermark(struct zone *zone) 1986 { 1987 unsigned long max_boost; 1988 1989 if (!watermark_boost_factor) 1990 return false; 1991 /* 1992 * Don't bother in zones that are unlikely to produce results. 1993 * On small machines, including kdump capture kernels running 1994 * in a small area, boosting the watermark can cause an out of 1995 * memory situation immediately. 1996 */ 1997 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1998 return false; 1999 2000 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2001 watermark_boost_factor, 10000); 2002 2003 /* 2004 * high watermark may be uninitialised if fragmentation occurs 2005 * very early in boot so do not boost. 
We do not fall
2006 * through and boost by pageblock_nr_pages either, as failing
2007 * allocations that early means that reclaim is not going
2008 * to help, and it may even be impossible to reclaim enough
2009 * to meet the boosted watermark, resulting in a hang.
2010 */
2011 if (!max_boost)
2012 return false;
2013
2014 max_boost = max(pageblock_nr_pages, max_boost);
2015
2016 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2017 max_boost);
2018
2019 return true;
2020 }
2021
2022 /*
2023 * When we are falling back to another migratetype during allocation, should we
2024 * try to claim an entire block to satisfy further allocations, instead of
2025 * polluting multiple pageblocks?
2026 */
2027 static bool should_try_claim_block(unsigned int order, int start_mt)
2028 {
2029 /*
2030 * This order check is deliberately kept even though the next
2031 * check uses a more relaxed order threshold. The reason is that
2032 * when this condition is met we can actually claim the whole
2033 * pageblock, whereas the check below does not guarantee it and
2034 * is just a heuristic that could be changed at any time.
2035 */
2036 if (order >= pageblock_order)
2037 return true;
2038
2039 /*
2040 * Above a certain threshold, always try to claim, as it's likely there
2041 * will be more free pages in the pageblock.
2042 */
2043 if (order >= pageblock_order / 2)
2044 return true;
2045
2046 /*
2047 * Unmovable/reclaimable allocations would cause permanent
2048 * fragmentation if they fell back to allocating from a movable block
2049 * (polluting it), so we try to claim the whole block regardless of the
2050 * allocation size. Later movable allocations can always steal from this
2051 * block, which is less problematic.
2052 */
2053 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
2054 return true;
2055
2056 if (page_group_by_mobility_disabled)
2057 return true;
2058
2059 /*
2060 * Movable pages won't cause permanent fragmentation, so for small
2061 * movable allocations we only need to temporarily steal unmovable or
2062 * reclaimable pages that are closest to the request size. After a
2063 * while, memory compaction may occur to form large contiguous pages,
2064 * and the next movable allocation may not need to steal.
2065 */
2066 return false;
2067 }
2068
2069 /*
2070 * Check whether there is a suitable fallback freepage with the requested order.
2071 * Sets *claim_block to instruct the caller whether it should convert a whole
2072 * pageblock to the returned migratetype.
2073 * If only_claim is true, this function returns fallback_mt only if
2074 * we would do this whole-block claiming. This helps to reduce
2075 * fragmentation due to mixed migratetype pages in one pageblock.
2076 */
2077 int find_suitable_fallback(struct free_area *area, unsigned int order,
2078 int migratetype, bool only_claim, bool *claim_block)
2079 {
2080 int i;
2081 int fallback_mt;
2082
2083 if (area->nr_free == 0)
2084 return -1;
2085
2086 *claim_block = false;
2087 for (i = 0; i < MIGRATE_PCPTYPES - 1; i++) {
2088 fallback_mt = fallbacks[migratetype][i];
2089 if (free_area_empty(area, fallback_mt))
2090 continue;
2091
2092 if (should_try_claim_block(order, migratetype))
2093 *claim_block = true;
2094
2095 if (*claim_block || !only_claim)
2096 return fallback_mt;
2097 }
2098
2099 return -1;
2100 }
2101
2102 /*
2103 * This function implements actual block claiming behaviour. If order is large
2104 * enough, we can claim the whole pageblock for the requested migratetype.
If 2105 * not, we check the pageblock for constituent pages; if at least half of the 2106 * pages are free or compatible, we can still claim the whole block, so pages 2107 * freed in the future will be put on the correct free list. 2108 */ 2109 static struct page * 2110 try_to_claim_block(struct zone *zone, struct page *page, 2111 int current_order, int order, int start_type, 2112 int block_type, unsigned int alloc_flags) 2113 { 2114 int free_pages, movable_pages, alike_pages; 2115 unsigned long start_pfn; 2116 2117 /* Take ownership for orders >= pageblock_order */ 2118 if (current_order >= pageblock_order) { 2119 unsigned int nr_added; 2120 2121 del_page_from_free_list(page, zone, current_order, block_type); 2122 change_pageblock_range(page, current_order, start_type); 2123 nr_added = expand(zone, page, order, current_order, start_type); 2124 account_freepages(zone, nr_added, start_type); 2125 return page; 2126 } 2127 2128 /* 2129 * Boost watermarks to increase reclaim pressure to reduce the 2130 * likelihood of future fallbacks. Wake kswapd now as the node 2131 * may be balanced overall and kswapd will not wake naturally. 2132 */ 2133 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2134 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2135 2136 /* moving whole block can fail due to zone boundary conditions */ 2137 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 2138 &movable_pages)) 2139 return NULL; 2140 2141 /* 2142 * Determine how many pages are compatible with our allocation. 2143 * For movable allocation, it's the number of movable pages which 2144 * we just obtained. For other types it's a bit more tricky. 2145 */ 2146 if (start_type == MIGRATE_MOVABLE) { 2147 alike_pages = movable_pages; 2148 } else { 2149 /* 2150 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2151 * to MOVABLE pageblock, consider all non-movable pages as 2152 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2153 * vice versa, be conservative since we can't distinguish the 2154 * exact migratetype of non-movable pages. 2155 */ 2156 if (block_type == MIGRATE_MOVABLE) 2157 alike_pages = pageblock_nr_pages 2158 - (free_pages + movable_pages); 2159 else 2160 alike_pages = 0; 2161 } 2162 /* 2163 * If a sufficient number of pages in the block are either free or of 2164 * compatible migratability as our allocation, claim the whole block. 2165 */ 2166 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2167 page_group_by_mobility_disabled) { 2168 __move_freepages_block(zone, start_pfn, block_type, start_type); 2169 return __rmqueue_smallest(zone, order, start_type); 2170 } 2171 2172 return NULL; 2173 } 2174 2175 /* 2176 * Try to allocate from some fallback migratetype by claiming the entire block, 2177 * i.e. converting it to the allocation's start migratetype. 2178 * 2179 * The use of signed ints for order and current_order is a deliberate 2180 * deviation from the rest of this file, to make the for loop 2181 * condition simpler. 2182 */ 2183 static __always_inline struct page * 2184 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, 2185 unsigned int alloc_flags) 2186 { 2187 struct free_area *area; 2188 int current_order; 2189 int min_order = order; 2190 struct page *page; 2191 int fallback_mt; 2192 bool claim_block; 2193 2194 /* 2195 * Do not steal pages from freelists belonging to other pageblocks 2196 * i.e. orders < pageblock_order. 
If there are no local zones free, 2197 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2198 */ 2199 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2200 min_order = pageblock_order; 2201 2202 /* 2203 * Find the largest available free page in the other list. This roughly 2204 * approximates finding the pageblock with the most free pages, which 2205 * would be too costly to do exactly. 2206 */ 2207 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2208 --current_order) { 2209 area = &(zone->free_area[current_order]); 2210 fallback_mt = find_suitable_fallback(area, current_order, 2211 start_migratetype, false, &claim_block); 2212 if (fallback_mt == -1) 2213 continue; 2214 2215 if (!claim_block) 2216 break; 2217 2218 page = get_page_from_free_area(area, fallback_mt); 2219 page = try_to_claim_block(zone, page, current_order, order, 2220 start_migratetype, fallback_mt, 2221 alloc_flags); 2222 if (page) { 2223 trace_mm_page_alloc_extfrag(page, order, current_order, 2224 start_migratetype, fallback_mt); 2225 return page; 2226 } 2227 } 2228 2229 return NULL; 2230 } 2231 2232 /* 2233 * Try to steal a single page from some fallback migratetype. Leave the rest of 2234 * the block as its current migratetype, potentially causing fragmentation. 2235 */ 2236 static __always_inline struct page * 2237 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) 2238 { 2239 struct free_area *area; 2240 int current_order; 2241 struct page *page; 2242 int fallback_mt; 2243 bool claim_block; 2244 2245 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2246 area = &(zone->free_area[current_order]); 2247 fallback_mt = find_suitable_fallback(area, current_order, 2248 start_migratetype, false, &claim_block); 2249 if (fallback_mt == -1) 2250 continue; 2251 2252 page = get_page_from_free_area(area, fallback_mt); 2253 page_del_and_expand(zone, page, order, current_order, fallback_mt); 2254 trace_mm_page_alloc_extfrag(page, order, current_order, 2255 start_migratetype, fallback_mt); 2256 return page; 2257 } 2258 2259 return NULL; 2260 } 2261 2262 enum rmqueue_mode { 2263 RMQUEUE_NORMAL, 2264 RMQUEUE_CMA, 2265 RMQUEUE_CLAIM, 2266 RMQUEUE_STEAL, 2267 }; 2268 2269 /* 2270 * Do the hard work of removing an element from the buddy allocator. 2271 * Call me with the zone->lock already held. 2272 */ 2273 static __always_inline struct page * 2274 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2275 unsigned int alloc_flags, enum rmqueue_mode *mode) 2276 { 2277 struct page *page; 2278 2279 if (IS_ENABLED(CONFIG_CMA)) { 2280 /* 2281 * Balance movable allocations between regular and CMA areas by 2282 * allocating from CMA when over half of the zone's free memory 2283 * is in the CMA area. 2284 */ 2285 if (alloc_flags & ALLOC_CMA && 2286 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2287 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2288 page = __rmqueue_cma_fallback(zone, order); 2289 if (page) 2290 return page; 2291 } 2292 } 2293 2294 /* 2295 * First try the freelists of the requested migratetype, then try 2296 * fallbacks modes with increasing levels of fragmentation risk. 2297 * 2298 * The fallback logic is expensive and rmqueue_bulk() calls in 2299 * a loop with the zone->lock held, meaning the freelists are 2300 * not subject to any outside changes. Remember in *mode where 2301 * we found pay dirt, to save us the search on the next call. 
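 *
 * For example, if the first call in an rmqueue_bulk() batch only succeeds
 * via __rmqueue_steal(), *mode is left at RMQUEUE_STEAL and later
 * iterations of the same batch skip straight to stealing instead of
 * rescanning the preferred and CMA freelists. A successful claim resets
 * *mode to RMQUEUE_NORMAL because the preferred freelist was replenished.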
2302 */
2303 switch (*mode) {
2304 case RMQUEUE_NORMAL:
2305 page = __rmqueue_smallest(zone, order, migratetype);
2306 if (page)
2307 return page;
2308 fallthrough;
2309 case RMQUEUE_CMA:
2310 if (alloc_flags & ALLOC_CMA) {
2311 page = __rmqueue_cma_fallback(zone, order);
2312 if (page) {
2313 *mode = RMQUEUE_CMA;
2314 return page;
2315 }
2316 }
2317 fallthrough;
2318 case RMQUEUE_CLAIM:
2319 page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
2320 if (page) {
2321 /* Replenished preferred freelist, back to normal mode. */
2322 *mode = RMQUEUE_NORMAL;
2323 return page;
2324 }
2325 fallthrough;
2326 case RMQUEUE_STEAL:
2327 if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
2328 page = __rmqueue_steal(zone, order, migratetype);
2329 if (page) {
2330 *mode = RMQUEUE_STEAL;
2331 return page;
2332 }
2333 }
2334 }
2335 return NULL;
2336 }
2337
2338 /*
2339 * Obtain a specified number of elements from the buddy allocator, all under
2340 * a single hold of the lock, for efficiency. Add them to the supplied list.
2341 * Returns the number of new pages which were placed at *list.
2342 */
2343 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2344 unsigned long count, struct list_head *list,
2345 int migratetype, unsigned int alloc_flags)
2346 {
2347 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
2348 unsigned long flags;
2349 int i;
2350
2351 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2352 if (!spin_trylock_irqsave(&zone->lock, flags))
2353 return 0;
2354 } else {
2355 spin_lock_irqsave(&zone->lock, flags);
2356 }
2357 for (i = 0; i < count; ++i) {
2358 struct page *page = __rmqueue(zone, order, migratetype,
2359 alloc_flags, &rmqm);
2360 if (unlikely(page == NULL))
2361 break;
2362
2363 /*
2364 * Split buddy pages returned by expand() are received here in
2365 * physical page order. Each page is added to the tail of the
2366 * caller's list, so from the caller's perspective the linked
2367 * list is ordered by page number under some conditions.
2368 * This is useful for IO devices that walk the list from the
2369 * head and therefore see pages in physical order, and for IO
2370 * devices that can merge IO requests when the physical pages
2371 * are ordered properly.
2372 */
2373 list_add_tail(&page->pcp_list, list);
2374 }
2375 spin_unlock_irqrestore(&zone->lock, flags);
2376
2377 return i;
2378 }
2379
2380 /*
2381 * Called from the vmstat counter updater to decay the PCP high.
2382 * Return whether there is additional work to do.
2383 */
2384 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2385 {
2386 int high_min, to_drain, batch;
2387 int todo = 0;
2388
2389 high_min = READ_ONCE(pcp->high_min);
2390 batch = READ_ONCE(pcp->batch);
2391 /*
2392 * Decrease pcp->high periodically to try to free possible
2393 * idle PCP pages, while avoiding freeing too many pages at
2394 * once to control latency. This also caps the pcp->high decrement.
2395 */
2396 if (pcp->high > high_min) {
2397 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2398 pcp->high - (pcp->high >> 3), high_min);
2399 if (pcp->high > high_min)
2400 todo++;
2401 }
2402
2403 to_drain = pcp->count - pcp->high;
2404 if (to_drain > 0) {
2405 spin_lock(&pcp->lock);
2406 free_pcppages_bulk(zone, to_drain, pcp, 0);
2407 spin_unlock(&pcp->lock);
2408 todo++;
2409 }
2410
2411 return todo;
2412 }
2413
2414 #ifdef CONFIG_NUMA
2415 /*
2416 * Called from the vmstat counter updater to drain pagesets of this
2417 * currently executing processor on remote nodes after they have
2418 * expired.
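 *
 * At most one batch (min(pcp->count, pcp->batch) pages) is freed per call,
 * so a large remote pageset is drained incrementally over successive
 * vmstat updates rather than in one long hold of pcp->lock.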
2419 */ 2420 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2421 { 2422 int to_drain, batch; 2423 2424 batch = READ_ONCE(pcp->batch); 2425 to_drain = min(pcp->count, batch); 2426 if (to_drain > 0) { 2427 spin_lock(&pcp->lock); 2428 free_pcppages_bulk(zone, to_drain, pcp, 0); 2429 spin_unlock(&pcp->lock); 2430 } 2431 } 2432 #endif 2433 2434 /* 2435 * Drain pcplists of the indicated processor and zone. 2436 */ 2437 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2438 { 2439 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2440 int count; 2441 2442 do { 2443 spin_lock(&pcp->lock); 2444 count = pcp->count; 2445 if (count) { 2446 int to_drain = min(count, 2447 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2448 2449 free_pcppages_bulk(zone, to_drain, pcp, 0); 2450 count -= to_drain; 2451 } 2452 spin_unlock(&pcp->lock); 2453 } while (count); 2454 } 2455 2456 /* 2457 * Drain pcplists of all zones on the indicated processor. 2458 */ 2459 static void drain_pages(unsigned int cpu) 2460 { 2461 struct zone *zone; 2462 2463 for_each_populated_zone(zone) { 2464 drain_pages_zone(cpu, zone); 2465 } 2466 } 2467 2468 /* 2469 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2470 */ 2471 void drain_local_pages(struct zone *zone) 2472 { 2473 int cpu = smp_processor_id(); 2474 2475 if (zone) 2476 drain_pages_zone(cpu, zone); 2477 else 2478 drain_pages(cpu); 2479 } 2480 2481 /* 2482 * The implementation of drain_all_pages(), exposing an extra parameter to 2483 * drain on all cpus. 2484 * 2485 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2486 * not empty. The check for non-emptiness can however race with a free to 2487 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2488 * that need the guarantee that every CPU has drained can disable the 2489 * optimizing racy check. 2490 */ 2491 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2492 { 2493 int cpu; 2494 2495 /* 2496 * Allocate in the BSS so we won't require allocation in 2497 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2498 */ 2499 static cpumask_t cpus_with_pcps; 2500 2501 /* 2502 * Do not drain if one is already in progress unless it's specific to 2503 * a zone. Such callers are primarily CMA and memory hotplug and need 2504 * the drain to be complete when the call returns. 2505 */ 2506 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2507 if (!zone) 2508 return; 2509 mutex_lock(&pcpu_drain_mutex); 2510 } 2511 2512 /* 2513 * We don't care about racing with CPU hotplug event 2514 * as offline notification will cause the notified 2515 * cpu to drain that CPU pcps and on_each_cpu_mask 2516 * disables preemption as part of its processing 2517 */ 2518 for_each_online_cpu(cpu) { 2519 struct per_cpu_pages *pcp; 2520 struct zone *z; 2521 bool has_pcps = false; 2522 2523 if (force_all_cpus) { 2524 /* 2525 * The pcp.count check is racy, some callers need a 2526 * guarantee that no cpu is missed. 
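 * Forcing has_pcps to true below makes every online CPU a drain
 * target, regardless of what its pcp->count happens to read at this
 * moment.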
2527 */ 2528 has_pcps = true; 2529 } else if (zone) { 2530 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2531 if (pcp->count) 2532 has_pcps = true; 2533 } else { 2534 for_each_populated_zone(z) { 2535 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2536 if (pcp->count) { 2537 has_pcps = true; 2538 break; 2539 } 2540 } 2541 } 2542 2543 if (has_pcps) 2544 cpumask_set_cpu(cpu, &cpus_with_pcps); 2545 else 2546 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2547 } 2548 2549 for_each_cpu(cpu, &cpus_with_pcps) { 2550 if (zone) 2551 drain_pages_zone(cpu, zone); 2552 else 2553 drain_pages(cpu); 2554 } 2555 2556 mutex_unlock(&pcpu_drain_mutex); 2557 } 2558 2559 /* 2560 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2561 * 2562 * When zone parameter is non-NULL, spill just the single zone's pages. 2563 */ 2564 void drain_all_pages(struct zone *zone) 2565 { 2566 __drain_all_pages(zone, false); 2567 } 2568 2569 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2570 { 2571 int min_nr_free, max_nr_free; 2572 2573 /* Free as much as possible if batch freeing high-order pages. */ 2574 if (unlikely(free_high)) 2575 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2576 2577 /* Check for PCP disabled or boot pageset */ 2578 if (unlikely(high < batch)) 2579 return 1; 2580 2581 /* Leave at least pcp->batch pages on the list */ 2582 min_nr_free = batch; 2583 max_nr_free = high - batch; 2584 2585 /* 2586 * Increase the batch number to the number of the consecutive 2587 * freed pages to reduce zone lock contention. 2588 */ 2589 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2590 2591 return batch; 2592 } 2593 2594 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2595 int batch, bool free_high) 2596 { 2597 int high, high_min, high_max; 2598 2599 high_min = READ_ONCE(pcp->high_min); 2600 high_max = READ_ONCE(pcp->high_max); 2601 high = pcp->high = clamp(pcp->high, high_min, high_max); 2602 2603 if (unlikely(!high)) 2604 return 0; 2605 2606 if (unlikely(free_high)) { 2607 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2608 high_min); 2609 return 0; 2610 } 2611 2612 /* 2613 * If reclaim is active, limit the number of pages that can be 2614 * stored on pcp lists 2615 */ 2616 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2617 int free_count = max_t(int, pcp->free_count, batch); 2618 2619 pcp->high = max(high - free_count, high_min); 2620 return min(batch << 2, pcp->high); 2621 } 2622 2623 if (high_min == high_max) 2624 return high; 2625 2626 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2627 int free_count = max_t(int, pcp->free_count, batch); 2628 2629 pcp->high = max(high - free_count, high_min); 2630 high = max(pcp->count, high_min); 2631 } else if (pcp->count >= high) { 2632 int need_high = pcp->free_count + batch; 2633 2634 /* pcp->high should be large enough to hold batch freed pages */ 2635 if (pcp->high < need_high) 2636 pcp->high = clamp(need_high, high_min, high_max); 2637 } 2638 2639 return high; 2640 } 2641 2642 static void free_frozen_page_commit(struct zone *zone, 2643 struct per_cpu_pages *pcp, struct page *page, int migratetype, 2644 unsigned int order, fpi_t fpi_flags) 2645 { 2646 int high, batch; 2647 int pindex; 2648 bool free_high = false; 2649 2650 /* 2651 * On freeing, reduce the number of pages that are batch allocated. 2652 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2653 * allocations. 
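 *
 * Each free halves alloc_factor, so a burst of frees quickly shrinks the
 * bulk-allocation batch back towards pcp->batch, while consecutive order-0
 * allocations grow it again up to CONFIG_PCP_BATCH_SCALE_MAX.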
2654 */ 2655 pcp->alloc_factor >>= 1; 2656 __count_vm_events(PGFREE, 1 << order); 2657 pindex = order_to_pindex(migratetype, order); 2658 list_add(&page->pcp_list, &pcp->lists[pindex]); 2659 pcp->count += 1 << order; 2660 2661 batch = READ_ONCE(pcp->batch); 2662 /* 2663 * As high-order pages other than THP's stored on PCP can contribute 2664 * to fragmentation, limit the number stored when PCP is heavily 2665 * freeing without allocation. The remainder after bulk freeing 2666 * stops will be drained from vmstat refresh context. 2667 */ 2668 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2669 free_high = (pcp->free_count >= batch && 2670 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2671 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2672 pcp->count >= batch)); 2673 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2674 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2675 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2676 } 2677 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2678 pcp->free_count += (1 << order); 2679 2680 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 2681 /* 2682 * Do not attempt to take a zone lock. Let pcp->count get 2683 * over high mark temporarily. 2684 */ 2685 return; 2686 } 2687 high = nr_pcp_high(pcp, zone, batch, free_high); 2688 if (pcp->count >= high) { 2689 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), 2690 pcp, pindex); 2691 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2692 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2693 ZONE_MOVABLE, 0)) 2694 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2695 } 2696 } 2697 2698 /* 2699 * Free a pcp page 2700 */ 2701 static void __free_frozen_pages(struct page *page, unsigned int order, 2702 fpi_t fpi_flags) 2703 { 2704 unsigned long __maybe_unused UP_flags; 2705 struct per_cpu_pages *pcp; 2706 struct zone *zone; 2707 unsigned long pfn = page_to_pfn(page); 2708 int migratetype; 2709 2710 if (!pcp_allowed_order(order)) { 2711 __free_pages_ok(page, order, fpi_flags); 2712 return; 2713 } 2714 2715 if (!free_pages_prepare(page, order)) 2716 return; 2717 2718 /* 2719 * We only track unmovable, reclaimable and movable on pcp lists. 2720 * Place ISOLATE pages on the isolated list because they are being 2721 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2722 * get those areas back if necessary. 
Otherwise, we may have to free 2723 * excessively into the page allocator 2724 */ 2725 zone = page_zone(page); 2726 migratetype = get_pfnblock_migratetype(page, pfn); 2727 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2728 if (unlikely(is_migrate_isolate(migratetype))) { 2729 free_one_page(zone, page, pfn, order, fpi_flags); 2730 return; 2731 } 2732 migratetype = MIGRATE_MOVABLE; 2733 } 2734 2735 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT) 2736 && (in_nmi() || in_hardirq()))) { 2737 add_page_to_zone_llist(zone, page, order); 2738 return; 2739 } 2740 pcp_trylock_prepare(UP_flags); 2741 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2742 if (pcp) { 2743 free_frozen_page_commit(zone, pcp, page, migratetype, order, fpi_flags); 2744 pcp_spin_unlock(pcp); 2745 } else { 2746 free_one_page(zone, page, pfn, order, fpi_flags); 2747 } 2748 pcp_trylock_finish(UP_flags); 2749 } 2750 2751 void free_frozen_pages(struct page *page, unsigned int order) 2752 { 2753 __free_frozen_pages(page, order, FPI_NONE); 2754 } 2755 2756 /* 2757 * Free a batch of folios 2758 */ 2759 void free_unref_folios(struct folio_batch *folios) 2760 { 2761 unsigned long __maybe_unused UP_flags; 2762 struct per_cpu_pages *pcp = NULL; 2763 struct zone *locked_zone = NULL; 2764 int i, j; 2765 2766 /* Prepare folios for freeing */ 2767 for (i = 0, j = 0; i < folios->nr; i++) { 2768 struct folio *folio = folios->folios[i]; 2769 unsigned long pfn = folio_pfn(folio); 2770 unsigned int order = folio_order(folio); 2771 2772 if (!free_pages_prepare(&folio->page, order)) 2773 continue; 2774 /* 2775 * Free orders not handled on the PCP directly to the 2776 * allocator. 2777 */ 2778 if (!pcp_allowed_order(order)) { 2779 free_one_page(folio_zone(folio), &folio->page, 2780 pfn, order, FPI_NONE); 2781 continue; 2782 } 2783 folio->private = (void *)(unsigned long)order; 2784 if (j != i) 2785 folios->folios[j] = folio; 2786 j++; 2787 } 2788 folios->nr = j; 2789 2790 for (i = 0; i < folios->nr; i++) { 2791 struct folio *folio = folios->folios[i]; 2792 struct zone *zone = folio_zone(folio); 2793 unsigned long pfn = folio_pfn(folio); 2794 unsigned int order = (unsigned long)folio->private; 2795 int migratetype; 2796 2797 folio->private = NULL; 2798 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 2799 2800 /* Different zone requires a different pcp lock */ 2801 if (zone != locked_zone || 2802 is_migrate_isolate(migratetype)) { 2803 if (pcp) { 2804 pcp_spin_unlock(pcp); 2805 pcp_trylock_finish(UP_flags); 2806 locked_zone = NULL; 2807 pcp = NULL; 2808 } 2809 2810 /* 2811 * Free isolated pages directly to the 2812 * allocator, see comment in free_frozen_pages. 2813 */ 2814 if (is_migrate_isolate(migratetype)) { 2815 free_one_page(zone, &folio->page, pfn, 2816 order, FPI_NONE); 2817 continue; 2818 } 2819 2820 /* 2821 * trylock is necessary as folios may be getting freed 2822 * from IRQ or SoftIRQ context after an IO completion. 2823 */ 2824 pcp_trylock_prepare(UP_flags); 2825 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2826 if (unlikely(!pcp)) { 2827 pcp_trylock_finish(UP_flags); 2828 free_one_page(zone, &folio->page, pfn, 2829 order, FPI_NONE); 2830 continue; 2831 } 2832 locked_zone = zone; 2833 } 2834 2835 /* 2836 * Non-isolated types over MIGRATE_PCPTYPES get added 2837 * to the MIGRATE_MOVABLE pcp list. 
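 * In practice that means HIGHATOMIC and CMA pages, mirroring the
 * handling in __free_frozen_pages() above.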
2838 */ 2839 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2840 migratetype = MIGRATE_MOVABLE; 2841 2842 trace_mm_page_free_batched(&folio->page); 2843 free_frozen_page_commit(zone, pcp, &folio->page, migratetype, 2844 order, FPI_NONE); 2845 } 2846 2847 if (pcp) { 2848 pcp_spin_unlock(pcp); 2849 pcp_trylock_finish(UP_flags); 2850 } 2851 folio_batch_reinit(folios); 2852 } 2853 2854 /* 2855 * split_page takes a non-compound higher-order page, and splits it into 2856 * n (1<<order) sub-pages: page[0..n] 2857 * Each sub-page must be freed individually. 2858 * 2859 * Note: this is probably too low level an operation for use in drivers. 2860 * Please consult with lkml before using this in your driver. 2861 */ 2862 void split_page(struct page *page, unsigned int order) 2863 { 2864 int i; 2865 2866 VM_BUG_ON_PAGE(PageCompound(page), page); 2867 VM_BUG_ON_PAGE(!page_count(page), page); 2868 2869 for (i = 1; i < (1 << order); i++) 2870 set_page_refcounted(page + i); 2871 split_page_owner(page, order, 0); 2872 pgalloc_tag_split(page_folio(page), order, 0); 2873 split_page_memcg(page, order); 2874 } 2875 EXPORT_SYMBOL_GPL(split_page); 2876 2877 int __isolate_free_page(struct page *page, unsigned int order) 2878 { 2879 struct zone *zone = page_zone(page); 2880 int mt = get_pageblock_migratetype(page); 2881 2882 if (!is_migrate_isolate(mt)) { 2883 unsigned long watermark; 2884 /* 2885 * Obey watermarks as if the page was being allocated. We can 2886 * emulate a high-order watermark check with a raised order-0 2887 * watermark, because we already know our high-order page 2888 * exists. 2889 */ 2890 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2891 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2892 return 0; 2893 } 2894 2895 del_page_from_free_list(page, zone, order, mt); 2896 2897 /* 2898 * Set the pageblock if the isolated page is at least half of a 2899 * pageblock 2900 */ 2901 if (order >= pageblock_order - 1) { 2902 struct page *endpage = page + (1 << order) - 1; 2903 for (; page < endpage; page += pageblock_nr_pages) { 2904 int mt = get_pageblock_migratetype(page); 2905 /* 2906 * Only change normal pageblocks (i.e., they can merge 2907 * with others) 2908 */ 2909 if (migratetype_is_mergeable(mt)) 2910 move_freepages_block(zone, page, mt, 2911 MIGRATE_MOVABLE); 2912 } 2913 } 2914 2915 return 1UL << order; 2916 } 2917 2918 /** 2919 * __putback_isolated_page - Return a now-isolated page back where we got it 2920 * @page: Page that was isolated 2921 * @order: Order of the isolated page 2922 * @mt: The page's pageblock's migratetype 2923 * 2924 * This function is meant to return a page pulled from the free lists via 2925 * __isolate_free_page back to the free lists they were pulled from. 2926 */ 2927 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2928 { 2929 struct zone *zone = page_zone(page); 2930 2931 /* zone lock should be held when this function is called */ 2932 lockdep_assert_held(&zone->lock); 2933 2934 /* Return isolated page to tail of freelist. 
*/ 2935 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2936 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2937 } 2938 2939 /* 2940 * Update NUMA hit/miss statistics 2941 */ 2942 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2943 long nr_account) 2944 { 2945 #ifdef CONFIG_NUMA 2946 enum numa_stat_item local_stat = NUMA_LOCAL; 2947 2948 /* skip numa counters update if numa stats is disabled */ 2949 if (!static_branch_likely(&vm_numa_stat_key)) 2950 return; 2951 2952 if (zone_to_nid(z) != numa_node_id()) 2953 local_stat = NUMA_OTHER; 2954 2955 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2956 __count_numa_events(z, NUMA_HIT, nr_account); 2957 else { 2958 __count_numa_events(z, NUMA_MISS, nr_account); 2959 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2960 } 2961 __count_numa_events(z, local_stat, nr_account); 2962 #endif 2963 } 2964 2965 static __always_inline 2966 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2967 unsigned int order, unsigned int alloc_flags, 2968 int migratetype) 2969 { 2970 struct page *page; 2971 unsigned long flags; 2972 2973 do { 2974 page = NULL; 2975 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 2976 if (!spin_trylock_irqsave(&zone->lock, flags)) 2977 return NULL; 2978 } else { 2979 spin_lock_irqsave(&zone->lock, flags); 2980 } 2981 if (alloc_flags & ALLOC_HIGHATOMIC) 2982 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2983 if (!page) { 2984 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 2985 2986 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); 2987 2988 /* 2989 * If the allocation fails, allow OOM handling and 2990 * order-0 (atomic) allocs access to HIGHATOMIC 2991 * reserves as failing now is worse than failing a 2992 * high-order atomic allocation in the future. 2993 */ 2994 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 2995 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2996 2997 if (!page) { 2998 spin_unlock_irqrestore(&zone->lock, flags); 2999 return NULL; 3000 } 3001 } 3002 spin_unlock_irqrestore(&zone->lock, flags); 3003 } while (check_new_pages(page, order)); 3004 3005 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3006 zone_statistics(preferred_zone, zone, 1); 3007 3008 return page; 3009 } 3010 3011 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 3012 { 3013 int high, base_batch, batch, max_nr_alloc; 3014 int high_max, high_min; 3015 3016 base_batch = READ_ONCE(pcp->batch); 3017 high_min = READ_ONCE(pcp->high_min); 3018 high_max = READ_ONCE(pcp->high_max); 3019 high = pcp->high = clamp(pcp->high, high_min, high_max); 3020 3021 /* Check for PCP disabled or boot pageset */ 3022 if (unlikely(high < base_batch)) 3023 return 1; 3024 3025 if (order) 3026 batch = base_batch; 3027 else 3028 batch = (base_batch << pcp->alloc_factor); 3029 3030 /* 3031 * If we had larger pcp->high, we could avoid to allocate from 3032 * zone. 3033 */ 3034 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3035 high = pcp->high = min(high + batch, high_max); 3036 3037 if (!order) { 3038 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 3039 /* 3040 * Double the number of pages allocated each time there is 3041 * subsequent allocation of order-0 pages without any freeing. 
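 * For example, with a base batch of 64 and no intervening frees, successive
 * order-0 refills request roughly 64, 128, 256, ... pages, until either
 * alloc_factor reaches CONFIG_PCP_BATCH_SCALE_MAX or max_nr_alloc caps the
 * request. The "64" here is purely illustrative; the real base batch is
 * sized per zone.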
3042 */ 3043 if (batch <= max_nr_alloc && 3044 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 3045 pcp->alloc_factor++; 3046 batch = min(batch, max_nr_alloc); 3047 } 3048 3049 /* 3050 * Scale batch relative to order if batch implies free pages 3051 * can be stored on the PCP. Batch can be 1 for small zones or 3052 * for boot pagesets which should never store free pages as 3053 * the pages may belong to arbitrary zones. 3054 */ 3055 if (batch > 1) 3056 batch = max(batch >> order, 2); 3057 3058 return batch; 3059 } 3060 3061 /* Remove page from the per-cpu list, caller must protect the list */ 3062 static inline 3063 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3064 int migratetype, 3065 unsigned int alloc_flags, 3066 struct per_cpu_pages *pcp, 3067 struct list_head *list) 3068 { 3069 struct page *page; 3070 3071 do { 3072 if (list_empty(list)) { 3073 int batch = nr_pcp_alloc(pcp, zone, order); 3074 int alloced; 3075 3076 alloced = rmqueue_bulk(zone, order, 3077 batch, list, 3078 migratetype, alloc_flags); 3079 3080 pcp->count += alloced << order; 3081 if (unlikely(list_empty(list))) 3082 return NULL; 3083 } 3084 3085 page = list_first_entry(list, struct page, pcp_list); 3086 list_del(&page->pcp_list); 3087 pcp->count -= 1 << order; 3088 } while (check_new_pages(page, order)); 3089 3090 return page; 3091 } 3092 3093 /* Lock and remove page from the per-cpu list */ 3094 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3095 struct zone *zone, unsigned int order, 3096 int migratetype, unsigned int alloc_flags) 3097 { 3098 struct per_cpu_pages *pcp; 3099 struct list_head *list; 3100 struct page *page; 3101 unsigned long __maybe_unused UP_flags; 3102 3103 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 3104 pcp_trylock_prepare(UP_flags); 3105 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3106 if (!pcp) { 3107 pcp_trylock_finish(UP_flags); 3108 return NULL; 3109 } 3110 3111 /* 3112 * On allocation, reduce the number of pages that are batch freed. 3113 * See nr_pcp_free() where free_factor is increased for subsequent 3114 * frees. 3115 */ 3116 pcp->free_count >>= 1; 3117 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3118 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3119 pcp_spin_unlock(pcp); 3120 pcp_trylock_finish(UP_flags); 3121 if (page) { 3122 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3123 zone_statistics(preferred_zone, zone, 1); 3124 } 3125 return page; 3126 } 3127 3128 /* 3129 * Allocate a page from the given zone. 3130 * Use pcplists for THP or "cheap" high-order allocations. 3131 */ 3132 3133 /* 3134 * Do not instrument rmqueue() with KMSAN. This function may call 3135 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 3136 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3137 * may call rmqueue() again, which will result in a deadlock. 
3138 */ 3139 __no_sanitize_memory 3140 static inline 3141 struct page *rmqueue(struct zone *preferred_zone, 3142 struct zone *zone, unsigned int order, 3143 gfp_t gfp_flags, unsigned int alloc_flags, 3144 int migratetype) 3145 { 3146 struct page *page; 3147 3148 if (likely(pcp_allowed_order(order))) { 3149 page = rmqueue_pcplist(preferred_zone, zone, order, 3150 migratetype, alloc_flags); 3151 if (likely(page)) 3152 goto out; 3153 } 3154 3155 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3156 migratetype); 3157 3158 out: 3159 /* Separate test+clear to avoid unnecessary atomics */ 3160 if ((alloc_flags & ALLOC_KSWAPD) && 3161 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3162 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3163 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3164 } 3165 3166 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3167 return page; 3168 } 3169 3170 /* 3171 * Reserve the pageblock(s) surrounding an allocation request for 3172 * exclusive use of high-order atomic allocations if there are no 3173 * empty page blocks that contain a page with a suitable order 3174 */ 3175 static void reserve_highatomic_pageblock(struct page *page, int order, 3176 struct zone *zone) 3177 { 3178 int mt; 3179 unsigned long max_managed, flags; 3180 3181 /* 3182 * The number reserved as: minimum is 1 pageblock, maximum is 3183 * roughly 1% of a zone. But if 1% of a zone falls below a 3184 * pageblock size, then don't reserve any pageblocks. 3185 * Check is race-prone but harmless. 3186 */ 3187 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 3188 return; 3189 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 3190 if (zone->nr_reserved_highatomic >= max_managed) 3191 return; 3192 3193 spin_lock_irqsave(&zone->lock, flags); 3194 3195 /* Recheck the nr_reserved_highatomic limit under the lock */ 3196 if (zone->nr_reserved_highatomic >= max_managed) 3197 goto out_unlock; 3198 3199 /* Yoink! */ 3200 mt = get_pageblock_migratetype(page); 3201 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 3202 if (!migratetype_is_mergeable(mt)) 3203 goto out_unlock; 3204 3205 if (order < pageblock_order) { 3206 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 3207 goto out_unlock; 3208 zone->nr_reserved_highatomic += pageblock_nr_pages; 3209 } else { 3210 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 3211 zone->nr_reserved_highatomic += 1 << order; 3212 } 3213 3214 out_unlock: 3215 spin_unlock_irqrestore(&zone->lock, flags); 3216 } 3217 3218 /* 3219 * Used when an allocation is about to fail under memory pressure. This 3220 * potentially hurts the reliability of high-order allocations when under 3221 * intense memory pressure but failed atomic allocations should be easier 3222 * to recover from than an OOM. 3223 * 3224 * If @force is true, try to unreserve pageblocks even though highatomic 3225 * pageblock is exhausted. 3226 */ 3227 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 3228 bool force) 3229 { 3230 struct zonelist *zonelist = ac->zonelist; 3231 unsigned long flags; 3232 struct zoneref *z; 3233 struct zone *zone; 3234 struct page *page; 3235 int order; 3236 int ret; 3237 3238 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 3239 ac->nodemask) { 3240 /* 3241 * Preserve at least one pageblock unless memory pressure 3242 * is really high. 
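 * Without @force, a zone whose highatomic reserve is already down to a
 * single pageblock is skipped entirely.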
3243 */ 3244 if (!force && zone->nr_reserved_highatomic <= 3245 pageblock_nr_pages) 3246 continue; 3247 3248 spin_lock_irqsave(&zone->lock, flags); 3249 for (order = 0; order < NR_PAGE_ORDERS; order++) { 3250 struct free_area *area = &(zone->free_area[order]); 3251 unsigned long size; 3252 3253 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 3254 if (!page) 3255 continue; 3256 3257 size = max(pageblock_nr_pages, 1UL << order); 3258 /* 3259 * It should never happen but changes to 3260 * locking could inadvertently allow a per-cpu 3261 * drain to add pages to MIGRATE_HIGHATOMIC 3262 * while unreserving so be safe and watch for 3263 * underflows. 3264 */ 3265 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) 3266 size = zone->nr_reserved_highatomic; 3267 zone->nr_reserved_highatomic -= size; 3268 3269 /* 3270 * Convert to ac->migratetype and avoid the normal 3271 * pageblock stealing heuristics. Minimally, the caller 3272 * is doing the work and needs the pages. More 3273 * importantly, if the block was always converted to 3274 * MIGRATE_UNMOVABLE or another type then the number 3275 * of pageblocks that cannot be completely freed 3276 * may increase. 3277 */ 3278 if (order < pageblock_order) 3279 ret = move_freepages_block(zone, page, 3280 MIGRATE_HIGHATOMIC, 3281 ac->migratetype); 3282 else { 3283 move_to_free_list(page, zone, order, 3284 MIGRATE_HIGHATOMIC, 3285 ac->migratetype); 3286 change_pageblock_range(page, order, 3287 ac->migratetype); 3288 ret = 1; 3289 } 3290 /* 3291 * Reserving the block(s) already succeeded, 3292 * so this should not fail on zone boundaries. 3293 */ 3294 WARN_ON_ONCE(ret == -1); 3295 if (ret > 0) { 3296 spin_unlock_irqrestore(&zone->lock, flags); 3297 return ret; 3298 } 3299 } 3300 spin_unlock_irqrestore(&zone->lock, flags); 3301 } 3302 3303 return false; 3304 } 3305 3306 static inline long __zone_watermark_unusable_free(struct zone *z, 3307 unsigned int order, unsigned int alloc_flags) 3308 { 3309 long unusable_free = (1 << order) - 1; 3310 3311 /* 3312 * If the caller does not have rights to reserves below the min 3313 * watermark then subtract the free pages reserved for highatomic. 3314 */ 3315 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3316 unusable_free += READ_ONCE(z->nr_free_highatomic); 3317 3318 #ifdef CONFIG_CMA 3319 /* If allocation can't use CMA areas don't use free CMA pages */ 3320 if (!(alloc_flags & ALLOC_CMA)) 3321 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3322 #endif 3323 3324 return unusable_free; 3325 } 3326 3327 /* 3328 * Return true if free base pages are above 'mark'. For high-order checks it 3329 * will return true of the order-0 watermark is reached and there is at least 3330 * one free page of a suitable size. Checking now avoids taking the zone lock 3331 * to check in the allocation paths if no pages are free. 3332 */ 3333 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3334 int highest_zoneidx, unsigned int alloc_flags, 3335 long free_pages) 3336 { 3337 long min = mark; 3338 int o; 3339 3340 /* free_pages may go negative - that's OK */ 3341 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3342 3343 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3344 /* 3345 * __GFP_HIGH allows access to 50% of the min reserve as well 3346 * as OOM. 3347 */ 3348 if (alloc_flags & ALLOC_MIN_RESERVE) { 3349 min -= min / 2; 3350 3351 /* 3352 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3353 * access more reserves than just __GFP_HIGH. 
Other 3354 * non-blocking allocations requests such as GFP_NOWAIT 3355 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3356 * access to the min reserve. 3357 */ 3358 if (alloc_flags & ALLOC_NON_BLOCK) 3359 min -= min / 4; 3360 } 3361 3362 /* 3363 * OOM victims can try even harder than the normal reserve 3364 * users on the grounds that it's definitely going to be in 3365 * the exit path shortly and free memory. Any allocation it 3366 * makes during the free path will be small and short-lived. 3367 */ 3368 if (alloc_flags & ALLOC_OOM) 3369 min -= min / 2; 3370 } 3371 3372 /* 3373 * Check watermarks for an order-0 allocation request. If these 3374 * are not met, then a high-order request also cannot go ahead 3375 * even if a suitable page happened to be free. 3376 */ 3377 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3378 return false; 3379 3380 /* If this is an order-0 request then the watermark is fine */ 3381 if (!order) 3382 return true; 3383 3384 /* For a high-order request, check at least one suitable page is free */ 3385 for (o = order; o < NR_PAGE_ORDERS; o++) { 3386 struct free_area *area = &z->free_area[o]; 3387 int mt; 3388 3389 if (!area->nr_free) 3390 continue; 3391 3392 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3393 if (!free_area_empty(area, mt)) 3394 return true; 3395 } 3396 3397 #ifdef CONFIG_CMA 3398 if ((alloc_flags & ALLOC_CMA) && 3399 !free_area_empty(area, MIGRATE_CMA)) { 3400 return true; 3401 } 3402 #endif 3403 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3404 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3405 return true; 3406 } 3407 } 3408 return false; 3409 } 3410 3411 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3412 int highest_zoneidx, unsigned int alloc_flags) 3413 { 3414 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3415 zone_page_state(z, NR_FREE_PAGES)); 3416 } 3417 3418 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3419 unsigned long mark, int highest_zoneidx, 3420 unsigned int alloc_flags, gfp_t gfp_mask) 3421 { 3422 long free_pages; 3423 3424 free_pages = zone_page_state(z, NR_FREE_PAGES); 3425 3426 /* 3427 * Fast check for order-0 only. If this fails then the reserves 3428 * need to be calculated. 3429 */ 3430 if (!order) { 3431 long usable_free; 3432 long reserved; 3433 3434 usable_free = free_pages; 3435 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3436 3437 /* reserved may over estimate high-atomic reserves. */ 3438 usable_free -= min(usable_free, reserved); 3439 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3440 return true; 3441 } 3442 3443 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3444 free_pages)) 3445 return true; 3446 3447 /* 3448 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3449 * when checking the min watermark. The min watermark is the 3450 * point where boosting is ignored so that kswapd is woken up 3451 * when below the low watermark. 
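 *
 * In other words, an order-0 ALLOC_MIN_RESERVE request that failed the
 * boosted check above gets one more try against the raw, unboosted
 * WMARK_MIN, so a transient watermark boost alone cannot fail it.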
3452 */ 3453 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3454 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3455 mark = z->_watermark[WMARK_MIN]; 3456 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3457 alloc_flags, free_pages); 3458 } 3459 3460 return false; 3461 } 3462 3463 #ifdef CONFIG_NUMA 3464 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3465 3466 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3467 { 3468 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3469 node_reclaim_distance; 3470 } 3471 #else /* CONFIG_NUMA */ 3472 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3473 { 3474 return true; 3475 } 3476 #endif /* CONFIG_NUMA */ 3477 3478 /* 3479 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3480 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3481 * premature use of a lower zone may cause lowmem pressure problems that 3482 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3483 * probably too small. It only makes sense to spread allocations to avoid 3484 * fragmentation between the Normal and DMA32 zones. 3485 */ 3486 static inline unsigned int 3487 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3488 { 3489 unsigned int alloc_flags; 3490 3491 /* 3492 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3493 * to save a branch. 3494 */ 3495 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3496 3497 if (defrag_mode) { 3498 alloc_flags |= ALLOC_NOFRAGMENT; 3499 return alloc_flags; 3500 } 3501 3502 #ifdef CONFIG_ZONE_DMA32 3503 if (!zone) 3504 return alloc_flags; 3505 3506 if (zone_idx(zone) != ZONE_NORMAL) 3507 return alloc_flags; 3508 3509 /* 3510 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3511 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3512 * on UMA that if Normal is populated then so is DMA32. 3513 */ 3514 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3515 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3516 return alloc_flags; 3517 3518 alloc_flags |= ALLOC_NOFRAGMENT; 3519 #endif /* CONFIG_ZONE_DMA32 */ 3520 return alloc_flags; 3521 } 3522 3523 /* Must be called after current_gfp_context() which can change gfp_mask */ 3524 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3525 unsigned int alloc_flags) 3526 { 3527 #ifdef CONFIG_CMA 3528 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3529 alloc_flags |= ALLOC_CMA; 3530 #endif 3531 return alloc_flags; 3532 } 3533 3534 /* 3535 * get_page_from_freelist goes through the zonelist trying to allocate 3536 * a page. 3537 */ 3538 static struct page * 3539 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3540 const struct alloc_context *ac) 3541 { 3542 struct zoneref *z; 3543 struct zone *zone; 3544 struct pglist_data *last_pgdat = NULL; 3545 bool last_pgdat_dirty_ok = false; 3546 bool no_fallback; 3547 3548 retry: 3549 /* 3550 * Scan zonelist, looking for a zone with enough free. 3551 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 
3552 */ 3553 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3554 z = ac->preferred_zoneref; 3555 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3556 ac->nodemask) { 3557 struct page *page; 3558 unsigned long mark; 3559 3560 if (cpusets_enabled() && 3561 (alloc_flags & ALLOC_CPUSET) && 3562 !__cpuset_zone_allowed(zone, gfp_mask)) 3563 continue; 3564 /* 3565 * When allocating a page cache page for writing, we 3566 * want to get it from a node that is within its dirty 3567 * limit, such that no single node holds more than its 3568 * proportional share of globally allowed dirty pages. 3569 * The dirty limits take into account the node's 3570 * lowmem reserves and high watermark so that kswapd 3571 * should be able to balance it without having to 3572 * write pages from its LRU list. 3573 * 3574 * XXX: For now, allow allocations to potentially 3575 * exceed the per-node dirty limit in the slowpath 3576 * (spread_dirty_pages unset) before going into reclaim, 3577 * which is important when on a NUMA setup the allowed 3578 * nodes are together not big enough to reach the 3579 * global limit. The proper fix for these situations 3580 * will require awareness of nodes in the 3581 * dirty-throttling and the flusher threads. 3582 */ 3583 if (ac->spread_dirty_pages) { 3584 if (last_pgdat != zone->zone_pgdat) { 3585 last_pgdat = zone->zone_pgdat; 3586 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3587 } 3588 3589 if (!last_pgdat_dirty_ok) 3590 continue; 3591 } 3592 3593 if (no_fallback && !defrag_mode && nr_online_nodes > 1 && 3594 zone != zonelist_zone(ac->preferred_zoneref)) { 3595 int local_nid; 3596 3597 /* 3598 * If moving to a remote node, retry but allow 3599 * fragmenting fallbacks. Locality is more important 3600 * than fragmentation avoidance. 3601 */ 3602 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3603 if (zone_to_nid(zone) != local_nid) { 3604 alloc_flags &= ~ALLOC_NOFRAGMENT; 3605 goto retry; 3606 } 3607 } 3608 3609 cond_accept_memory(zone, order, alloc_flags); 3610 3611 /* 3612 * Detect whether the number of free pages is below high 3613 * watermark. If so, we will decrease pcp->high and free 3614 * PCP pages in free path to reduce the possibility of 3615 * premature page reclaiming. Detection is done here to 3616 * avoid to do that in hotter free path. 3617 */ 3618 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3619 goto check_alloc_wmark; 3620 3621 mark = high_wmark_pages(zone); 3622 if (zone_watermark_fast(zone, order, mark, 3623 ac->highest_zoneidx, alloc_flags, 3624 gfp_mask)) 3625 goto try_this_zone; 3626 else 3627 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3628 3629 check_alloc_wmark: 3630 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3631 if (!zone_watermark_fast(zone, order, mark, 3632 ac->highest_zoneidx, alloc_flags, 3633 gfp_mask)) { 3634 int ret; 3635 3636 if (cond_accept_memory(zone, order, alloc_flags)) 3637 goto try_this_zone; 3638 3639 /* 3640 * Watermark failed for this zone, but see if we can 3641 * grow this zone if it contains deferred pages. 
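 * (_deferred_grow_zone() initializes more of this zone's deferred
 * struct pages; if that makes pages available, the zone is retried.)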
3642 */ 3643 if (deferred_pages_enabled()) { 3644 if (_deferred_grow_zone(zone, order)) 3645 goto try_this_zone; 3646 } 3647 /* Checked here to keep the fast path fast */ 3648 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3649 if (alloc_flags & ALLOC_NO_WATERMARKS) 3650 goto try_this_zone; 3651 3652 if (!node_reclaim_enabled() || 3653 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3654 continue; 3655 3656 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3657 switch (ret) { 3658 case NODE_RECLAIM_NOSCAN: 3659 /* did not scan */ 3660 continue; 3661 case NODE_RECLAIM_FULL: 3662 /* scanned but unreclaimable */ 3663 continue; 3664 default: 3665 /* did we reclaim enough */ 3666 if (zone_watermark_ok(zone, order, mark, 3667 ac->highest_zoneidx, alloc_flags)) 3668 goto try_this_zone; 3669 3670 continue; 3671 } 3672 } 3673 3674 try_this_zone: 3675 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3676 gfp_mask, alloc_flags, ac->migratetype); 3677 if (page) { 3678 prep_new_page(page, order, gfp_mask, alloc_flags); 3679 3680 /* 3681 * If this is a high-order atomic allocation then check 3682 * if the pageblock should be reserved for the future 3683 */ 3684 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3685 reserve_highatomic_pageblock(page, order, zone); 3686 3687 return page; 3688 } else { 3689 if (cond_accept_memory(zone, order, alloc_flags)) 3690 goto try_this_zone; 3691 3692 /* Try again if zone has deferred pages */ 3693 if (deferred_pages_enabled()) { 3694 if (_deferred_grow_zone(zone, order)) 3695 goto try_this_zone; 3696 } 3697 } 3698 } 3699 3700 /* 3701 * It's possible on a UMA machine to get through all zones that are 3702 * fragmented. If avoiding fragmentation, reset and try again. 3703 */ 3704 if (no_fallback && !defrag_mode) { 3705 alloc_flags &= ~ALLOC_NOFRAGMENT; 3706 goto retry; 3707 } 3708 3709 return NULL; 3710 } 3711 3712 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3713 { 3714 unsigned int filter = SHOW_MEM_FILTER_NODES; 3715 3716 /* 3717 * This documents exceptions given to allocations in certain 3718 * contexts that are allowed to allocate outside current's set 3719 * of allowed nodes. 3720 */ 3721 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3722 if (tsk_is_oom_victim(current) || 3723 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3724 filter &= ~SHOW_MEM_FILTER_NODES; 3725 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3726 filter &= ~SHOW_MEM_FILTER_NODES; 3727 3728 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3729 } 3730 3731 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3732 { 3733 struct va_format vaf; 3734 va_list args; 3735 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3736 3737 if ((gfp_mask & __GFP_NOWARN) || 3738 !__ratelimit(&nopage_rs) || 3739 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3740 return; 3741 3742 va_start(args, fmt); 3743 vaf.fmt = fmt; 3744 vaf.va = &args; 3745 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3746 current->comm, &vaf, gfp_mask, &gfp_mask, 3747 nodemask_pr_args(nodemask)); 3748 va_end(args); 3749 3750 cpuset_print_current_mems_allowed(); 3751 pr_cont("\n"); 3752 dump_stack(); 3753 warn_alloc_show_mem(gfp_mask, nodemask); 3754 } 3755 3756 static inline struct page * 3757 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3758 unsigned int alloc_flags, 3759 const struct alloc_context *ac) 3760 { 3761 struct page *page; 3762 3763 page = get_page_from_freelist(gfp_mask, order, 3764 alloc_flags|ALLOC_CPUSET, ac); 3765 /* 3766 * fallback to ignore cpuset restriction if our nodes 3767 * are depleted 3768 */ 3769 if (!page) 3770 page = get_page_from_freelist(gfp_mask, order, 3771 alloc_flags, ac); 3772 return page; 3773 } 3774 3775 static inline struct page * 3776 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3777 const struct alloc_context *ac, unsigned long *did_some_progress) 3778 { 3779 struct oom_control oc = { 3780 .zonelist = ac->zonelist, 3781 .nodemask = ac->nodemask, 3782 .memcg = NULL, 3783 .gfp_mask = gfp_mask, 3784 .order = order, 3785 }; 3786 struct page *page; 3787 3788 *did_some_progress = 0; 3789 3790 /* 3791 * Acquire the oom lock. If that fails, somebody else is 3792 * making progress for us. 3793 */ 3794 if (!mutex_trylock(&oom_lock)) { 3795 *did_some_progress = 1; 3796 schedule_timeout_uninterruptible(1); 3797 return NULL; 3798 } 3799 3800 /* 3801 * Go through the zonelist yet one more time, keep very high watermark 3802 * here, this is only to catch a parallel oom killing, we must fail if 3803 * we're still under heavy pressure. But make sure that this reclaim 3804 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3805 * allocation which will never fail due to oom_lock already held. 3806 */ 3807 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3808 ~__GFP_DIRECT_RECLAIM, order, 3809 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3810 if (page) 3811 goto out; 3812 3813 /* Coredumps can quickly deplete all memory reserves */ 3814 if (current->flags & PF_DUMPCORE) 3815 goto out; 3816 /* The OOM killer will not help higher order allocs */ 3817 if (order > PAGE_ALLOC_COSTLY_ORDER) 3818 goto out; 3819 /* 3820 * We have already exhausted all our reclaim opportunities without any 3821 * success so it is time to admit defeat. We will skip the OOM killer 3822 * because it is very likely that the caller has a more reasonable 3823 * fallback than shooting a random task. 3824 * 3825 * The OOM killer may not free memory on a specific node. 3826 */ 3827 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3828 goto out; 3829 /* The OOM killer does not needlessly kill tasks for lowmem */ 3830 if (ac->highest_zoneidx < ZONE_NORMAL) 3831 goto out; 3832 if (pm_suspended_storage()) 3833 goto out; 3834 /* 3835 * XXX: GFP_NOFS allocations should rather fail than rely on 3836 * other request to make a forward progress. 3837 * We are in an unfortunate situation where out_of_memory cannot 3838 * do much for this context but let's try it to at least get 3839 * access to memory reserved if the current task is killed (see 3840 * out_of_memory). 
Once filesystems are ready to handle allocation
3841 * failures more gracefully we should just bail out here.
3842 */
3843
3844 /* Exhausted what can be done so it's blame time */
3845 if (out_of_memory(&oc) ||
3846 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3847 *did_some_progress = 1;
3848
3849 /*
3850 * Help non-failing allocations by giving them access to memory
3851 * reserves
3852 */
3853 if (gfp_mask & __GFP_NOFAIL)
3854 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3855 ALLOC_NO_WATERMARKS, ac);
3856 }
3857 out:
3858 mutex_unlock(&oom_lock);
3859 return page;
3860 }
3861
3862 /*
3863 * Maximum number of compaction retries with progress before the OOM
3864 * killer is considered the only way to move forward.
3865 */
3866 #define MAX_COMPACT_RETRIES 16
3867
3868 #ifdef CONFIG_COMPACTION
3869 /* Try memory compaction for high-order allocations before reclaim */
3870 static struct page *
3871 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3872 unsigned int alloc_flags, const struct alloc_context *ac,
3873 enum compact_priority prio, enum compact_result *compact_result)
3874 {
3875 struct page *page = NULL;
3876 unsigned long pflags;
3877 unsigned int noreclaim_flag;
3878
3879 if (!order)
3880 return NULL;
3881
3882 psi_memstall_enter(&pflags);
3883 delayacct_compact_start();
3884 noreclaim_flag = memalloc_noreclaim_save();
3885
3886 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3887 prio, &page);
3888
3889 memalloc_noreclaim_restore(noreclaim_flag);
3890 psi_memstall_leave(&pflags);
3891 delayacct_compact_end();
3892
3893 if (*compact_result == COMPACT_SKIPPED)
3894 return NULL;
3895 /*
3896 * In at least one zone compaction wasn't deferred or skipped, so let's
3897 * count a compaction stall
3898 */
3899 count_vm_event(COMPACTSTALL);
3900
3901 /* Prep a captured page if available */
3902 if (page)
3903 prep_new_page(page, order, gfp_mask, alloc_flags);
3904
3905 /* Try to get a page from the freelist if available */
3906 if (!page)
3907 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3908
3909 if (page) {
3910 struct zone *zone = page_zone(page);
3911
3912 zone->compact_blockskip_flush = false;
3913 compaction_defer_reset(zone, order, true);
3914 count_vm_event(COMPACTSUCCESS);
3915 return page;
3916 }
3917
3918 /*
3919 * It's bad if a compaction run occurs and fails. The most likely reason
3920 * is that pages exist, but not enough to satisfy watermarks.
3921 */
3922 count_vm_event(COMPACTFAIL);
3923
3924 cond_resched();
3925
3926 return NULL;
3927 }
3928
3929 static inline bool
3930 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3931 enum compact_result compact_result,
3932 enum compact_priority *compact_priority,
3933 int *compaction_retries)
3934 {
3935 int max_retries = MAX_COMPACT_RETRIES;
3936 int min_priority;
3937 bool ret = false;
3938 int retries = *compaction_retries;
3939 enum compact_priority priority = *compact_priority;
3940
3941 if (!order)
3942 return false;
3943
3944 if (fatal_signal_pending(current))
3945 return false;
3946
3947 /*
3948 * Compaction was skipped due to a lack of free order-0
3949 * migration targets. Continue if reclaim can help.
3950 */
3951 if (compact_result == COMPACT_SKIPPED) {
3952 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3953 goto out;
3954 }
3955
3956 /*
3957 * Compaction managed to coalesce some page blocks, but the
3958 * allocation failed presumably due to a race. Retry some.
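 * (The retry budget is MAX_COMPACT_RETRIES, and costly orders only get
 * a quarter of it - see below.)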
3959 */ 3960 if (compact_result == COMPACT_SUCCESS) { 3961 /* 3962 * !costly requests are much more important than 3963 * __GFP_RETRY_MAYFAIL costly ones because they are de 3964 * facto nofail and invoke OOM killer to move on while 3965 * costly can fail and users are ready to cope with 3966 * that. 1/4 retries is rather arbitrary but we would 3967 * need much more detailed feedback from compaction to 3968 * make a better decision. 3969 */ 3970 if (order > PAGE_ALLOC_COSTLY_ORDER) 3971 max_retries /= 4; 3972 3973 if (++(*compaction_retries) <= max_retries) { 3974 ret = true; 3975 goto out; 3976 } 3977 } 3978 3979 /* 3980 * Compaction failed. Retry with increasing priority. 3981 */ 3982 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3983 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3984 3985 if (*compact_priority > min_priority) { 3986 (*compact_priority)--; 3987 *compaction_retries = 0; 3988 ret = true; 3989 } 3990 out: 3991 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3992 return ret; 3993 } 3994 #else 3995 static inline struct page * 3996 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3997 unsigned int alloc_flags, const struct alloc_context *ac, 3998 enum compact_priority prio, enum compact_result *compact_result) 3999 { 4000 *compact_result = COMPACT_SKIPPED; 4001 return NULL; 4002 } 4003 4004 static inline bool 4005 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 4006 enum compact_result compact_result, 4007 enum compact_priority *compact_priority, 4008 int *compaction_retries) 4009 { 4010 struct zone *zone; 4011 struct zoneref *z; 4012 4013 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4014 return false; 4015 4016 /* 4017 * There are setups with compaction disabled which would prefer to loop 4018 * inside the allocator rather than hit the oom killer prematurely. 4019 * Let's give them a good hope and keep retrying while the order-0 4020 * watermarks are OK. 
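 * (Costly orders were already rejected by the order check above, so
 * only !costly requests keep looping here.)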
4021 */ 4022 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4023 ac->highest_zoneidx, ac->nodemask) { 4024 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4025 ac->highest_zoneidx, alloc_flags)) 4026 return true; 4027 } 4028 return false; 4029 } 4030 #endif /* CONFIG_COMPACTION */ 4031 4032 #ifdef CONFIG_LOCKDEP 4033 static struct lockdep_map __fs_reclaim_map = 4034 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4035 4036 static bool __need_reclaim(gfp_t gfp_mask) 4037 { 4038 /* no reclaim without waiting on it */ 4039 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4040 return false; 4041 4042 /* this guy won't enter reclaim */ 4043 if (current->flags & PF_MEMALLOC) 4044 return false; 4045 4046 if (gfp_mask & __GFP_NOLOCKDEP) 4047 return false; 4048 4049 return true; 4050 } 4051 4052 void __fs_reclaim_acquire(unsigned long ip) 4053 { 4054 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4055 } 4056 4057 void __fs_reclaim_release(unsigned long ip) 4058 { 4059 lock_release(&__fs_reclaim_map, ip); 4060 } 4061 4062 void fs_reclaim_acquire(gfp_t gfp_mask) 4063 { 4064 gfp_mask = current_gfp_context(gfp_mask); 4065 4066 if (__need_reclaim(gfp_mask)) { 4067 if (gfp_mask & __GFP_FS) 4068 __fs_reclaim_acquire(_RET_IP_); 4069 4070 #ifdef CONFIG_MMU_NOTIFIER 4071 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4072 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4073 #endif 4074 4075 } 4076 } 4077 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4078 4079 void fs_reclaim_release(gfp_t gfp_mask) 4080 { 4081 gfp_mask = current_gfp_context(gfp_mask); 4082 4083 if (__need_reclaim(gfp_mask)) { 4084 if (gfp_mask & __GFP_FS) 4085 __fs_reclaim_release(_RET_IP_); 4086 } 4087 } 4088 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4089 #endif 4090 4091 /* 4092 * Zonelists may change due to hotplug during allocation. Detect when zonelists 4093 * have been rebuilt so allocation retries. Reader side does not lock and 4094 * retries the allocation if zonelist changes. Writer side is protected by the 4095 * embedded spin_lock. 
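 *
 * The reader side pattern (as used by __alloc_pages_slowpath()) is
 * roughly:
 *
 *	cookie = zonelist_iter_begin();
 *	...attempt the allocation...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;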
4096 */ 4097 static DEFINE_SEQLOCK(zonelist_update_seq); 4098 4099 static unsigned int zonelist_iter_begin(void) 4100 { 4101 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4102 return read_seqbegin(&zonelist_update_seq); 4103 4104 return 0; 4105 } 4106 4107 static unsigned int check_retry_zonelist(unsigned int seq) 4108 { 4109 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4110 return read_seqretry(&zonelist_update_seq, seq); 4111 4112 return seq; 4113 } 4114 4115 /* Perform direct synchronous page reclaim */ 4116 static unsigned long 4117 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4118 const struct alloc_context *ac) 4119 { 4120 unsigned int noreclaim_flag; 4121 unsigned long progress; 4122 4123 cond_resched(); 4124 4125 /* We now go into synchronous reclaim */ 4126 cpuset_memory_pressure_bump(); 4127 fs_reclaim_acquire(gfp_mask); 4128 noreclaim_flag = memalloc_noreclaim_save(); 4129 4130 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4131 ac->nodemask); 4132 4133 memalloc_noreclaim_restore(noreclaim_flag); 4134 fs_reclaim_release(gfp_mask); 4135 4136 cond_resched(); 4137 4138 return progress; 4139 } 4140 4141 /* The really slow allocator path where we enter direct reclaim */ 4142 static inline struct page * 4143 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4144 unsigned int alloc_flags, const struct alloc_context *ac, 4145 unsigned long *did_some_progress) 4146 { 4147 struct page *page = NULL; 4148 unsigned long pflags; 4149 bool drained = false; 4150 4151 psi_memstall_enter(&pflags); 4152 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4153 if (unlikely(!(*did_some_progress))) 4154 goto out; 4155 4156 retry: 4157 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4158 4159 /* 4160 * If an allocation failed after direct reclaim, it could be because 4161 * pages are pinned on the per-cpu lists or in high alloc reserves. 4162 * Shrink them and try again 4163 */ 4164 if (!page && !drained) { 4165 unreserve_highatomic_pageblock(ac, false); 4166 drain_all_pages(NULL); 4167 drained = true; 4168 goto retry; 4169 } 4170 out: 4171 psi_memstall_leave(&pflags); 4172 4173 return page; 4174 } 4175 4176 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4177 const struct alloc_context *ac) 4178 { 4179 struct zoneref *z; 4180 struct zone *zone; 4181 pg_data_t *last_pgdat = NULL; 4182 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4183 unsigned int reclaim_order; 4184 4185 if (defrag_mode) 4186 reclaim_order = max(order, pageblock_order); 4187 else 4188 reclaim_order = order; 4189 4190 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4191 ac->nodemask) { 4192 if (!managed_zone(zone)) 4193 continue; 4194 if (last_pgdat == zone->zone_pgdat) 4195 continue; 4196 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); 4197 last_pgdat = zone->zone_pgdat; 4198 } 4199 } 4200 4201 static inline unsigned int 4202 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4203 { 4204 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4205 4206 /* 4207 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4208 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4209 * to save two branches. 
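 * (The BUILD_BUG_ON()s below enforce this equivalence at compile time,
 * so the gfp bits can be copied straight into alloc_flags.)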
4210 */ 4211 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4212 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4213 4214 /* 4215 * The caller may dip into page reserves a bit more if the caller 4216 * cannot run direct reclaim, or if the caller has realtime scheduling 4217 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4218 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 4219 */ 4220 alloc_flags |= (__force int) 4221 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4222 4223 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4224 /* 4225 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4226 * if it can't schedule. 4227 */ 4228 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4229 alloc_flags |= ALLOC_NON_BLOCK; 4230 4231 if (order > 0) 4232 alloc_flags |= ALLOC_HIGHATOMIC; 4233 } 4234 4235 /* 4236 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4237 * GFP_ATOMIC) rather than fail, see the comment for 4238 * cpuset_node_allowed(). 4239 */ 4240 if (alloc_flags & ALLOC_MIN_RESERVE) 4241 alloc_flags &= ~ALLOC_CPUSET; 4242 } else if (unlikely(rt_or_dl_task(current)) && in_task()) 4243 alloc_flags |= ALLOC_MIN_RESERVE; 4244 4245 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4246 4247 if (defrag_mode) 4248 alloc_flags |= ALLOC_NOFRAGMENT; 4249 4250 return alloc_flags; 4251 } 4252 4253 static bool oom_reserves_allowed(struct task_struct *tsk) 4254 { 4255 if (!tsk_is_oom_victim(tsk)) 4256 return false; 4257 4258 /* 4259 * !MMU doesn't have oom reaper so give access to memory reserves 4260 * only to the thread with TIF_MEMDIE set 4261 */ 4262 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4263 return false; 4264 4265 return true; 4266 } 4267 4268 /* 4269 * Distinguish requests which really need access to full memory 4270 * reserves from oom victims which can live with a portion of it 4271 */ 4272 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4273 { 4274 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4275 return 0; 4276 if (gfp_mask & __GFP_MEMALLOC) 4277 return ALLOC_NO_WATERMARKS; 4278 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4279 return ALLOC_NO_WATERMARKS; 4280 if (!in_interrupt()) { 4281 if (current->flags & PF_MEMALLOC) 4282 return ALLOC_NO_WATERMARKS; 4283 else if (oom_reserves_allowed(current)) 4284 return ALLOC_OOM; 4285 } 4286 4287 return 0; 4288 } 4289 4290 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4291 { 4292 return !!__gfp_pfmemalloc_flags(gfp_mask); 4293 } 4294 4295 /* 4296 * Checks whether it makes sense to retry the reclaim to make a forward progress 4297 * for the given allocation request. 4298 * 4299 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4300 * without success, or when we couldn't even meet the watermark if we 4301 * reclaimed all remaining pages on the LRU lists. 4302 * 4303 * Returns true if a retry is viable or false to enter the oom path. 
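 *
 * The estimate is deliberately optimistic: for each eligible zone the
 * min watermark is checked against free plus reclaimable pages, so
 * false is only returned when even a perfect reclaim pass could not
 * satisfy the request.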
4304 */ 4305 static inline bool 4306 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4307 struct alloc_context *ac, int alloc_flags, 4308 bool did_some_progress, int *no_progress_loops) 4309 { 4310 struct zone *zone; 4311 struct zoneref *z; 4312 bool ret = false; 4313 4314 /* 4315 * Costly allocations might have made a progress but this doesn't mean 4316 * their order will become available due to high fragmentation so 4317 * always increment the no progress counter for them 4318 */ 4319 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4320 *no_progress_loops = 0; 4321 else 4322 (*no_progress_loops)++; 4323 4324 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4325 goto out; 4326 4327 4328 /* 4329 * Keep reclaiming pages while there is a chance this will lead 4330 * somewhere. If none of the target zones can satisfy our allocation 4331 * request even if all reclaimable pages are considered then we are 4332 * screwed and have to go OOM. 4333 */ 4334 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4335 ac->highest_zoneidx, ac->nodemask) { 4336 unsigned long available; 4337 unsigned long reclaimable; 4338 unsigned long min_wmark = min_wmark_pages(zone); 4339 bool wmark; 4340 4341 if (cpusets_enabled() && 4342 (alloc_flags & ALLOC_CPUSET) && 4343 !__cpuset_zone_allowed(zone, gfp_mask)) 4344 continue; 4345 4346 available = reclaimable = zone_reclaimable_pages(zone); 4347 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4348 4349 /* 4350 * Would the allocation succeed if we reclaimed all 4351 * reclaimable pages? 4352 */ 4353 wmark = __zone_watermark_ok(zone, order, min_wmark, 4354 ac->highest_zoneidx, alloc_flags, available); 4355 trace_reclaim_retry_zone(z, order, reclaimable, 4356 available, min_wmark, *no_progress_loops, wmark); 4357 if (wmark) { 4358 ret = true; 4359 break; 4360 } 4361 } 4362 4363 /* 4364 * Memory allocation/reclaim might be called from a WQ context and the 4365 * current implementation of the WQ concurrency control doesn't 4366 * recognize that a particular WQ is congested if the worker thread is 4367 * looping without ever sleeping. Therefore we have to do a short sleep 4368 * here rather than calling cond_resched(). 4369 */ 4370 if (current->flags & PF_WQ_WORKER) 4371 schedule_timeout_uninterruptible(1); 4372 else 4373 cond_resched(); 4374 out: 4375 /* Before OOM, exhaust highatomic_reserve */ 4376 if (!ret) 4377 return unreserve_highatomic_pageblock(ac, true); 4378 4379 return ret; 4380 } 4381 4382 static inline bool 4383 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4384 { 4385 /* 4386 * It's possible that cpuset's mems_allowed and the nodemask from 4387 * mempolicy don't intersect. This should be normally dealt with by 4388 * policy_nodemask(), but it's possible to race with cpuset update in 4389 * such a way the check therein was true, and then it became false 4390 * before we got our cpuset_mems_cookie here. 4391 * This assumes that for all allocations, ac->nodemask can come only 4392 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4393 * when it does not intersect with the cpuset restrictions) or the 4394 * caller can deal with a violated nodemask. 
4395 */ 4396 if (cpusets_enabled() && ac->nodemask && 4397 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4398 ac->nodemask = NULL; 4399 return true; 4400 } 4401 4402 /* 4403 * When updating a task's mems_allowed or mempolicy nodemask, it is 4404 * possible to race with parallel threads in such a way that our 4405 * allocation can fail while the mask is being updated. If we are about 4406 * to fail, check if the cpuset changed during allocation and if so, 4407 * retry. 4408 */ 4409 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4410 return true; 4411 4412 return false; 4413 } 4414 4415 static inline struct page * 4416 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4417 struct alloc_context *ac) 4418 { 4419 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4420 bool can_compact = gfp_compaction_allowed(gfp_mask); 4421 bool nofail = gfp_mask & __GFP_NOFAIL; 4422 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4423 struct page *page = NULL; 4424 unsigned int alloc_flags; 4425 unsigned long did_some_progress; 4426 enum compact_priority compact_priority; 4427 enum compact_result compact_result; 4428 int compaction_retries; 4429 int no_progress_loops; 4430 unsigned int cpuset_mems_cookie; 4431 unsigned int zonelist_iter_cookie; 4432 int reserve_flags; 4433 4434 if (unlikely(nofail)) { 4435 /* 4436 * We most definitely don't want callers attempting to 4437 * allocate greater than order-1 page units with __GFP_NOFAIL. 4438 */ 4439 WARN_ON_ONCE(order > 1); 4440 /* 4441 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM, 4442 * otherwise, we may result in lockup. 4443 */ 4444 WARN_ON_ONCE(!can_direct_reclaim); 4445 /* 4446 * PF_MEMALLOC request from this context is rather bizarre 4447 * because we cannot reclaim anything and only can loop waiting 4448 * for somebody to do a work for us. 4449 */ 4450 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4451 } 4452 4453 restart: 4454 compaction_retries = 0; 4455 no_progress_loops = 0; 4456 compact_result = COMPACT_SKIPPED; 4457 compact_priority = DEF_COMPACT_PRIORITY; 4458 cpuset_mems_cookie = read_mems_allowed_begin(); 4459 zonelist_iter_cookie = zonelist_iter_begin(); 4460 4461 /* 4462 * The fast path uses conservative alloc_flags to succeed only until 4463 * kswapd needs to be woken up, and to avoid the cost of setting up 4464 * alloc_flags precisely. So we do that now. 4465 */ 4466 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4467 4468 /* 4469 * We need to recalculate the starting point for the zonelist iterator 4470 * because we might have used different nodemask in the fast path, or 4471 * there was a cpuset modification and we are retrying - otherwise we 4472 * could end up iterating over non-eligible zones endlessly. 4473 */ 4474 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4475 ac->highest_zoneidx, ac->nodemask); 4476 if (!zonelist_zone(ac->preferred_zoneref)) 4477 goto nopage; 4478 4479 /* 4480 * Check for insane configurations where the cpuset doesn't contain 4481 * any suitable zone to satisfy the request - e.g. non-movable 4482 * GFP_HIGHUSER allocations from MOVABLE nodes only. 
4483 */ 4484 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4485 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4486 ac->highest_zoneidx, 4487 &cpuset_current_mems_allowed); 4488 if (!zonelist_zone(z)) 4489 goto nopage; 4490 } 4491 4492 if (alloc_flags & ALLOC_KSWAPD) 4493 wake_all_kswapds(order, gfp_mask, ac); 4494 4495 /* 4496 * The adjusted alloc_flags might result in immediate success, so try 4497 * that first 4498 */ 4499 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4500 if (page) 4501 goto got_pg; 4502 4503 /* 4504 * For costly allocations, try direct compaction first, as it's likely 4505 * that we have enough base pages and don't need to reclaim. For non- 4506 * movable high-order allocations, do that as well, as compaction will 4507 * try prevent permanent fragmentation by migrating from blocks of the 4508 * same migratetype. 4509 * Don't try this for allocations that are allowed to ignore 4510 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4511 */ 4512 if (can_direct_reclaim && can_compact && 4513 (costly_order || 4514 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4515 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4516 page = __alloc_pages_direct_compact(gfp_mask, order, 4517 alloc_flags, ac, 4518 INIT_COMPACT_PRIORITY, 4519 &compact_result); 4520 if (page) 4521 goto got_pg; 4522 4523 /* 4524 * Checks for costly allocations with __GFP_NORETRY, which 4525 * includes some THP page fault allocations 4526 */ 4527 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4528 /* 4529 * If allocating entire pageblock(s) and compaction 4530 * failed because all zones are below low watermarks 4531 * or is prohibited because it recently failed at this 4532 * order, fail immediately unless the allocator has 4533 * requested compaction and reclaim retry. 4534 * 4535 * Reclaim is 4536 * - potentially very expensive because zones are far 4537 * below their low watermarks or this is part of very 4538 * bursty high order allocations, 4539 * - not guaranteed to help because isolate_freepages() 4540 * may not iterate over freed pages as part of its 4541 * linear scan, and 4542 * - unlikely to make entire pageblocks free on its 4543 * own. 4544 */ 4545 if (compact_result == COMPACT_SKIPPED || 4546 compact_result == COMPACT_DEFERRED) 4547 goto nopage; 4548 4549 /* 4550 * Looks like reclaim/compaction is worth trying, but 4551 * sync compaction could be very expensive, so keep 4552 * using async compaction. 4553 */ 4554 compact_priority = INIT_COMPACT_PRIORITY; 4555 } 4556 } 4557 4558 retry: 4559 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4560 if (alloc_flags & ALLOC_KSWAPD) 4561 wake_all_kswapds(order, gfp_mask, ac); 4562 4563 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4564 if (reserve_flags) 4565 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4566 (alloc_flags & ALLOC_KSWAPD); 4567 4568 /* 4569 * Reset the nodemask and zonelist iterators if memory policies can be 4570 * ignored. These allocations are high priority and system rather than 4571 * user oriented. 
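 * (Clearing ac->nodemask below lets the zonelist iterator consider all
 * nodes; the preferred zoneref is recomputed accordingly.)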
4572 */
4573 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4574 ac->nodemask = NULL;
4575 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4576 ac->highest_zoneidx, ac->nodemask);
4577 }
4578
4579 /* Attempt with potentially adjusted zonelist and alloc_flags */
4580 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4581 if (page)
4582 goto got_pg;
4583
4584 /* Caller is not willing to reclaim, we can't balance anything */
4585 if (!can_direct_reclaim)
4586 goto nopage;
4587
4588 /* Avoid recursion of direct reclaim */
4589 if (current->flags & PF_MEMALLOC)
4590 goto nopage;
4591
4592 /* Try direct reclaim and then allocating */
4593 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4594 &did_some_progress);
4595 if (page)
4596 goto got_pg;
4597
4598 /* Try direct compaction and then allocating */
4599 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4600 compact_priority, &compact_result);
4601 if (page)
4602 goto got_pg;
4603
4604 /* Do not loop if specifically requested */
4605 if (gfp_mask & __GFP_NORETRY)
4606 goto nopage;
4607
4608 /*
4609 * Do not retry costly high order allocations unless they are
4610 * __GFP_RETRY_MAYFAIL and we can compact
4611 */
4612 if (costly_order && (!can_compact ||
4613 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4614 goto nopage;
4615
4616 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4617 did_some_progress > 0, &no_progress_loops))
4618 goto retry;
4619
4620 /*
4621 * It doesn't make any sense to retry compaction if order-0
4622 * reclaim is not able to make any progress because the current
4623 * implementation of compaction depends on a sufficient amount
4624 * of free memory (see __compaction_suitable)
4625 */
4626 if (did_some_progress > 0 && can_compact &&
4627 should_compact_retry(ac, order, alloc_flags,
4628 compact_result, &compact_priority,
4629 &compaction_retries))
4630 goto retry;
4631
4632 /* Reclaim/compaction failed to prevent the fallback */
4633 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
4634 alloc_flags &= ~ALLOC_NOFRAGMENT;
4635 goto retry;
4636 }
4637
4638 /*
4639 * Deal with possible cpuset update races or zonelist updates to avoid
4640 * an unnecessary OOM kill.
4641 */
4642 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4643 check_retry_zonelist(zonelist_iter_cookie))
4644 goto restart;
4645
4646 /* Reclaim has failed us, start killing things */
4647 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4648 if (page)
4649 goto got_pg;
4650
4651 /* Avoid allocations with no watermarks from looping endlessly */
4652 if (tsk_is_oom_victim(current) &&
4653 (alloc_flags & ALLOC_OOM ||
4654 (gfp_mask & __GFP_NOMEMALLOC)))
4655 goto nopage;
4656
4657 /* Retry as long as the OOM killer is making progress */
4658 if (did_some_progress) {
4659 no_progress_loops = 0;
4660 goto retry;
4661 }
4662
4663 nopage:
4664 /*
4665 * Deal with possible cpuset update races or zonelist updates to avoid
4666 * an unnecessary OOM kill.
4667 */
4668 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4669 check_retry_zonelist(zonelist_iter_cookie))
4670 goto restart;
4671
4672 /*
4673 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4674 * we always retry
4675 */
4676 if (unlikely(nofail)) {
4677 /*
4678 * Lacking direct_reclaim we can't do anything to reclaim memory,
4679 * so we disregard these unreasonable nofail requests and still
4680 * return NULL
4681 */
4682 if (!can_direct_reclaim)
4683 goto fail;
4684
4685 /*
4686 * Help non-failing allocations by giving some access to memory
4687 * reserves normally used for high priority non-blocking
4688 * allocations but do not use ALLOC_NO_WATERMARKS because this
4689 * could deplete whole memory reserves which would just make
4690 * the situation worse.
4691 */
4692 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4693 if (page)
4694 goto got_pg;
4695
4696 cond_resched();
4697 goto retry;
4698 }
4699 fail:
4700 warn_alloc(gfp_mask, ac->nodemask,
4701 "page allocation failure: order:%u", order);
4702 got_pg:
4703 return page;
4704 }
4705
4706 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4707 int preferred_nid, nodemask_t *nodemask,
4708 struct alloc_context *ac, gfp_t *alloc_gfp,
4709 unsigned int *alloc_flags)
4710 {
4711 ac->highest_zoneidx = gfp_zone(gfp_mask);
4712 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4713 ac->nodemask = nodemask;
4714 ac->migratetype = gfp_migratetype(gfp_mask);
4715
4716 if (cpusets_enabled()) {
4717 *alloc_gfp |= __GFP_HARDWALL;
4718 /*
4719 * When we are in interrupt context, the current task's
4720 * context is irrelevant, which means that any node is OK.
4721 */
4722 if (in_task() && !ac->nodemask)
4723 ac->nodemask = &cpuset_current_mems_allowed;
4724 else
4725 *alloc_flags |= ALLOC_CPUSET;
4726 }
4727
4728 might_alloc(gfp_mask);
4729
4730 /*
4731 * Don't invoke should_fail logic, since it may call
4732 * get_random_u32() and printk() which need to spin_lock.
4733 */
4734 if (!(*alloc_flags & ALLOC_TRYLOCK) &&
4735 should_fail_alloc_page(gfp_mask, order))
4736 return false;
4737
4738 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4739
4740 /* Dirty zone balancing only done in the fast path */
4741 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4742
4743 /*
4744 * The preferred zone is used for statistics but crucially it is
4745 * also used as the starting point for the zonelist iterator. It
4746 * may get reset for allocations that ignore memory policies.
4747 */
4748 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4749 ac->highest_zoneidx, ac->nodemask);
4750
4751 return true;
4752 }
4753
4754 /*
4755 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
4756 * @gfp: GFP flags for the allocation
4757 * @preferred_nid: The preferred NUMA node ID to allocate from
4758 * @nodemask: Set of nodes to allocate from, may be NULL
4759 * @nr_pages: The number of pages desired in the array
4760 * @page_array: Array to store the pages
4761 *
4762 * This is a batched version of the page allocator that attempts to
4763 * allocate nr_pages quickly. Pages are added to the page_array.
4764 *
4765 * Note that only NULL elements are populated with pages and nr_pages
4766 * is the maximum number of pages that will be stored in the array.
4767 *
4768 * Returns the number of pages in the array.
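 *
 * A minimal usage sketch (illustrative only; callers normally go
 * through the alloc_pages_bulk*() wrappers rather than this _noprof
 * variant):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_noprof(GFP_KERNEL, numa_mem_id(), NULL,
 *					 8, pages);
 *
 * On return, pages[0..filled-1] are populated and filled may be less
 * than 8.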
4769 */ 4770 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 4771 nodemask_t *nodemask, int nr_pages, 4772 struct page **page_array) 4773 { 4774 struct page *page; 4775 unsigned long __maybe_unused UP_flags; 4776 struct zone *zone; 4777 struct zoneref *z; 4778 struct per_cpu_pages *pcp; 4779 struct list_head *pcp_list; 4780 struct alloc_context ac; 4781 gfp_t alloc_gfp; 4782 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4783 int nr_populated = 0, nr_account = 0; 4784 4785 /* 4786 * Skip populated array elements to determine if any pages need 4787 * to be allocated before disabling IRQs. 4788 */ 4789 while (nr_populated < nr_pages && page_array[nr_populated]) 4790 nr_populated++; 4791 4792 /* No pages requested? */ 4793 if (unlikely(nr_pages <= 0)) 4794 goto out; 4795 4796 /* Already populated array? */ 4797 if (unlikely(nr_pages - nr_populated == 0)) 4798 goto out; 4799 4800 /* Bulk allocator does not support memcg accounting. */ 4801 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4802 goto failed; 4803 4804 /* Use the single page allocator for one page. */ 4805 if (nr_pages - nr_populated == 1) 4806 goto failed; 4807 4808 #ifdef CONFIG_PAGE_OWNER 4809 /* 4810 * PAGE_OWNER may recurse into the allocator to allocate space to 4811 * save the stack with pagesets.lock held. Releasing/reacquiring 4812 * removes much of the performance benefit of bulk allocation so 4813 * force the caller to allocate one page at a time as it'll have 4814 * similar performance to added complexity to the bulk allocator. 4815 */ 4816 if (static_branch_unlikely(&page_owner_inited)) 4817 goto failed; 4818 #endif 4819 4820 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4821 gfp &= gfp_allowed_mask; 4822 alloc_gfp = gfp; 4823 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4824 goto out; 4825 gfp = alloc_gfp; 4826 4827 /* Find an allowed local zone that meets the low watermark. */ 4828 z = ac.preferred_zoneref; 4829 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 4830 unsigned long mark; 4831 4832 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 4833 !__cpuset_zone_allowed(zone, gfp)) { 4834 continue; 4835 } 4836 4837 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 4838 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 4839 goto failed; 4840 } 4841 4842 cond_accept_memory(zone, 0, alloc_flags); 4843 retry_this_zone: 4844 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4845 if (zone_watermark_fast(zone, 0, mark, 4846 zonelist_zone_idx(ac.preferred_zoneref), 4847 alloc_flags, gfp)) { 4848 break; 4849 } 4850 4851 if (cond_accept_memory(zone, 0, alloc_flags)) 4852 goto retry_this_zone; 4853 4854 /* Try again if zone has deferred pages */ 4855 if (deferred_pages_enabled()) { 4856 if (_deferred_grow_zone(zone, 0)) 4857 goto retry_this_zone; 4858 } 4859 } 4860 4861 /* 4862 * If there are no allowed local zones that meets the watermarks then 4863 * try to allocate a single page and reclaim if necessary. 4864 */ 4865 if (unlikely(!zone)) 4866 goto failed; 4867 4868 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
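 * If it does, fall back to the single-page allocator via failed_irq.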
*/ 4869 pcp_trylock_prepare(UP_flags); 4870 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4871 if (!pcp) 4872 goto failed_irq; 4873 4874 /* Attempt the batch allocation */ 4875 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4876 while (nr_populated < nr_pages) { 4877 4878 /* Skip existing pages */ 4879 if (page_array[nr_populated]) { 4880 nr_populated++; 4881 continue; 4882 } 4883 4884 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4885 pcp, pcp_list); 4886 if (unlikely(!page)) { 4887 /* Try and allocate at least one page */ 4888 if (!nr_account) { 4889 pcp_spin_unlock(pcp); 4890 goto failed_irq; 4891 } 4892 break; 4893 } 4894 nr_account++; 4895 4896 prep_new_page(page, 0, gfp, 0); 4897 set_page_refcounted(page); 4898 page_array[nr_populated++] = page; 4899 } 4900 4901 pcp_spin_unlock(pcp); 4902 pcp_trylock_finish(UP_flags); 4903 4904 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4905 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 4906 4907 out: 4908 return nr_populated; 4909 4910 failed_irq: 4911 pcp_trylock_finish(UP_flags); 4912 4913 failed: 4914 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 4915 if (page) 4916 page_array[nr_populated++] = page; 4917 goto out; 4918 } 4919 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 4920 4921 /* 4922 * This is the 'heart' of the zoned buddy allocator. 4923 */ 4924 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, 4925 int preferred_nid, nodemask_t *nodemask) 4926 { 4927 struct page *page; 4928 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4929 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4930 struct alloc_context ac = { }; 4931 4932 /* 4933 * There are several places where we assume that the order value is sane 4934 * so bail out early if the request is out of bound. 4935 */ 4936 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 4937 return NULL; 4938 4939 gfp &= gfp_allowed_mask; 4940 /* 4941 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4942 * resp. GFP_NOIO which has to be inherited for all allocation requests 4943 * from a particular context which has been marked by 4944 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4945 * movable zones are not used during allocation. 4946 */ 4947 gfp = current_gfp_context(gfp); 4948 alloc_gfp = gfp; 4949 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4950 &alloc_gfp, &alloc_flags)) 4951 return NULL; 4952 4953 /* 4954 * Forbid the first pass from falling back to types that fragment 4955 * memory until all local zones are considered. 4956 */ 4957 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 4958 4959 /* First allocation attempt */ 4960 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4961 if (likely(page)) 4962 goto out; 4963 4964 alloc_gfp = gfp; 4965 ac.spread_dirty_pages = false; 4966 4967 /* 4968 * Restore the original nodemask if it was potentially replaced with 4969 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
4970 */ 4971 ac.nodemask = nodemask; 4972 4973 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4974 4975 out: 4976 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4977 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4978 free_frozen_pages(page, order); 4979 page = NULL; 4980 } 4981 4982 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4983 kmsan_alloc_page(page, order, alloc_gfp); 4984 4985 return page; 4986 } 4987 EXPORT_SYMBOL(__alloc_frozen_pages_noprof); 4988 4989 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 4990 int preferred_nid, nodemask_t *nodemask) 4991 { 4992 struct page *page; 4993 4994 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); 4995 if (page) 4996 set_page_refcounted(page); 4997 return page; 4998 } 4999 EXPORT_SYMBOL(__alloc_pages_noprof); 5000 5001 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 5002 nodemask_t *nodemask) 5003 { 5004 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 5005 preferred_nid, nodemask); 5006 return page_rmappable_folio(page); 5007 } 5008 EXPORT_SYMBOL(__folio_alloc_noprof); 5009 5010 /* 5011 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5012 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5013 * you need to access high mem. 5014 */ 5015 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 5016 { 5017 struct page *page; 5018 5019 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 5020 if (!page) 5021 return 0; 5022 return (unsigned long) page_address(page); 5023 } 5024 EXPORT_SYMBOL(get_free_pages_noprof); 5025 5026 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 5027 { 5028 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 5029 } 5030 EXPORT_SYMBOL(get_zeroed_page_noprof); 5031 5032 /** 5033 * ___free_pages - Free pages allocated with alloc_pages(). 5034 * @page: The page pointer returned from alloc_pages(). 5035 * @order: The order of the allocation. 5036 * @fpi_flags: Free Page Internal flags. 5037 * 5038 * This function can free multi-page allocations that are not compound 5039 * pages. It does not check that the @order passed in matches that of 5040 * the allocation, so it is easy to leak memory. Freeing more memory 5041 * than was allocated will probably emit a warning. 5042 * 5043 * If the last reference to this page is speculative, it will be released 5044 * by put_page() which only frees the first page of a non-compound 5045 * allocation. To prevent the remaining pages from being leaked, we free 5046 * the subsequent pages here. If you want to use the page's reference 5047 * count to decide when to free the allocation, you should allocate a 5048 * compound page, and use put_page() instead of __free_pages(). 5049 * 5050 * Context: May be called in interrupt context or while holding a normal 5051 * spinlock, but not in NMI context or while holding a raw spinlock. 
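 *
 * A typical pairing (illustrative sketch):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);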
5052 */ 5053 static void ___free_pages(struct page *page, unsigned int order, 5054 fpi_t fpi_flags) 5055 { 5056 /* get PageHead before we drop reference */ 5057 int head = PageHead(page); 5058 /* get alloc tag in case the page is released by others */ 5059 struct alloc_tag *tag = pgalloc_tag_get(page); 5060 5061 if (put_page_testzero(page)) 5062 __free_frozen_pages(page, order, fpi_flags); 5063 else if (!head) { 5064 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 5065 while (order-- > 0) 5066 __free_frozen_pages(page + (1 << order), order, 5067 fpi_flags); 5068 } 5069 } 5070 void __free_pages(struct page *page, unsigned int order) 5071 { 5072 ___free_pages(page, order, FPI_NONE); 5073 } 5074 EXPORT_SYMBOL(__free_pages); 5075 5076 /* 5077 * Can be called while holding raw_spin_lock or from IRQ and NMI for any 5078 * page type (not only those that came from try_alloc_pages) 5079 */ 5080 void free_pages_nolock(struct page *page, unsigned int order) 5081 { 5082 ___free_pages(page, order, FPI_TRYLOCK); 5083 } 5084 5085 void free_pages(unsigned long addr, unsigned int order) 5086 { 5087 if (addr != 0) { 5088 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5089 __free_pages(virt_to_page((void *)addr), order); 5090 } 5091 } 5092 5093 EXPORT_SYMBOL(free_pages); 5094 5095 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5096 size_t size) 5097 { 5098 if (addr) { 5099 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 5100 struct page *page = virt_to_page((void *)addr); 5101 struct page *last = page + nr; 5102 5103 split_page_owner(page, order, 0); 5104 pgalloc_tag_split(page_folio(page), order, 0); 5105 split_page_memcg(page, order); 5106 while (page < --last) 5107 set_page_refcounted(last); 5108 5109 last = page + (1UL << order); 5110 for (page += nr; page < last; page++) 5111 __free_pages_ok(page, 0, FPI_TO_TAIL); 5112 } 5113 return (void *)addr; 5114 } 5115 5116 /** 5117 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5118 * @size: the number of bytes to allocate 5119 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5120 * 5121 * This function is similar to alloc_pages(), except that it allocates the 5122 * minimum number of pages to satisfy the request. alloc_pages() can only 5123 * allocate memory in power-of-two pages. 5124 * 5125 * This function is also limited by MAX_PAGE_ORDER. 5126 * 5127 * Memory allocated by this function must be released by free_pages_exact(). 5128 * 5129 * Return: pointer to the allocated area or %NULL in case of error. 5130 */ 5131 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 5132 { 5133 unsigned int order = get_order(size); 5134 unsigned long addr; 5135 5136 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5137 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5138 5139 addr = get_free_pages_noprof(gfp_mask, order); 5140 return make_alloc_exact(addr, order, size); 5141 } 5142 EXPORT_SYMBOL(alloc_pages_exact_noprof); 5143 5144 /** 5145 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5146 * pages on a node. 5147 * @nid: the preferred node ID where memory should be allocated 5148 * @size: the number of bytes to allocate 5149 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5150 * 5151 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5152 * back. 5153 * 5154 * Return: pointer to the allocated area or %NULL in case of error. 
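 *
 * As with alloc_pages_exact(), the memory must be released with
 * free_pages_exact() using the same size. An illustrative pairing:
 *
 *	buf = alloc_pages_exact_nid(nid, 3 * PAGE_SIZE, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);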
5155 */ 5156 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5157 { 5158 unsigned int order = get_order(size); 5159 struct page *p; 5160 5161 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5162 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5163 5164 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5165 if (!p) 5166 return NULL; 5167 return make_alloc_exact((unsigned long)page_address(p), order, size); 5168 } 5169 5170 /** 5171 * free_pages_exact - release memory allocated via alloc_pages_exact() 5172 * @virt: the value returned by alloc_pages_exact. 5173 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5174 * 5175 * Release the memory allocated by a previous call to alloc_pages_exact. 5176 */ 5177 void free_pages_exact(void *virt, size_t size) 5178 { 5179 unsigned long addr = (unsigned long)virt; 5180 unsigned long end = addr + PAGE_ALIGN(size); 5181 5182 while (addr < end) { 5183 free_page(addr); 5184 addr += PAGE_SIZE; 5185 } 5186 } 5187 EXPORT_SYMBOL(free_pages_exact); 5188 5189 /** 5190 * nr_free_zone_pages - count number of pages beyond high watermark 5191 * @offset: The zone index of the highest zone 5192 * 5193 * nr_free_zone_pages() counts the number of pages which are beyond the 5194 * high watermark within all zones at or below a given zone index. For each 5195 * zone, the number of pages is calculated as: 5196 * 5197 * nr_free_zone_pages = managed_pages - high_pages 5198 * 5199 * Return: number of pages beyond high watermark. 5200 */ 5201 static unsigned long nr_free_zone_pages(int offset) 5202 { 5203 struct zoneref *z; 5204 struct zone *zone; 5205 5206 /* Just pick one node, since fallback list is circular */ 5207 unsigned long sum = 0; 5208 5209 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5210 5211 for_each_zone_zonelist(zone, z, zonelist, offset) { 5212 unsigned long size = zone_managed_pages(zone); 5213 unsigned long high = high_wmark_pages(zone); 5214 if (size > high) 5215 sum += size - high; 5216 } 5217 5218 return sum; 5219 } 5220 5221 /** 5222 * nr_free_buffer_pages - count number of pages beyond high watermark 5223 * 5224 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5225 * watermark within ZONE_DMA and ZONE_NORMAL. 5226 * 5227 * Return: number of pages beyond high watermark within ZONE_DMA and 5228 * ZONE_NORMAL. 5229 */ 5230 unsigned long nr_free_buffer_pages(void) 5231 { 5232 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5233 } 5234 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5235 5236 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5237 { 5238 zoneref->zone = zone; 5239 zoneref->zone_idx = zone_idx(zone); 5240 } 5241 5242 /* 5243 * Builds allocation fallback zone lists. 5244 * 5245 * Add all populated zones of a node to the zonelist. 5246 */ 5247 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5248 { 5249 struct zone *zone; 5250 enum zone_type zone_type = MAX_NR_ZONES; 5251 int nr_zones = 0; 5252 5253 do { 5254 zone_type--; 5255 zone = pgdat->node_zones + zone_type; 5256 if (populated_zone(zone)) { 5257 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5258 check_highest_zone(zone_type); 5259 } 5260 } while (zone_type); 5261 5262 return nr_zones; 5263 } 5264 5265 #ifdef CONFIG_NUMA 5266 5267 static int __parse_numa_zonelist_order(char *s) 5268 { 5269 /* 5270 * We used to support different zonelists modes but they turned 5271 * out to be just not useful. 
Let's keep the warning in place 5272 * if somebody still use the cmd line parameter so that we do 5273 * not fail it silently 5274 */ 5275 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5276 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5277 return -EINVAL; 5278 } 5279 return 0; 5280 } 5281 5282 static char numa_zonelist_order[] = "Node"; 5283 #define NUMA_ZONELIST_ORDER_LEN 16 5284 /* 5285 * sysctl handler for numa_zonelist_order 5286 */ 5287 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5288 void *buffer, size_t *length, loff_t *ppos) 5289 { 5290 if (write) 5291 return __parse_numa_zonelist_order(buffer); 5292 return proc_dostring(table, write, buffer, length, ppos); 5293 } 5294 5295 static int node_load[MAX_NUMNODES]; 5296 5297 /** 5298 * find_next_best_node - find the next node that should appear in a given node's fallback list 5299 * @node: node whose fallback list we're appending 5300 * @used_node_mask: nodemask_t of already used nodes 5301 * 5302 * We use a number of factors to determine which is the next node that should 5303 * appear on a given node's fallback list. The node should not have appeared 5304 * already in @node's fallback list, and it should be the next closest node 5305 * according to the distance array (which contains arbitrary distance values 5306 * from each node to each node in the system), and should also prefer nodes 5307 * with no CPUs, since presumably they'll have very little allocation pressure 5308 * on them otherwise. 5309 * 5310 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5311 */ 5312 int find_next_best_node(int node, nodemask_t *used_node_mask) 5313 { 5314 int n, val; 5315 int min_val = INT_MAX; 5316 int best_node = NUMA_NO_NODE; 5317 5318 /* 5319 * Use the local node if we haven't already, but for memoryless local 5320 * node, we should skip it and fall back to other nodes. 5321 */ 5322 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5323 node_set(node, *used_node_mask); 5324 return node; 5325 } 5326 5327 for_each_node_state(n, N_MEMORY) { 5328 5329 /* Don't want a node to appear more than once */ 5330 if (node_isset(n, *used_node_mask)) 5331 continue; 5332 5333 /* Use the distance array to find the distance */ 5334 val = node_distance(node, n); 5335 5336 /* Penalize nodes under us ("prefer the next node") */ 5337 val += (n < node); 5338 5339 /* Give preference to headless and unused nodes */ 5340 if (!cpumask_empty(cpumask_of_node(n))) 5341 val += PENALTY_FOR_NODE_WITH_CPUS; 5342 5343 /* Slight preference for less loaded node */ 5344 val *= MAX_NUMNODES; 5345 val += node_load[n]; 5346 5347 if (val < min_val) { 5348 min_val = val; 5349 best_node = n; 5350 } 5351 } 5352 5353 if (best_node >= 0) 5354 node_set(best_node, *used_node_mask); 5355 5356 return best_node; 5357 } 5358 5359 5360 /* 5361 * Build zonelists ordered by node and zones within node. 5362 * This results in maximum locality--normal zone overflows into local 5363 * DMA zone, if any--but risks exhausting DMA zone. 
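 *
 * For example, on a hypothetical two-node machine whose nodes each have
 * NORMAL and DMA32 zones, node 0's fallback zonelist would be ordered
 * roughly as: node 0 NORMAL, node 0 DMA32, node 1 NORMAL, node 1 DMA32.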
5364 */ 5365 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5366 unsigned nr_nodes) 5367 { 5368 struct zoneref *zonerefs; 5369 int i; 5370 5371 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5372 5373 for (i = 0; i < nr_nodes; i++) { 5374 int nr_zones; 5375 5376 pg_data_t *node = NODE_DATA(node_order[i]); 5377 5378 nr_zones = build_zonerefs_node(node, zonerefs); 5379 zonerefs += nr_zones; 5380 } 5381 zonerefs->zone = NULL; 5382 zonerefs->zone_idx = 0; 5383 } 5384 5385 /* 5386 * Build __GFP_THISNODE zonelists 5387 */ 5388 static void build_thisnode_zonelists(pg_data_t *pgdat) 5389 { 5390 struct zoneref *zonerefs; 5391 int nr_zones; 5392 5393 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5394 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5395 zonerefs += nr_zones; 5396 zonerefs->zone = NULL; 5397 zonerefs->zone_idx = 0; 5398 } 5399 5400 static void build_zonelists(pg_data_t *pgdat) 5401 { 5402 static int node_order[MAX_NUMNODES]; 5403 int node, nr_nodes = 0; 5404 nodemask_t used_mask = NODE_MASK_NONE; 5405 int local_node, prev_node; 5406 5407 /* NUMA-aware ordering of nodes */ 5408 local_node = pgdat->node_id; 5409 prev_node = local_node; 5410 5411 memset(node_order, 0, sizeof(node_order)); 5412 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5413 /* 5414 * We don't want to pressure a particular node. 5415 * So adding penalty to the first node in same 5416 * distance group to make it round-robin. 5417 */ 5418 if (node_distance(local_node, node) != 5419 node_distance(local_node, prev_node)) 5420 node_load[node] += 1; 5421 5422 node_order[nr_nodes++] = node; 5423 prev_node = node; 5424 } 5425 5426 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5427 build_thisnode_zonelists(pgdat); 5428 pr_info("Fallback order for Node %d: ", local_node); 5429 for (node = 0; node < nr_nodes; node++) 5430 pr_cont("%d ", node_order[node]); 5431 pr_cont("\n"); 5432 } 5433 5434 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5435 /* 5436 * Return node id of node used for "local" allocations. 5437 * I.e., first node id of first zone in arg node's generic zonelist. 5438 * Used for initializing percpu 'numa_mem', which is used primarily 5439 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5440 */ 5441 int local_memory_node(int node) 5442 { 5443 struct zoneref *z; 5444 5445 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5446 gfp_zone(GFP_KERNEL), 5447 NULL); 5448 return zonelist_node_idx(z); 5449 } 5450 #endif 5451 5452 static void setup_min_unmapped_ratio(void); 5453 static void setup_min_slab_ratio(void); 5454 #else /* CONFIG_NUMA */ 5455 5456 static void build_zonelists(pg_data_t *pgdat) 5457 { 5458 struct zoneref *zonerefs; 5459 int nr_zones; 5460 5461 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5462 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5463 zonerefs += nr_zones; 5464 5465 zonerefs->zone = NULL; 5466 zonerefs->zone_idx = 0; 5467 } 5468 5469 #endif /* CONFIG_NUMA */ 5470 5471 /* 5472 * Boot pageset table. One per cpu which is going to be used for all 5473 * zones and all nodes. The parameters will be set in such a way 5474 * that an item put on a list will immediately be handed over to 5475 * the buddy list. This is safe since pageset manipulation is done 5476 * with interrupts disabled. 5477 * 5478 * The boot_pagesets must be kept even after bootup is complete for 5479 * unused processors and/or zones. 
They do play a role for bootstrapping 5480 * hotplugged processors. 5481 * 5482 * zoneinfo_show() and maybe other functions do 5483 * not check if the processor is online before following the pageset pointer. 5484 * Other parts of the kernel may not check if the zone is available. 5485 */ 5486 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5487 /* These effectively disable the pcplists in the boot pageset completely */ 5488 #define BOOT_PAGESET_HIGH 0 5489 #define BOOT_PAGESET_BATCH 1 5490 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5491 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5492 5493 static void __build_all_zonelists(void *data) 5494 { 5495 int nid; 5496 int __maybe_unused cpu; 5497 pg_data_t *self = data; 5498 unsigned long flags; 5499 5500 /* 5501 * The zonelist_update_seq must be acquired with irqsave because the 5502 * reader can be invoked from IRQ with GFP_ATOMIC. 5503 */ 5504 write_seqlock_irqsave(&zonelist_update_seq, flags); 5505 /* 5506 * Also disable synchronous printk() to prevent any printk() from 5507 * trying to hold port->lock, for 5508 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5509 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5510 */ 5511 printk_deferred_enter(); 5512 5513 #ifdef CONFIG_NUMA 5514 memset(node_load, 0, sizeof(node_load)); 5515 #endif 5516 5517 /* 5518 * This node is hotadded and no memory is yet present. So just 5519 * building zonelists is fine - no need to touch other nodes. 5520 */ 5521 if (self && !node_online(self->node_id)) { 5522 build_zonelists(self); 5523 } else { 5524 /* 5525 * All possible nodes have pgdat preallocated 5526 * in free_area_init 5527 */ 5528 for_each_node(nid) { 5529 pg_data_t *pgdat = NODE_DATA(nid); 5530 5531 build_zonelists(pgdat); 5532 } 5533 5534 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5535 /* 5536 * We now know the "local memory node" for each node-- 5537 * i.e., the node of the first zone in the generic zonelist. 5538 * Set up numa_mem percpu variable for on-line cpus. During 5539 * boot, only the boot cpu should be on-line; we'll init the 5540 * secondary cpus' numa_mem as they come on-line. During 5541 * node/memory hotplug, we'll fixup all on-line cpus. 5542 */ 5543 for_each_online_cpu(cpu) 5544 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5545 #endif 5546 } 5547 5548 printk_deferred_exit(); 5549 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5550 } 5551 5552 static noinline void __init 5553 build_all_zonelists_init(void) 5554 { 5555 int cpu; 5556 5557 __build_all_zonelists(NULL); 5558 5559 /* 5560 * Initialize the boot_pagesets that are going to be used 5561 * for bootstrapping processors. The real pagesets for 5562 * each zone will be allocated later when the per cpu 5563 * allocator is available. 5564 * 5565 * boot_pagesets are used also for bootstrapping offline 5566 * cpus if the system is already booted because the pagesets 5567 * are needed to initialize allocators on a specific cpu too. 5568 * F.e. the percpu allocator needs the page allocator which 5569 * needs the percpu allocator in order to allocate its pagesets 5570 * (a chicken-egg dilemma). 5571 */ 5572 for_each_possible_cpu(cpu) 5573 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5574 5575 mminit_verify_zonelist(); 5576 cpuset_init_current_mems_allowed(); 5577 } 5578 5579 /* 5580 * unless system_state == SYSTEM_BOOTING. 
5581 * 5582 * __ref due to call of __init annotated helper build_all_zonelists_init 5583 * [protected by SYSTEM_BOOTING]. 5584 */ 5585 void __ref build_all_zonelists(pg_data_t *pgdat) 5586 { 5587 unsigned long vm_total_pages; 5588 5589 if (system_state == SYSTEM_BOOTING) { 5590 build_all_zonelists_init(); 5591 } else { 5592 __build_all_zonelists(pgdat); 5593 /* cpuset refresh routine should be here */ 5594 } 5595 /* Get the number of free pages beyond high watermark in all zones. */ 5596 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5597 /* 5598 * Disable grouping by mobility if the number of pages in the 5599 * system is too low to allow the mechanism to work. It would be 5600 * more accurate, but expensive to check per-zone. This check is 5601 * made on memory-hotadd so a system can start with mobility 5602 * disabled and enable it later 5603 */ 5604 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5605 page_group_by_mobility_disabled = 1; 5606 else 5607 page_group_by_mobility_disabled = 0; 5608 5609 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5610 nr_online_nodes, 5611 str_off_on(page_group_by_mobility_disabled), 5612 vm_total_pages); 5613 #ifdef CONFIG_NUMA 5614 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5615 #endif 5616 } 5617 5618 static int zone_batchsize(struct zone *zone) 5619 { 5620 #ifdef CONFIG_MMU 5621 int batch; 5622 5623 /* 5624 * The number of pages to batch allocate is either ~0.1% 5625 * of the zone or 1MB, whichever is smaller. The batch 5626 * size is striking a balance between allocation latency 5627 * and zone lock contention. 5628 */ 5629 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5630 batch /= 4; /* We effectively *= 4 below */ 5631 if (batch < 1) 5632 batch = 1; 5633 5634 /* 5635 * Clamp the batch to a 2^n - 1 value. Having a power 5636 * of 2 value was found to be more likely to have 5637 * suboptimal cache aliasing properties in some cases. 5638 * 5639 * For example if 2 tasks are alternately allocating 5640 * batches of pages, one task can end up with a lot 5641 * of pages of one half of the possible page colors 5642 * and the other with pages of the other colors. 5643 */ 5644 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5645 5646 return batch; 5647 5648 #else 5649 /* The deferral and batching of frees should be suppressed under NOMMU 5650 * conditions. 5651 * 5652 * The problem is that NOMMU needs to be able to allocate large chunks 5653 * of contiguous memory as there's no hardware page translation to 5654 * assemble apparent contiguous memory from discontiguous pages. 5655 * 5656 * Queueing large contiguous runs of pages for batching, however, 5657 * causes the pages to actually be freed in smaller chunks. As there 5658 * can be a significant delay between the individual batches being 5659 * recycled, this leads to the once large chunks of space being 5660 * fragmented and becoming unavailable for high-order allocations. 5661 */ 5662 return 0; 5663 #endif 5664 } 5665 5666 static int percpu_pagelist_high_fraction; 5667 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5668 int high_fraction) 5669 { 5670 #ifdef CONFIG_MMU 5671 int high; 5672 int nr_split_cpus; 5673 unsigned long total_pages; 5674 5675 if (!high_fraction) { 5676 /* 5677 * By default, the high value of the pcp is based on the zone 5678 * low watermark so that if they are full then background 5679 * reclaim will not be started prematurely. 
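 *
 * Illustrative example (assumed numbers only): for a ~4GiB zone,
 * zone_batchsize() above yields batch = 63 (min(1048576 >> 10, 256) / 4
 * = 64, clamped down to 2^n - 1), and a low watermark of e.g. 16384
 * pages split across 4 local CPUs gives a high of about 4096 pages per
 * CPU, well above the batch * 4 floor applied further below.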
5680 */ 5681 total_pages = low_wmark_pages(zone); 5682 } else { 5683 /* 5684 * If percpu_pagelist_high_fraction is configured, the high 5685 * value is based on a fraction of the managed pages in the 5686 * zone. 5687 */ 5688 total_pages = zone_managed_pages(zone) / high_fraction; 5689 } 5690 5691 /* 5692 * Split the high value across all online CPUs local to the zone. Note 5693 * that early in boot that CPUs may not be online yet and that during 5694 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5695 * onlined. For memory nodes that have no CPUs, split the high value 5696 * across all online CPUs to mitigate the risk that reclaim is triggered 5697 * prematurely due to pages stored on pcp lists. 5698 */ 5699 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5700 if (!nr_split_cpus) 5701 nr_split_cpus = num_online_cpus(); 5702 high = total_pages / nr_split_cpus; 5703 5704 /* 5705 * Ensure high is at least batch*4. The multiple is based on the 5706 * historical relationship between high and batch. 5707 */ 5708 high = max(high, batch << 2); 5709 5710 return high; 5711 #else 5712 return 0; 5713 #endif 5714 } 5715 5716 /* 5717 * pcp->high and pcp->batch values are related and generally batch is lower 5718 * than high. They are also related to pcp->count such that count is lower 5719 * than high, and as soon as it reaches high, the pcplist is flushed. 5720 * 5721 * However, guaranteeing these relations at all times would require e.g. write 5722 * barriers here but also careful usage of read barriers at the read side, and 5723 * thus be prone to error and bad for performance. Thus the update only prevents 5724 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5725 * should ensure they can cope with those fields changing asynchronously, and 5726 * fully trust only the pcp->count field on the local CPU with interrupts 5727 * disabled. 5728 * 5729 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5730 * outside of boot time (or some other assurance that no concurrent updaters 5731 * exist). 5732 */ 5733 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5734 unsigned long high_max, unsigned long batch) 5735 { 5736 WRITE_ONCE(pcp->batch, batch); 5737 WRITE_ONCE(pcp->high_min, high_min); 5738 WRITE_ONCE(pcp->high_max, high_max); 5739 } 5740 5741 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5742 { 5743 int pindex; 5744 5745 memset(pcp, 0, sizeof(*pcp)); 5746 memset(pzstats, 0, sizeof(*pzstats)); 5747 5748 spin_lock_init(&pcp->lock); 5749 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5750 INIT_LIST_HEAD(&pcp->lists[pindex]); 5751 5752 /* 5753 * Set batch and high values safe for a boot pageset. A true percpu 5754 * pageset's initialization will update them subsequently. Here we don't 5755 * need to be as careful as pageset_update() as nobody can access the 5756 * pageset yet. 
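 *
 * With BOOT_PAGESET_HIGH == 0 and BOOT_PAGESET_BATCH == 1, every page
 * freed through a boot pageset immediately exceeds ->high and is handed
 * straight back to the buddy lists, which is what makes the boot
 * pagesets usable before the real per-cpu pagesets exist.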
5757 */ 5758 pcp->high_min = BOOT_PAGESET_HIGH; 5759 pcp->high_max = BOOT_PAGESET_HIGH; 5760 pcp->batch = BOOT_PAGESET_BATCH; 5761 pcp->free_count = 0; 5762 } 5763 5764 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 5765 unsigned long high_max, unsigned long batch) 5766 { 5767 struct per_cpu_pages *pcp; 5768 int cpu; 5769 5770 for_each_possible_cpu(cpu) { 5771 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5772 pageset_update(pcp, high_min, high_max, batch); 5773 } 5774 } 5775 5776 /* 5777 * Calculate and set new high and batch values for all per-cpu pagesets of a 5778 * zone based on the zone's size. 5779 */ 5780 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5781 { 5782 int new_high_min, new_high_max, new_batch; 5783 5784 new_batch = max(1, zone_batchsize(zone)); 5785 if (percpu_pagelist_high_fraction) { 5786 new_high_min = zone_highsize(zone, new_batch, cpu_online, 5787 percpu_pagelist_high_fraction); 5788 /* 5789 * PCP high is tuned manually, disable auto-tuning via 5790 * setting high_min and high_max to the manual value. 5791 */ 5792 new_high_max = new_high_min; 5793 } else { 5794 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 5795 new_high_max = zone_highsize(zone, new_batch, cpu_online, 5796 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 5797 } 5798 5799 if (zone->pageset_high_min == new_high_min && 5800 zone->pageset_high_max == new_high_max && 5801 zone->pageset_batch == new_batch) 5802 return; 5803 5804 zone->pageset_high_min = new_high_min; 5805 zone->pageset_high_max = new_high_max; 5806 zone->pageset_batch = new_batch; 5807 5808 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 5809 new_batch); 5810 } 5811 5812 void __meminit setup_zone_pageset(struct zone *zone) 5813 { 5814 int cpu; 5815 5816 /* Size may be 0 on !SMP && !NUMA */ 5817 if (sizeof(struct per_cpu_zonestat) > 0) 5818 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5819 5820 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5821 for_each_possible_cpu(cpu) { 5822 struct per_cpu_pages *pcp; 5823 struct per_cpu_zonestat *pzstats; 5824 5825 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5826 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5827 per_cpu_pages_init(pcp, pzstats); 5828 } 5829 5830 zone_set_pageset_high_and_batch(zone, 0); 5831 } 5832 5833 /* 5834 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5835 * page high values need to be recalculated. 5836 */ 5837 static void zone_pcp_update(struct zone *zone, int cpu_online) 5838 { 5839 mutex_lock(&pcp_batch_high_lock); 5840 zone_set_pageset_high_and_batch(zone, cpu_online); 5841 mutex_unlock(&pcp_batch_high_lock); 5842 } 5843 5844 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 5845 { 5846 struct per_cpu_pages *pcp; 5847 struct cpu_cacheinfo *cci; 5848 5849 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5850 cci = get_cpu_cacheinfo(cpu); 5851 /* 5852 * If data cache slice of CPU is large enough, "pcp->batch" 5853 * pages can be preserved in PCP before draining PCP for 5854 * consecutive high-order pages freeing without allocation. 5855 * This can reduce zone lock contention without hurting 5856 * cache-hot pages sharing. 
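 *
 * Illustrative example (assumed cache geometry): with a 2MiB per-CPU
 * data cache slice and 4KiB pages the slice covers 512 pages, so the
 * PCPF_FREE_HIGH_BATCH flag below is set only while 3 * pcp->batch
 * stays under 512, i.e. for batch values of roughly 170 or less.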
5857 */ 5858 spin_lock(&pcp->lock); 5859 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 5860 pcp->flags |= PCPF_FREE_HIGH_BATCH; 5861 else 5862 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 5863 spin_unlock(&pcp->lock); 5864 } 5865 5866 void setup_pcp_cacheinfo(unsigned int cpu) 5867 { 5868 struct zone *zone; 5869 5870 for_each_populated_zone(zone) 5871 zone_pcp_update_cacheinfo(zone, cpu); 5872 } 5873 5874 /* 5875 * Allocate per cpu pagesets and initialize them. 5876 * Before this call only boot pagesets were available. 5877 */ 5878 void __init setup_per_cpu_pageset(void) 5879 { 5880 struct pglist_data *pgdat; 5881 struct zone *zone; 5882 int __maybe_unused cpu; 5883 5884 for_each_populated_zone(zone) 5885 setup_zone_pageset(zone); 5886 5887 #ifdef CONFIG_NUMA 5888 /* 5889 * Unpopulated zones continue using the boot pagesets. 5890 * The numa stats for these pagesets need to be reset. 5891 * Otherwise, they will end up skewing the stats of 5892 * the nodes these zones are associated with. 5893 */ 5894 for_each_possible_cpu(cpu) { 5895 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5896 memset(pzstats->vm_numa_event, 0, 5897 sizeof(pzstats->vm_numa_event)); 5898 } 5899 #endif 5900 5901 for_each_online_pgdat(pgdat) 5902 pgdat->per_cpu_nodestats = 5903 alloc_percpu(struct per_cpu_nodestat); 5904 } 5905 5906 __meminit void zone_pcp_init(struct zone *zone) 5907 { 5908 /* 5909 * per cpu subsystem is not up at this point. The following code 5910 * relies on the ability of the linker to provide the 5911 * offset of a (static) per cpu variable into the per cpu area. 5912 */ 5913 zone->per_cpu_pageset = &boot_pageset; 5914 zone->per_cpu_zonestats = &boot_zonestats; 5915 zone->pageset_high_min = BOOT_PAGESET_HIGH; 5916 zone->pageset_high_max = BOOT_PAGESET_HIGH; 5917 zone->pageset_batch = BOOT_PAGESET_BATCH; 5918 5919 if (populated_zone(zone)) 5920 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5921 zone->present_pages, zone_batchsize(zone)); 5922 } 5923 5924 static void setup_per_zone_lowmem_reserve(void); 5925 5926 void adjust_managed_page_count(struct page *page, long count) 5927 { 5928 atomic_long_add(count, &page_zone(page)->managed_pages); 5929 totalram_pages_add(count); 5930 setup_per_zone_lowmem_reserve(); 5931 } 5932 EXPORT_SYMBOL(adjust_managed_page_count); 5933 5934 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5935 { 5936 void *pos; 5937 unsigned long pages = 0; 5938 5939 start = (void *)PAGE_ALIGN((unsigned long)start); 5940 end = (void *)((unsigned long)end & PAGE_MASK); 5941 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5942 struct page *page = virt_to_page(pos); 5943 void *direct_map_addr; 5944 5945 /* 5946 * 'direct_map_addr' might be different from 'pos' 5947 * because some architectures' virt_to_page() 5948 * work with aliases. Getting the direct map 5949 * address ensures that we get a _writeable_ 5950 * alias for the memset(). 5951 */ 5952 direct_map_addr = page_address(page); 5953 /* 5954 * Perform a kasan-unchecked memset() since this memory 5955 * has not been initialized. 
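 *
 * Note that a negative 'poison' argument (e.g. -1) fails the "<= 0xFF"
 * check below, so callers can pass -1 to skip poisoning entirely; only
 * values that fit in a byte are used as a poison pattern.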
5956 */ 5957 direct_map_addr = kasan_reset_tag(direct_map_addr); 5958 if ((unsigned int)poison <= 0xFF) 5959 memset(direct_map_addr, poison, PAGE_SIZE); 5960 5961 free_reserved_page(page); 5962 } 5963 5964 if (pages && s) 5965 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 5966 5967 return pages; 5968 } 5969 5970 void free_reserved_page(struct page *page) 5971 { 5972 clear_page_tag_ref(page); 5973 ClearPageReserved(page); 5974 init_page_count(page); 5975 __free_page(page); 5976 adjust_managed_page_count(page, 1); 5977 } 5978 EXPORT_SYMBOL(free_reserved_page); 5979 5980 static int page_alloc_cpu_dead(unsigned int cpu) 5981 { 5982 struct zone *zone; 5983 5984 lru_add_drain_cpu(cpu); 5985 mlock_drain_remote(cpu); 5986 drain_pages(cpu); 5987 5988 /* 5989 * Spill the event counters of the dead processor 5990 * into the current processors event counters. 5991 * This artificially elevates the count of the current 5992 * processor. 5993 */ 5994 vm_events_fold_cpu(cpu); 5995 5996 /* 5997 * Zero the differential counters of the dead processor 5998 * so that the vm statistics are consistent. 5999 * 6000 * This is only okay since the processor is dead and cannot 6001 * race with what we are doing. 6002 */ 6003 cpu_vm_stats_fold(cpu); 6004 6005 for_each_populated_zone(zone) 6006 zone_pcp_update(zone, 0); 6007 6008 return 0; 6009 } 6010 6011 static int page_alloc_cpu_online(unsigned int cpu) 6012 { 6013 struct zone *zone; 6014 6015 for_each_populated_zone(zone) 6016 zone_pcp_update(zone, 1); 6017 return 0; 6018 } 6019 6020 void __init page_alloc_init_cpuhp(void) 6021 { 6022 int ret; 6023 6024 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 6025 "mm/page_alloc:pcp", 6026 page_alloc_cpu_online, 6027 page_alloc_cpu_dead); 6028 WARN_ON(ret < 0); 6029 } 6030 6031 /* 6032 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6033 * or min_free_kbytes changes. 6034 */ 6035 static void calculate_totalreserve_pages(void) 6036 { 6037 struct pglist_data *pgdat; 6038 unsigned long reserve_pages = 0; 6039 enum zone_type i, j; 6040 6041 for_each_online_pgdat(pgdat) { 6042 6043 pgdat->totalreserve_pages = 0; 6044 6045 for (i = 0; i < MAX_NR_ZONES; i++) { 6046 struct zone *zone = pgdat->node_zones + i; 6047 long max = 0; 6048 unsigned long managed_pages = zone_managed_pages(zone); 6049 6050 /* Find valid and maximum lowmem_reserve in the zone */ 6051 for (j = i; j < MAX_NR_ZONES; j++) { 6052 if (zone->lowmem_reserve[j] > max) 6053 max = zone->lowmem_reserve[j]; 6054 } 6055 6056 /* we treat the high watermark as reserved pages. */ 6057 max += high_wmark_pages(zone); 6058 6059 if (max > managed_pages) 6060 max = managed_pages; 6061 6062 pgdat->totalreserve_pages += max; 6063 6064 reserve_pages += max; 6065 } 6066 } 6067 totalreserve_pages = reserve_pages; 6068 trace_mm_calculate_totalreserve_pages(totalreserve_pages); 6069 } 6070 6071 /* 6072 * setup_per_zone_lowmem_reserve - called whenever 6073 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6074 * has a correct pages reserved value, so an adequate number of 6075 * pages are left in the zone after a successful __alloc_pages(). 
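 *
 * Illustrative example (assumed zone layout and ratio): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] == 256 and 4GiB (1048576
 * pages) of ZONE_NORMAL stacked above it, ZONE_DMA32 ends up with
 * lowmem_reserve[ZONE_NORMAL] == 1048576 / 256 == 4096 pages that a
 * ZONE_NORMAL-capable allocation may not consume from ZONE_DMA32.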
6076 */ 6077 static void setup_per_zone_lowmem_reserve(void) 6078 { 6079 struct pglist_data *pgdat; 6080 enum zone_type i, j; 6081 6082 for_each_online_pgdat(pgdat) { 6083 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 6084 struct zone *zone = &pgdat->node_zones[i]; 6085 int ratio = sysctl_lowmem_reserve_ratio[i]; 6086 bool clear = !ratio || !zone_managed_pages(zone); 6087 unsigned long managed_pages = 0; 6088 6089 for (j = i + 1; j < MAX_NR_ZONES; j++) { 6090 struct zone *upper_zone = &pgdat->node_zones[j]; 6091 6092 managed_pages += zone_managed_pages(upper_zone); 6093 6094 if (clear) 6095 zone->lowmem_reserve[j] = 0; 6096 else 6097 zone->lowmem_reserve[j] = managed_pages / ratio; 6098 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, 6099 zone->lowmem_reserve[j]); 6100 } 6101 } 6102 } 6103 6104 /* update totalreserve_pages */ 6105 calculate_totalreserve_pages(); 6106 } 6107 6108 static void __setup_per_zone_wmarks(void) 6109 { 6110 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6111 unsigned long lowmem_pages = 0; 6112 struct zone *zone; 6113 unsigned long flags; 6114 6115 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 6116 for_each_zone(zone) { 6117 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 6118 lowmem_pages += zone_managed_pages(zone); 6119 } 6120 6121 for_each_zone(zone) { 6122 u64 tmp; 6123 6124 spin_lock_irqsave(&zone->lock, flags); 6125 tmp = (u64)pages_min * zone_managed_pages(zone); 6126 tmp = div64_ul(tmp, lowmem_pages); 6127 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 6128 /* 6129 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6130 * need highmem and movable zones pages, so cap pages_min 6131 * to a small value here. 6132 * 6133 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6134 * deltas control async page reclaim, and so should 6135 * not be capped for highmem and movable zones. 6136 */ 6137 unsigned long min_pages; 6138 6139 min_pages = zone_managed_pages(zone) / 1024; 6140 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6141 zone->_watermark[WMARK_MIN] = min_pages; 6142 } else { 6143 /* 6144 * If it's a lowmem zone, reserve a number of pages 6145 * proportionate to the zone's size. 6146 */ 6147 zone->_watermark[WMARK_MIN] = tmp; 6148 } 6149 6150 /* 6151 * Set the kswapd watermarks distance according to the 6152 * scale factor in proportion to available memory, but 6153 * ensure a minimum size on small systems. 6154 */ 6155 tmp = max_t(u64, tmp >> 2, 6156 mult_frac(zone_managed_pages(zone), 6157 watermark_scale_factor, 10000)); 6158 6159 zone->watermark_boost = 0; 6160 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6161 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 6162 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 6163 trace_mm_setup_per_zone_wmarks(zone); 6164 6165 spin_unlock_irqrestore(&zone->lock, flags); 6166 } 6167 6168 /* update totalreserve_pages */ 6169 calculate_totalreserve_pages(); 6170 } 6171 6172 /** 6173 * setup_per_zone_wmarks - called when min_free_kbytes changes 6174 * or when memory is hot-{added|removed} 6175 * 6176 * Ensures that the watermark[min,low,high] values for each zone are set 6177 * correctly with respect to min_free_kbytes. 
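 *
 * Illustrative example (assumed single 4GiB lowmem zone, 4KiB pages,
 * default watermark_scale_factor of 10): min_free_kbytes == 16384 gives
 * pages_min == 4096, hence WMARK_MIN == 4096; the kswapd gap is
 * max(4096 >> 2, 1048576 * 10 / 10000) == 1048, so WMARK_LOW == 5144
 * and WMARK_HIGH == 6192.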
6178 */ 6179 void setup_per_zone_wmarks(void) 6180 { 6181 struct zone *zone; 6182 static DEFINE_SPINLOCK(lock); 6183 6184 spin_lock(&lock); 6185 __setup_per_zone_wmarks(); 6186 spin_unlock(&lock); 6187 6188 /* 6189 * The watermark size have changed so update the pcpu batch 6190 * and high limits or the limits may be inappropriate. 6191 */ 6192 for_each_zone(zone) 6193 zone_pcp_update(zone, 0); 6194 } 6195 6196 /* 6197 * Initialise min_free_kbytes. 6198 * 6199 * For small machines we want it small (128k min). For large machines 6200 * we want it large (256MB max). But it is not linear, because network 6201 * bandwidth does not increase linearly with machine size. We use 6202 * 6203 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6204 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6205 * 6206 * which yields 6207 * 6208 * 16MB: 512k 6209 * 32MB: 724k 6210 * 64MB: 1024k 6211 * 128MB: 1448k 6212 * 256MB: 2048k 6213 * 512MB: 2896k 6214 * 1024MB: 4096k 6215 * 2048MB: 5792k 6216 * 4096MB: 8192k 6217 * 8192MB: 11584k 6218 * 16384MB: 16384k 6219 */ 6220 void calculate_min_free_kbytes(void) 6221 { 6222 unsigned long lowmem_kbytes; 6223 int new_min_free_kbytes; 6224 6225 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6226 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6227 6228 if (new_min_free_kbytes > user_min_free_kbytes) 6229 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6230 else 6231 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6232 new_min_free_kbytes, user_min_free_kbytes); 6233 6234 } 6235 6236 int __meminit init_per_zone_wmark_min(void) 6237 { 6238 calculate_min_free_kbytes(); 6239 setup_per_zone_wmarks(); 6240 refresh_zone_stat_thresholds(); 6241 setup_per_zone_lowmem_reserve(); 6242 6243 #ifdef CONFIG_NUMA 6244 setup_min_unmapped_ratio(); 6245 setup_min_slab_ratio(); 6246 #endif 6247 6248 khugepaged_min_free_kbytes_update(); 6249 6250 return 0; 6251 } 6252 postcore_initcall(init_per_zone_wmark_min) 6253 6254 /* 6255 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6256 * that we can call two helper functions whenever min_free_kbytes 6257 * changes. 
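 *
 * For example, "echo 65536 > /proc/sys/vm/min_free_kbytes" ends up here
 * with write == true, records the value in user_min_free_kbytes and
 * recomputes every zone's watermarks via setup_per_zone_wmarks().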
6258 */ 6259 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6260 void *buffer, size_t *length, loff_t *ppos) 6261 { 6262 int rc; 6263 6264 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6265 if (rc) 6266 return rc; 6267 6268 if (write) { 6269 user_min_free_kbytes = min_free_kbytes; 6270 setup_per_zone_wmarks(); 6271 } 6272 return 0; 6273 } 6274 6275 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6276 void *buffer, size_t *length, loff_t *ppos) 6277 { 6278 int rc; 6279 6280 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6281 if (rc) 6282 return rc; 6283 6284 if (write) 6285 setup_per_zone_wmarks(); 6286 6287 return 0; 6288 } 6289 6290 #ifdef CONFIG_NUMA 6291 static void setup_min_unmapped_ratio(void) 6292 { 6293 pg_data_t *pgdat; 6294 struct zone *zone; 6295 6296 for_each_online_pgdat(pgdat) 6297 pgdat->min_unmapped_pages = 0; 6298 6299 for_each_zone(zone) 6300 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6301 sysctl_min_unmapped_ratio) / 100; 6302 } 6303 6304 6305 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6306 void *buffer, size_t *length, loff_t *ppos) 6307 { 6308 int rc; 6309 6310 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6311 if (rc) 6312 return rc; 6313 6314 setup_min_unmapped_ratio(); 6315 6316 return 0; 6317 } 6318 6319 static void setup_min_slab_ratio(void) 6320 { 6321 pg_data_t *pgdat; 6322 struct zone *zone; 6323 6324 for_each_online_pgdat(pgdat) 6325 pgdat->min_slab_pages = 0; 6326 6327 for_each_zone(zone) 6328 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6329 sysctl_min_slab_ratio) / 100; 6330 } 6331 6332 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6333 void *buffer, size_t *length, loff_t *ppos) 6334 { 6335 int rc; 6336 6337 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6338 if (rc) 6339 return rc; 6340 6341 setup_min_slab_ratio(); 6342 6343 return 0; 6344 } 6345 #endif 6346 6347 /* 6348 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6349 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 6350 * whenever sysctl_lowmem_reserve_ratio changes. 6351 * 6352 * The reserve ratio obviously has absolutely no relation with the 6353 * minimum watermarks. The lowmem reserve ratio can only make sense 6354 * if in function of the boot time zone sizes. 6355 */ 6356 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6357 int write, void *buffer, size_t *length, loff_t *ppos) 6358 { 6359 int i; 6360 6361 proc_dointvec_minmax(table, write, buffer, length, ppos); 6362 6363 for (i = 0; i < MAX_NR_ZONES; i++) { 6364 if (sysctl_lowmem_reserve_ratio[i] < 1) 6365 sysctl_lowmem_reserve_ratio[i] = 0; 6366 } 6367 6368 setup_per_zone_lowmem_reserve(); 6369 return 0; 6370 } 6371 6372 /* 6373 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6374 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6375 * pagelist can have before it gets flushed back to buddy allocator. 
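 *
 * Illustrative example (assumed values): an 8GiB zone (2097152 managed
 * pages) with 8 local CPUs and percpu_pagelist_high_fraction set to the
 * minimum of 8 gives each CPU a pcp->high of roughly
 * 2097152 / 8 / 8 == 32768 pages; writing 0 restores the default
 * low-watermark based auto-tuning.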
6376 */ 6377 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6378 int write, void *buffer, size_t *length, loff_t *ppos) 6379 { 6380 struct zone *zone; 6381 int old_percpu_pagelist_high_fraction; 6382 int ret; 6383 6384 mutex_lock(&pcp_batch_high_lock); 6385 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6386 6387 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6388 if (!write || ret < 0) 6389 goto out; 6390 6391 /* Sanity checking to avoid pcp imbalance */ 6392 if (percpu_pagelist_high_fraction && 6393 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6394 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6395 ret = -EINVAL; 6396 goto out; 6397 } 6398 6399 /* No change? */ 6400 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6401 goto out; 6402 6403 for_each_populated_zone(zone) 6404 zone_set_pageset_high_and_batch(zone, 0); 6405 out: 6406 mutex_unlock(&pcp_batch_high_lock); 6407 return ret; 6408 } 6409 6410 static const struct ctl_table page_alloc_sysctl_table[] = { 6411 { 6412 .procname = "min_free_kbytes", 6413 .data = &min_free_kbytes, 6414 .maxlen = sizeof(min_free_kbytes), 6415 .mode = 0644, 6416 .proc_handler = min_free_kbytes_sysctl_handler, 6417 .extra1 = SYSCTL_ZERO, 6418 }, 6419 { 6420 .procname = "watermark_boost_factor", 6421 .data = &watermark_boost_factor, 6422 .maxlen = sizeof(watermark_boost_factor), 6423 .mode = 0644, 6424 .proc_handler = proc_dointvec_minmax, 6425 .extra1 = SYSCTL_ZERO, 6426 }, 6427 { 6428 .procname = "watermark_scale_factor", 6429 .data = &watermark_scale_factor, 6430 .maxlen = sizeof(watermark_scale_factor), 6431 .mode = 0644, 6432 .proc_handler = watermark_scale_factor_sysctl_handler, 6433 .extra1 = SYSCTL_ONE, 6434 .extra2 = SYSCTL_THREE_THOUSAND, 6435 }, 6436 { 6437 .procname = "defrag_mode", 6438 .data = &defrag_mode, 6439 .maxlen = sizeof(defrag_mode), 6440 .mode = 0644, 6441 .proc_handler = proc_dointvec_minmax, 6442 .extra1 = SYSCTL_ZERO, 6443 .extra2 = SYSCTL_ONE, 6444 }, 6445 { 6446 .procname = "percpu_pagelist_high_fraction", 6447 .data = &percpu_pagelist_high_fraction, 6448 .maxlen = sizeof(percpu_pagelist_high_fraction), 6449 .mode = 0644, 6450 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6451 .extra1 = SYSCTL_ZERO, 6452 }, 6453 { 6454 .procname = "lowmem_reserve_ratio", 6455 .data = &sysctl_lowmem_reserve_ratio, 6456 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6457 .mode = 0644, 6458 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6459 }, 6460 #ifdef CONFIG_NUMA 6461 { 6462 .procname = "numa_zonelist_order", 6463 .data = &numa_zonelist_order, 6464 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6465 .mode = 0644, 6466 .proc_handler = numa_zonelist_order_handler, 6467 }, 6468 { 6469 .procname = "min_unmapped_ratio", 6470 .data = &sysctl_min_unmapped_ratio, 6471 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6472 .mode = 0644, 6473 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6474 .extra1 = SYSCTL_ZERO, 6475 .extra2 = SYSCTL_ONE_HUNDRED, 6476 }, 6477 { 6478 .procname = "min_slab_ratio", 6479 .data = &sysctl_min_slab_ratio, 6480 .maxlen = sizeof(sysctl_min_slab_ratio), 6481 .mode = 0644, 6482 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6483 .extra1 = SYSCTL_ZERO, 6484 .extra2 = SYSCTL_ONE_HUNDRED, 6485 }, 6486 #endif 6487 }; 6488 6489 void __init page_alloc_sysctl_init(void) 6490 { 6491 register_sysctl_init("vm", page_alloc_sysctl_table); 6492 } 6493 6494 #ifdef 
CONFIG_CONTIG_ALLOC 6495 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6496 static void alloc_contig_dump_pages(struct list_head *page_list) 6497 { 6498 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6499 6500 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 6501 struct page *page; 6502 6503 dump_stack(); 6504 list_for_each_entry(page, page_list, lru) 6505 dump_page(page, "migration failure"); 6506 } 6507 } 6508 6509 /* 6510 * [start, end) must belong to a single zone. 6511 * @migratetype: using migratetype to filter the type of migration in 6512 * trace_mm_alloc_contig_migrate_range_info. 6513 */ 6514 static int __alloc_contig_migrate_range(struct compact_control *cc, 6515 unsigned long start, unsigned long end, int migratetype) 6516 { 6517 /* This function is based on compact_zone() from compaction.c. */ 6518 unsigned int nr_reclaimed; 6519 unsigned long pfn = start; 6520 unsigned int tries = 0; 6521 int ret = 0; 6522 struct migration_target_control mtc = { 6523 .nid = zone_to_nid(cc->zone), 6524 .gfp_mask = cc->gfp_mask, 6525 .reason = MR_CONTIG_RANGE, 6526 }; 6527 struct page *page; 6528 unsigned long total_mapped = 0; 6529 unsigned long total_migrated = 0; 6530 unsigned long total_reclaimed = 0; 6531 6532 lru_cache_disable(); 6533 6534 while (pfn < end || !list_empty(&cc->migratepages)) { 6535 if (fatal_signal_pending(current)) { 6536 ret = -EINTR; 6537 break; 6538 } 6539 6540 if (list_empty(&cc->migratepages)) { 6541 cc->nr_migratepages = 0; 6542 ret = isolate_migratepages_range(cc, pfn, end); 6543 if (ret && ret != -EAGAIN) 6544 break; 6545 pfn = cc->migrate_pfn; 6546 tries = 0; 6547 } else if (++tries == 5) { 6548 ret = -EBUSY; 6549 break; 6550 } 6551 6552 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6553 &cc->migratepages); 6554 cc->nr_migratepages -= nr_reclaimed; 6555 6556 if (trace_mm_alloc_contig_migrate_range_info_enabled()) { 6557 total_reclaimed += nr_reclaimed; 6558 list_for_each_entry(page, &cc->migratepages, lru) { 6559 struct folio *folio = page_folio(page); 6560 6561 total_mapped += folio_mapped(folio) * 6562 folio_nr_pages(folio); 6563 } 6564 } 6565 6566 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6567 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6568 6569 if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret) 6570 total_migrated += cc->nr_migratepages; 6571 6572 /* 6573 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6574 * to retry again over this error, so do the same here. 6575 */ 6576 if (ret == -ENOMEM) 6577 break; 6578 } 6579 6580 lru_cache_enable(); 6581 if (ret < 0) { 6582 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6583 alloc_contig_dump_pages(&cc->migratepages); 6584 putback_movable_pages(&cc->migratepages); 6585 } 6586 6587 trace_mm_alloc_contig_migrate_range_info(start, end, migratetype, 6588 total_migrated, 6589 total_reclaimed, 6590 total_mapped); 6591 return (ret < 0) ? ret : 0; 6592 } 6593 6594 static void split_free_pages(struct list_head *list, gfp_t gfp_mask) 6595 { 6596 int order; 6597 6598 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6599 struct page *page, *next; 6600 int nr_pages = 1 << order; 6601 6602 list_for_each_entry_safe(page, next, &list[order], lru) { 6603 int i; 6604 6605 post_alloc_hook(page, order, gfp_mask); 6606 set_page_refcounted(page); 6607 if (!order) 6608 continue; 6609 6610 split_page(page, order); 6611 6612 /* Add all subpages to the order-0 head, in sequence. 
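 * (for example, an order-3 free page becomes eight order-0 pages queued
 * on list[0] in ascending pfn order)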
*/ 6613 list_del(&page->lru); 6614 for (i = 0; i < nr_pages; i++) 6615 list_add_tail(&page[i].lru, &list[0]); 6616 } 6617 } 6618 } 6619 6620 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) 6621 { 6622 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 6623 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | 6624 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO; 6625 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 6626 6627 /* 6628 * We are given the range to allocate; node, mobility and placement 6629 * hints are irrelevant at this point. We'll simply ignore them. 6630 */ 6631 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | 6632 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE); 6633 6634 /* 6635 * We only support most reclaim flags (but not NOFAIL/NORETRY), and 6636 * selected action flags. 6637 */ 6638 if (gfp_mask & ~(reclaim_mask | action_mask)) 6639 return -EINVAL; 6640 6641 /* 6642 * Flags to control page compaction/migration/reclaim, to free up our 6643 * page range. Migratable pages are movable, __GFP_MOVABLE is implied 6644 * for them. 6645 * 6646 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that 6647 * to not degrade callers. 6648 */ 6649 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | 6650 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; 6651 return 0; 6652 } 6653 6654 /** 6655 * alloc_contig_range() -- tries to allocate given range of pages 6656 * @start: start PFN to allocate 6657 * @end: one-past-the-last PFN to allocate 6658 * @migratetype: migratetype of the underlying pageblocks (either 6659 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6660 * in range must have the same migratetype and it must 6661 * be either of the two. 6662 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some 6663 * action and reclaim modifiers are supported. Reclaim modifiers 6664 * control allocation behavior during compaction/migration/reclaim. 6665 * 6666 * The PFN range does not have to be pageblock aligned. The PFN range must 6667 * belong to a single zone. 6668 * 6669 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6670 * pageblocks in the range. Once isolated, the pageblocks should not 6671 * be modified by others. 6672 * 6673 * Return: zero on success or negative error code. On success all 6674 * pages which PFN is in [start, end) are allocated for the caller and 6675 * need to be freed with free_contig_range(). 6676 */ 6677 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 6678 unsigned migratetype, gfp_t gfp_mask) 6679 { 6680 unsigned long outer_start, outer_end; 6681 int ret = 0; 6682 6683 struct compact_control cc = { 6684 .nr_migratepages = 0, 6685 .order = -1, 6686 .zone = page_zone(pfn_to_page(start)), 6687 .mode = MIGRATE_SYNC, 6688 .ignore_skip_hint = true, 6689 .no_set_skip_hint = true, 6690 .alloc_contig = true, 6691 }; 6692 INIT_LIST_HEAD(&cc.migratepages); 6693 6694 gfp_mask = current_gfp_context(gfp_mask); 6695 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) 6696 return -EINVAL; 6697 6698 /* 6699 * What we do here is we mark all pageblocks in range as 6700 * MIGRATE_ISOLATE. Because pageblock and max order pages may 6701 * have different sizes, and due to the way page allocator 6702 * work, start_isolate_page_range() has special handlings for this. 6703 * 6704 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6705 * migrate the pages from an unaligned range (ie. 
pages that 6706 * we are interested in). This will put all the pages in 6707 * range back to page allocator as MIGRATE_ISOLATE. 6708 * 6709 * When this is done, we take the pages in range from page 6710 * allocator removing them from the buddy system. This way 6711 * page allocator will never consider using them. 6712 * 6713 * This lets us mark the pageblocks back as 6714 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6715 * aligned range but not in the unaligned, original range are 6716 * put back to page allocator so that buddy can use them. 6717 */ 6718 6719 ret = start_isolate_page_range(start, end, migratetype, 0); 6720 if (ret) 6721 goto done; 6722 6723 drain_all_pages(cc.zone); 6724 6725 /* 6726 * In case of -EBUSY, we'd like to know which page causes problem. 6727 * So, just fall through. test_pages_isolated() has a tracepoint 6728 * which will report the busy page. 6729 * 6730 * It is possible that busy pages could become available before 6731 * the call to test_pages_isolated, and the range will actually be 6732 * allocated. So, if we fall through be sure to clear ret so that 6733 * -EBUSY is not accidentally used or returned to caller. 6734 */ 6735 ret = __alloc_contig_migrate_range(&cc, start, end, migratetype); 6736 if (ret && ret != -EBUSY) 6737 goto done; 6738 6739 /* 6740 * When in-use hugetlb pages are migrated, they may simply be released 6741 * back into the free hugepage pool instead of being returned to the 6742 * buddy system. After the migration of in-use huge pages is completed, 6743 * we will invoke replace_free_hugepage_folios() to ensure that these 6744 * hugepages are properly released to the buddy system. 6745 */ 6746 ret = replace_free_hugepage_folios(start, end); 6747 if (ret) 6748 goto done; 6749 6750 /* 6751 * Pages from [start, end) are within a pageblock_nr_pages 6752 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6753 * more, all pages in [start, end) are free in page allocator. 6754 * What we are going to do is to allocate all pages from 6755 * [start, end) (that is remove them from page allocator). 6756 * 6757 * The only problem is that pages at the beginning and at the 6758 * end of interesting range may be not aligned with pages that 6759 * page allocator holds, ie. they can be part of higher order 6760 * pages. Because of this, we reserve the bigger range and 6761 * once this is done free the pages we are not interested in. 6762 * 6763 * We don't have to hold zone->lock here because the pages are 6764 * isolated thus they won't get removed from buddy. 6765 */ 6766 outer_start = find_large_buddy(start); 6767 6768 /* Make sure the range is really isolated. */ 6769 if (test_pages_isolated(outer_start, end, 0)) { 6770 ret = -EBUSY; 6771 goto done; 6772 } 6773 6774 /* Grab isolated pages from freelists. 
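 * (outer_start can lie below 'start' when 'start' sits inside a larger
 * free buddy page; the surplus head and tail pages are handed back via
 * free_contig_range() further below)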
*/ 6775 outer_end = isolate_freepages_range(&cc, outer_start, end); 6776 if (!outer_end) { 6777 ret = -EBUSY; 6778 goto done; 6779 } 6780 6781 if (!(gfp_mask & __GFP_COMP)) { 6782 split_free_pages(cc.freepages, gfp_mask); 6783 6784 /* Free head and tail (if any) */ 6785 if (start != outer_start) 6786 free_contig_range(outer_start, start - outer_start); 6787 if (end != outer_end) 6788 free_contig_range(end, outer_end - end); 6789 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 6790 struct page *head = pfn_to_page(start); 6791 int order = ilog2(end - start); 6792 6793 check_new_pages(head, order); 6794 prep_new_page(head, order, gfp_mask, 0); 6795 set_page_refcounted(head); 6796 } else { 6797 ret = -EINVAL; 6798 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 6799 start, end, outer_start, outer_end); 6800 } 6801 done: 6802 undo_isolate_page_range(start, end, migratetype); 6803 return ret; 6804 } 6805 EXPORT_SYMBOL(alloc_contig_range_noprof); 6806 6807 static int __alloc_contig_pages(unsigned long start_pfn, 6808 unsigned long nr_pages, gfp_t gfp_mask) 6809 { 6810 unsigned long end_pfn = start_pfn + nr_pages; 6811 6812 return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE, 6813 gfp_mask); 6814 } 6815 6816 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6817 unsigned long nr_pages) 6818 { 6819 unsigned long i, end_pfn = start_pfn + nr_pages; 6820 struct page *page; 6821 6822 for (i = start_pfn; i < end_pfn; i++) { 6823 page = pfn_to_online_page(i); 6824 if (!page) 6825 return false; 6826 6827 if (page_zone(page) != z) 6828 return false; 6829 6830 if (PageReserved(page)) 6831 return false; 6832 6833 if (PageHuge(page)) 6834 return false; 6835 } 6836 return true; 6837 } 6838 6839 static bool zone_spans_last_pfn(const struct zone *zone, 6840 unsigned long start_pfn, unsigned long nr_pages) 6841 { 6842 unsigned long last_pfn = start_pfn + nr_pages - 1; 6843 6844 return zone_spans_pfn(zone, last_pfn); 6845 } 6846 6847 /** 6848 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6849 * @nr_pages: Number of contiguous pages to allocate 6850 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some 6851 * action and reclaim modifiers are supported. Reclaim modifiers 6852 * control allocation behavior during compaction/migration/reclaim. 6853 * @nid: Target node 6854 * @nodemask: Mask for other possible nodes 6855 * 6856 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6857 * on an applicable zonelist to find a contiguous pfn range which can then be 6858 * tried for allocation with alloc_contig_range(). This routine is intended 6859 * for allocation requests which can not be fulfilled with the buddy allocator. 6860 * 6861 * The allocated memory is always aligned to a page boundary. If nr_pages is a 6862 * power of two, then allocated range is also guaranteed to be aligned to same 6863 * nr_pages (e.g. 1GB request would be aligned to 1GB). 6864 * 6865 * Allocated pages can be freed with free_contig_range() or by manually calling 6866 * __free_page() on each allocated page. 6867 * 6868 * Return: pointer to contiguous pages on success, or NULL if not successful. 
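 *
 * Minimal usage sketch (illustrative only; error handling elided):
 *
 *	struct page *pages = alloc_contig_pages(512, GFP_KERNEL,
 *						numa_node_id(), NULL);
 *	if (pages)
 *		free_contig_range(page_to_pfn(pages), 512);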
6869 */ 6870 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 6871 int nid, nodemask_t *nodemask) 6872 { 6873 unsigned long ret, pfn, flags; 6874 struct zonelist *zonelist; 6875 struct zone *zone; 6876 struct zoneref *z; 6877 6878 zonelist = node_zonelist(nid, gfp_mask); 6879 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6880 gfp_zone(gfp_mask), nodemask) { 6881 spin_lock_irqsave(&zone->lock, flags); 6882 6883 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6884 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6885 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6886 /* 6887 * We release the zone lock here because 6888 * alloc_contig_range() will also lock the zone 6889 * at some point. If there's an allocation 6890 * spinning on this lock, it may win the race 6891 * and cause alloc_contig_range() to fail... 6892 */ 6893 spin_unlock_irqrestore(&zone->lock, flags); 6894 ret = __alloc_contig_pages(pfn, nr_pages, 6895 gfp_mask); 6896 if (!ret) 6897 return pfn_to_page(pfn); 6898 spin_lock_irqsave(&zone->lock, flags); 6899 } 6900 pfn += nr_pages; 6901 } 6902 spin_unlock_irqrestore(&zone->lock, flags); 6903 } 6904 return NULL; 6905 } 6906 #endif /* CONFIG_CONTIG_ALLOC */ 6907 6908 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6909 { 6910 unsigned long count = 0; 6911 struct folio *folio = pfn_folio(pfn); 6912 6913 if (folio_test_large(folio)) { 6914 int expected = folio_nr_pages(folio); 6915 6916 if (nr_pages == expected) 6917 folio_put(folio); 6918 else 6919 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n", 6920 pfn, nr_pages, expected); 6921 return; 6922 } 6923 6924 for (; nr_pages--; pfn++) { 6925 struct page *page = pfn_to_page(pfn); 6926 6927 count += page_count(page) != 1; 6928 __free_page(page); 6929 } 6930 WARN(count != 0, "%lu pages are still in use!\n", count); 6931 } 6932 EXPORT_SYMBOL(free_contig_range); 6933 6934 /* 6935 * Effectively disable pcplists for the zone by setting the high limit to 0 6936 * and draining all cpus. A concurrent page freeing on another CPU that's about 6937 * to put the page on pcplist will either finish before the drain and the page 6938 * will be drained, or observe the new high limit and skip the pcplist. 6939 * 6940 * Must be paired with a call to zone_pcp_enable(). 6941 */ 6942 void zone_pcp_disable(struct zone *zone) 6943 { 6944 mutex_lock(&pcp_batch_high_lock); 6945 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 6946 __drain_all_pages(zone, true); 6947 } 6948 6949 void zone_pcp_enable(struct zone *zone) 6950 { 6951 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 6952 zone->pageset_high_max, zone->pageset_batch); 6953 mutex_unlock(&pcp_batch_high_lock); 6954 } 6955 6956 void zone_pcp_reset(struct zone *zone) 6957 { 6958 int cpu; 6959 struct per_cpu_zonestat *pzstats; 6960 6961 if (zone->per_cpu_pageset != &boot_pageset) { 6962 for_each_online_cpu(cpu) { 6963 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6964 drain_zonestat(zone, pzstats); 6965 } 6966 free_percpu(zone->per_cpu_pageset); 6967 zone->per_cpu_pageset = &boot_pageset; 6968 if (zone->per_cpu_zonestats != &boot_zonestats) { 6969 free_percpu(zone->per_cpu_zonestats); 6970 zone->per_cpu_zonestats = &boot_zonestats; 6971 } 6972 } 6973 } 6974 6975 #ifdef CONFIG_MEMORY_HOTREMOVE 6976 /* 6977 * All pages in the range must be in a single zone, must not contain holes, 6978 * must span full sections, and must be isolated before calling this function. 
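 * (isolated meaning every pageblock in the range was switched to
 * MIGRATE_ISOLATE beforehand, so nothing can allocate the free pages
 * while they are being pulled off the free lists here)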
6979 * 6980 * Returns the number of managed (non-PageOffline()) pages in the range: the 6981 * number of pages for which memory offlining code must adjust managed page 6982 * counters using adjust_managed_page_count(). 6983 */ 6984 unsigned long __offline_isolated_pages(unsigned long start_pfn, 6985 unsigned long end_pfn) 6986 { 6987 unsigned long already_offline = 0, flags; 6988 unsigned long pfn = start_pfn; 6989 struct page *page; 6990 struct zone *zone; 6991 unsigned int order; 6992 6993 offline_mem_sections(pfn, end_pfn); 6994 zone = page_zone(pfn_to_page(pfn)); 6995 spin_lock_irqsave(&zone->lock, flags); 6996 while (pfn < end_pfn) { 6997 page = pfn_to_page(pfn); 6998 /* 6999 * The HWPoisoned page may be not in buddy system, and 7000 * page_count() is not 0. 7001 */ 7002 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 7003 pfn++; 7004 continue; 7005 } 7006 /* 7007 * At this point all remaining PageOffline() pages have a 7008 * reference count of 0 and can simply be skipped. 7009 */ 7010 if (PageOffline(page)) { 7011 BUG_ON(page_count(page)); 7012 BUG_ON(PageBuddy(page)); 7013 already_offline++; 7014 pfn++; 7015 continue; 7016 } 7017 7018 BUG_ON(page_count(page)); 7019 BUG_ON(!PageBuddy(page)); 7020 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 7021 order = buddy_order(page); 7022 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 7023 pfn += (1 << order); 7024 } 7025 spin_unlock_irqrestore(&zone->lock, flags); 7026 7027 return end_pfn - start_pfn - already_offline; 7028 } 7029 #endif 7030 7031 /* 7032 * This function returns a stable result only if called under zone lock. 7033 */ 7034 bool is_free_buddy_page(const struct page *page) 7035 { 7036 unsigned long pfn = page_to_pfn(page); 7037 unsigned int order; 7038 7039 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7040 const struct page *head = page - (pfn & ((1 << order) - 1)); 7041 7042 if (PageBuddy(head) && 7043 buddy_order_unsafe(head) >= order) 7044 break; 7045 } 7046 7047 return order <= MAX_PAGE_ORDER; 7048 } 7049 EXPORT_SYMBOL(is_free_buddy_page); 7050 7051 #ifdef CONFIG_MEMORY_FAILURE 7052 static inline void add_to_free_list(struct page *page, struct zone *zone, 7053 unsigned int order, int migratetype, 7054 bool tail) 7055 { 7056 __add_to_free_list(page, zone, order, migratetype, tail); 7057 account_freepages(zone, 1 << order, migratetype); 7058 } 7059 7060 /* 7061 * Break down a higher-order page in sub-pages, and keep our target out of 7062 * buddy allocator. 7063 */ 7064 static void break_down_buddy_pages(struct zone *zone, struct page *page, 7065 struct page *target, int low, int high, 7066 int migratetype) 7067 { 7068 unsigned long size = 1 << high; 7069 struct page *current_buddy; 7070 7071 while (high > low) { 7072 high--; 7073 size >>= 1; 7074 7075 if (target >= &page[size]) { 7076 current_buddy = page; 7077 page = page + size; 7078 } else { 7079 current_buddy = page + size; 7080 } 7081 7082 if (set_page_guard(zone, current_buddy, high)) 7083 continue; 7084 7085 add_to_free_list(current_buddy, zone, high, migratetype, false); 7086 set_buddy_order(current_buddy, high); 7087 } 7088 } 7089 7090 /* 7091 * Take a page that will be marked as poisoned off the buddy allocator. 
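 *
 * The free buddy page containing it is removed from the free list and
 * split with break_down_buddy_pages() so that every sub-page except the
 * target goes back onto the free lists; the target itself is left out
 * and flagged with SetPageHWPoisonTakenOff().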
7092 */ 7093 bool take_page_off_buddy(struct page *page) 7094 { 7095 struct zone *zone = page_zone(page); 7096 unsigned long pfn = page_to_pfn(page); 7097 unsigned long flags; 7098 unsigned int order; 7099 bool ret = false; 7100 7101 spin_lock_irqsave(&zone->lock, flags); 7102 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7103 struct page *page_head = page - (pfn & ((1 << order) - 1)); 7104 int page_order = buddy_order(page_head); 7105 7106 if (PageBuddy(page_head) && page_order >= order) { 7107 unsigned long pfn_head = page_to_pfn(page_head); 7108 int migratetype = get_pfnblock_migratetype(page_head, 7109 pfn_head); 7110 7111 del_page_from_free_list(page_head, zone, page_order, 7112 migratetype); 7113 break_down_buddy_pages(zone, page_head, page, 0, 7114 page_order, migratetype); 7115 SetPageHWPoisonTakenOff(page); 7116 ret = true; 7117 break; 7118 } 7119 if (page_count(page_head) > 0) 7120 break; 7121 } 7122 spin_unlock_irqrestore(&zone->lock, flags); 7123 return ret; 7124 } 7125 7126 /* 7127 * Cancel takeoff done by take_page_off_buddy(). 7128 */ 7129 bool put_page_back_buddy(struct page *page) 7130 { 7131 struct zone *zone = page_zone(page); 7132 unsigned long flags; 7133 bool ret = false; 7134 7135 spin_lock_irqsave(&zone->lock, flags); 7136 if (put_page_testzero(page)) { 7137 unsigned long pfn = page_to_pfn(page); 7138 int migratetype = get_pfnblock_migratetype(page, pfn); 7139 7140 ClearPageHWPoisonTakenOff(page); 7141 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 7142 if (TestClearPageHWPoison(page)) { 7143 ret = true; 7144 } 7145 } 7146 spin_unlock_irqrestore(&zone->lock, flags); 7147 7148 return ret; 7149 } 7150 #endif 7151 7152 #ifdef CONFIG_ZONE_DMA 7153 bool has_managed_dma(void) 7154 { 7155 struct pglist_data *pgdat; 7156 7157 for_each_online_pgdat(pgdat) { 7158 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 7159 7160 if (managed_zone(zone)) 7161 return true; 7162 } 7163 return false; 7164 } 7165 #endif /* CONFIG_ZONE_DMA */ 7166 7167 #ifdef CONFIG_UNACCEPTED_MEMORY 7168 7169 static bool lazy_accept = true; 7170 7171 static int __init accept_memory_parse(char *p) 7172 { 7173 if (!strcmp(p, "lazy")) { 7174 lazy_accept = true; 7175 return 0; 7176 } else if (!strcmp(p, "eager")) { 7177 lazy_accept = false; 7178 return 0; 7179 } else { 7180 return -EINVAL; 7181 } 7182 } 7183 early_param("accept_memory", accept_memory_parse); 7184 7185 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7186 { 7187 phys_addr_t start = page_to_phys(page); 7188 7189 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 7190 } 7191 7192 static void __accept_page(struct zone *zone, unsigned long *flags, 7193 struct page *page) 7194 { 7195 list_del(&page->lru); 7196 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7197 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7198 __ClearPageUnaccepted(page); 7199 spin_unlock_irqrestore(&zone->lock, *flags); 7200 7201 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 7202 7203 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 7204 } 7205 7206 void accept_page(struct page *page) 7207 { 7208 struct zone *zone = page_zone(page); 7209 unsigned long flags; 7210 7211 spin_lock_irqsave(&zone->lock, flags); 7212 if (!PageUnaccepted(page)) { 7213 spin_unlock_irqrestore(&zone->lock, flags); 7214 return; 7215 } 7216 7217 /* Unlocks zone->lock */ 7218 __accept_page(zone, &flags, page); 7219 } 7220 7221 static bool try_to_accept_memory_one(struct zone *zone) 7222 { 7223 
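	/* Accept and free one MAX_ORDER chunk from this zone's unaccepted list, if any. */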
unsigned long flags; 7224 struct page *page; 7225 7226 spin_lock_irqsave(&zone->lock, flags); 7227 page = list_first_entry_or_null(&zone->unaccepted_pages, 7228 struct page, lru); 7229 if (!page) { 7230 spin_unlock_irqrestore(&zone->lock, flags); 7231 return false; 7232 } 7233 7234 /* Unlocks zone->lock */ 7235 __accept_page(zone, &flags, page); 7236 7237 return true; 7238 } 7239 7240 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7241 int alloc_flags) 7242 { 7243 long to_accept, wmark; 7244 bool ret = false; 7245 7246 if (list_empty(&zone->unaccepted_pages)) 7247 return false; 7248 7249 /* Bailout, since try_to_accept_memory_one() needs to take a lock */ 7250 if (alloc_flags & ALLOC_TRYLOCK) 7251 return false; 7252 7253 wmark = promo_wmark_pages(zone); 7254 7255 /* 7256 * Watermarks have not been initialized yet. 7257 * 7258 * Accepting one MAX_ORDER page to ensure progress. 7259 */ 7260 if (!wmark) 7261 return try_to_accept_memory_one(zone); 7262 7263 /* How much to accept to get to promo watermark? */ 7264 to_accept = wmark - 7265 (zone_page_state(zone, NR_FREE_PAGES) - 7266 __zone_watermark_unusable_free(zone, order, 0) - 7267 zone_page_state(zone, NR_UNACCEPTED)); 7268 7269 while (to_accept > 0) { 7270 if (!try_to_accept_memory_one(zone)) 7271 break; 7272 ret = true; 7273 to_accept -= MAX_ORDER_NR_PAGES; 7274 } 7275 7276 return ret; 7277 } 7278 7279 static bool __free_unaccepted(struct page *page) 7280 { 7281 struct zone *zone = page_zone(page); 7282 unsigned long flags; 7283 7284 if (!lazy_accept) 7285 return false; 7286 7287 spin_lock_irqsave(&zone->lock, flags); 7288 list_add_tail(&page->lru, &zone->unaccepted_pages); 7289 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7290 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7291 __SetPageUnaccepted(page); 7292 spin_unlock_irqrestore(&zone->lock, flags); 7293 7294 return true; 7295 } 7296 7297 #else 7298 7299 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7300 { 7301 return false; 7302 } 7303 7304 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7305 int alloc_flags) 7306 { 7307 return false; 7308 } 7309 7310 static bool __free_unaccepted(struct page *page) 7311 { 7312 BUILD_BUG(); 7313 return false; 7314 } 7315 7316 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7317 7318 /** 7319 * try_alloc_pages - opportunistic reentrant allocation from any context 7320 * @nid: node to allocate from 7321 * @order: allocation order size 7322 * 7323 * Allocates pages of a given order from the given node. This is safe to 7324 * call from any context (from atomic, NMI, and also reentrant 7325 * allocator -> tracepoint -> try_alloc_pages_noprof). 7326 * Allocation is best effort and to be expected to fail easily so nobody should 7327 * rely on the success. Failures are not reported via warn_alloc(). 7328 * See always fail conditions below. 7329 * 7330 * Return: allocated page or NULL on failure. 7331 */ 7332 struct page *try_alloc_pages_noprof(int nid, unsigned int order) 7333 { 7334 /* 7335 * Do not specify __GFP_DIRECT_RECLAIM, since direct claim is not allowed. 7336 * Do not specify __GFP_KSWAPD_RECLAIM either, since wake up of kswapd 7337 * is not safe in arbitrary context. 7338 * 7339 * These two are the conditions for gfpflags_allow_spinning() being true. 7340 * 7341 * Specify __GFP_NOWARN since failing try_alloc_pages() is not a reason 7342 * to warn. Also warn would trigger printk() which is unsafe from 7343 * various contexts. 
We cannot use printk_deferred_enter() to mitigate, 7344 * since the running context is unknown. 7345 * 7346 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below 7347 * is safe in any context. Also zeroing the page is mandatory for 7348 * BPF use cases. 7349 * 7350 * Though __GFP_NOMEMALLOC is not checked in the code path below, 7351 * specify it here to highlight that try_alloc_pages() 7352 * doesn't want to deplete reserves. 7353 */ 7354 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC 7355 | __GFP_ACCOUNT; 7356 unsigned int alloc_flags = ALLOC_TRYLOCK; 7357 struct alloc_context ac = { }; 7358 struct page *page; 7359 7360 /* 7361 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is 7362 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current 7363 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will 7364 * mark the task as the owner of another rt_spin_lock which will 7365 * confuse PI logic, so return immediately if called form hard IRQ or 7366 * NMI. 7367 * 7368 * Note, irqs_disabled() case is ok. This function can be called 7369 * from raw_spin_lock_irqsave region. 7370 */ 7371 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) 7372 return NULL; 7373 if (!pcp_allowed_order(order)) 7374 return NULL; 7375 7376 /* Bailout, since _deferred_grow_zone() needs to take a lock */ 7377 if (deferred_pages_enabled()) 7378 return NULL; 7379 7380 if (nid == NUMA_NO_NODE) 7381 nid = numa_node_id(); 7382 7383 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac, 7384 &alloc_gfp, &alloc_flags); 7385 7386 /* 7387 * Best effort allocation from percpu free list. 7388 * If it's empty attempt to spin_trylock zone->lock. 7389 */ 7390 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 7391 7392 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */ 7393 7394 if (page) 7395 set_page_refcounted(page); 7396 7397 if (memcg_kmem_online() && page && 7398 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) { 7399 free_pages_nolock(page, order); 7400 page = NULL; 7401 } 7402 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 7403 kmsan_alloc_page(page, order, alloc_gfp); 7404 return page; 7405 } 7406
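/*
 * Minimal usage sketch for try_alloc_pages() above (illustrative only;
 * the returned page is already zeroed because __GFP_ZERO is forced):
 *
 *	struct page *page = try_alloc_pages(NUMA_NO_NODE, 0);
 *
 *	if (page)
 *		free_pages_nolock(page, 0);
 */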