1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/kasan.h> 26 #include <linux/kmsan.h> 27 #include <linux/module.h> 28 #include <linux/suspend.h> 29 #include <linux/ratelimit.h> 30 #include <linux/oom.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/pagevec.h> 36 #include <linux/memory_hotplug.h> 37 #include <linux/nodemask.h> 38 #include <linux/vmstat.h> 39 #include <linux/fault-inject.h> 40 #include <linux/compaction.h> 41 #include <trace/events/kmem.h> 42 #include <trace/events/oom.h> 43 #include <linux/prefetch.h> 44 #include <linux/mm_inline.h> 45 #include <linux/mmu_notifier.h> 46 #include <linux/migrate.h> 47 #include <linux/sched/mm.h> 48 #include <linux/page_owner.h> 49 #include <linux/page_table_check.h> 50 #include <linux/memcontrol.h> 51 #include <linux/ftrace.h> 52 #include <linux/lockdep.h> 53 #include <linux/psi.h> 54 #include <linux/khugepaged.h> 55 #include <linux/delayacct.h> 56 #include <linux/cacheinfo.h> 57 #include <linux/pgalloc_tag.h> 58 #include <asm/div64.h> 59 #include "internal.h" 60 #include "shuffle.h" 61 #include "page_reporting.h" 62 63 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 64 typedef int __bitwise fpi_t; 65 66 /* No special request */ 67 #define FPI_NONE ((__force fpi_t)0) 68 69 /* 70 * Skip free page reporting notification for the (possibly merged) page. 71 * This does not hinder free page reporting from grabbing the page, 72 * reporting it and marking it "reported" - it only skips notifying 73 * the free page reporting infrastructure about a newly freed page. For 74 * example, used when temporarily pulling a page from a freelist and 75 * putting it back unmodified. 76 */ 77 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 78 79 /* 80 * Place the (possibly merged) page to the tail of the freelist. Will ignore 81 * page shuffling (relevant code - e.g., memory onlining - is expected to 82 * shuffle the whole zone). 83 * 84 * Note: No code should rely on this flag for correctness - it's purely 85 * to allow for optimizations when handing back either fresh pages 86 * (memory onlining) or untouched pages (page isolation, free page 87 * reporting). 88 */ 89 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 90 91 /* Free the page without taking locks. Rely on trylock only. */ 92 #define FPI_TRYLOCK ((__force fpi_t)BIT(2)) 93 94 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 95 static DEFINE_MUTEX(pcp_batch_high_lock); 96 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 97 98 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 99 /* 100 * On SMP, spin_trylock is sufficient protection. 101 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 102 * Pass flags to a no-op inline function to typecheck and silence the unused 103 * variable warning. 104 */ 105 static inline void __pcp_trylock_noop(unsigned long *flags) { } 106 #define pcp_trylock_prepare(flags) __pcp_trylock_noop(&(flags)) 107 #define pcp_trylock_finish(flags) __pcp_trylock_noop(&(flags)) 108 #else 109 110 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ 111 #define pcp_trylock_prepare(flags) local_irq_save(flags) 112 #define pcp_trylock_finish(flags) local_irq_restore(flags) 113 #endif 114 115 /* 116 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid 117 * a migration causing the wrong PCP to be locked and remote memory being 118 * potentially allocated, pin the task to the CPU for the lookup+lock. 119 * preempt_disable is used on !RT because it is faster than migrate_disable. 120 * migrate_disable is used on RT because otherwise RT spinlock usage is 121 * interfered with and a high priority task cannot preempt the allocator. 122 */ 123 #ifndef CONFIG_PREEMPT_RT 124 #define pcpu_task_pin() preempt_disable() 125 #define pcpu_task_unpin() preempt_enable() 126 #else 127 #define pcpu_task_pin() migrate_disable() 128 #define pcpu_task_unpin() migrate_enable() 129 #endif 130 131 /* 132 * Generic helper to lookup and a per-cpu variable with an embedded spinlock. 133 * Return value should be used with equivalent unlock helper. 134 */ 135 #define pcpu_spin_trylock(type, member, ptr) \ 136 ({ \ 137 type *_ret; \ 138 pcpu_task_pin(); \ 139 _ret = this_cpu_ptr(ptr); \ 140 if (!spin_trylock(&_ret->member)) { \ 141 pcpu_task_unpin(); \ 142 _ret = NULL; \ 143 } \ 144 _ret; \ 145 }) 146 147 #define pcpu_spin_unlock(member, ptr) \ 148 ({ \ 149 spin_unlock(&ptr->member); \ 150 pcpu_task_unpin(); \ 151 }) 152 153 /* struct per_cpu_pages specific helpers. */ 154 #define pcp_spin_trylock(ptr, UP_flags) \ 155 ({ \ 156 struct per_cpu_pages *__ret; \ 157 pcp_trylock_prepare(UP_flags); \ 158 __ret = pcpu_spin_trylock(struct per_cpu_pages, lock, ptr); \ 159 if (!__ret) \ 160 pcp_trylock_finish(UP_flags); \ 161 __ret; \ 162 }) 163 164 #define pcp_spin_unlock(ptr, UP_flags) \ 165 ({ \ 166 pcpu_spin_unlock(lock, ptr); \ 167 pcp_trylock_finish(UP_flags); \ 168 }) 169 170 /* 171 * With the UP spinlock implementation, when we spin_lock(&pcp->lock) (for i.e. 172 * a potentially remote cpu drain) and get interrupted by an operation that 173 * attempts pcp_spin_trylock(), we can't rely on the trylock failure due to UP 174 * spinlock assumptions making the trylock a no-op. So we have to turn that 175 * spin_lock() to a spin_lock_irqsave(). This works because on UP there are no 176 * remote cpu's so we can only be locking the only existing local one. 177 */ 178 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 179 static inline void __flags_noop(unsigned long *flags) { } 180 #define pcp_spin_lock_maybe_irqsave(ptr, flags) \ 181 ({ \ 182 __flags_noop(&(flags)); \ 183 spin_lock(&(ptr)->lock); \ 184 }) 185 #define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \ 186 ({ \ 187 spin_unlock(&(ptr)->lock); \ 188 __flags_noop(&(flags)); \ 189 }) 190 #else 191 #define pcp_spin_lock_maybe_irqsave(ptr, flags) \ 192 spin_lock_irqsave(&(ptr)->lock, flags) 193 #define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \ 194 spin_unlock_irqrestore(&(ptr)->lock, flags) 195 #endif 196 197 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 198 DEFINE_PER_CPU(int, numa_node); 199 EXPORT_PER_CPU_SYMBOL(numa_node); 200 #endif 201 202 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 203 204 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 205 /* 206 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 207 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 208 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 209 * defined in <linux/topology.h>. 210 */ 211 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 212 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 213 #endif 214 215 static DEFINE_MUTEX(pcpu_drain_mutex); 216 217 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 218 volatile unsigned long latent_entropy __latent_entropy; 219 EXPORT_SYMBOL(latent_entropy); 220 #endif 221 222 /* 223 * Array of node states. 224 */ 225 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 226 [N_POSSIBLE] = NODE_MASK_ALL, 227 [N_ONLINE] = { { [0] = 1UL } }, 228 #ifndef CONFIG_NUMA 229 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 230 #ifdef CONFIG_HIGHMEM 231 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 232 #endif 233 [N_MEMORY] = { { [0] = 1UL } }, 234 [N_CPU] = { { [0] = 1UL } }, 235 #endif /* NUMA */ 236 }; 237 EXPORT_SYMBOL(node_states); 238 239 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 240 241 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 242 unsigned int pageblock_order __read_mostly; 243 #endif 244 245 static void __free_pages_ok(struct page *page, unsigned int order, 246 fpi_t fpi_flags); 247 248 /* 249 * results with 256, 32 in the lowmem_reserve sysctl: 250 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 251 * 1G machine -> (16M dma, 784M normal, 224M high) 252 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 253 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 254 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 255 * 256 * TBD: should special case ZONE_DMA32 machines here - in those we normally 257 * don't need any ZONE_NORMAL reservation 258 */ 259 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 260 #ifdef CONFIG_ZONE_DMA 261 [ZONE_DMA] = 256, 262 #endif 263 #ifdef CONFIG_ZONE_DMA32 264 [ZONE_DMA32] = 256, 265 #endif 266 [ZONE_NORMAL] = 32, 267 #ifdef CONFIG_HIGHMEM 268 [ZONE_HIGHMEM] = 0, 269 #endif 270 [ZONE_MOVABLE] = 0, 271 }; 272 273 char * const zone_names[MAX_NR_ZONES] = { 274 #ifdef CONFIG_ZONE_DMA 275 "DMA", 276 #endif 277 #ifdef CONFIG_ZONE_DMA32 278 "DMA32", 279 #endif 280 "Normal", 281 #ifdef CONFIG_HIGHMEM 282 "HighMem", 283 #endif 284 "Movable", 285 #ifdef CONFIG_ZONE_DEVICE 286 "Device", 287 #endif 288 }; 289 290 const char * const migratetype_names[MIGRATE_TYPES] = { 291 "Unmovable", 292 "Movable", 293 "Reclaimable", 294 "HighAtomic", 295 #ifdef CONFIG_CMA 296 "CMA", 297 #endif 298 #ifdef CONFIG_MEMORY_ISOLATION 299 "Isolate", 300 #endif 301 }; 302 303 int min_free_kbytes = 1024; 304 int user_min_free_kbytes = -1; 305 static int watermark_boost_factor __read_mostly = 15000; 306 static int watermark_scale_factor = 10; 307 int defrag_mode; 308 309 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 310 int movable_zone; 311 EXPORT_SYMBOL(movable_zone); 312 313 #if MAX_NUMNODES > 1 314 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 315 unsigned int nr_online_nodes __read_mostly = 1; 316 EXPORT_SYMBOL(nr_node_ids); 317 EXPORT_SYMBOL(nr_online_nodes); 318 #endif 319 320 static bool page_contains_unaccepted(struct page *page, unsigned int order); 321 static bool cond_accept_memory(struct zone *zone, unsigned int order, 322 int alloc_flags); 323 static bool __free_unaccepted(struct page *page); 324 325 int page_group_by_mobility_disabled __read_mostly; 326 327 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 328 /* 329 * During boot we initialize deferred pages on-demand, as needed, but once 330 * page_alloc_init_late() has finished, the deferred pages are all initialized, 331 * and we can permanently disable that path. 332 */ 333 DEFINE_STATIC_KEY_TRUE(deferred_pages); 334 335 static inline bool deferred_pages_enabled(void) 336 { 337 return static_branch_unlikely(&deferred_pages); 338 } 339 340 /* 341 * deferred_grow_zone() is __init, but it is called from 342 * get_page_from_freelist() during early boot until deferred_pages permanently 343 * disables this call. This is why we have refdata wrapper to avoid warning, 344 * and to ensure that the function body gets unloaded. 345 */ 346 static bool __ref 347 _deferred_grow_zone(struct zone *zone, unsigned int order) 348 { 349 return deferred_grow_zone(zone, order); 350 } 351 #else 352 static inline bool deferred_pages_enabled(void) 353 { 354 return false; 355 } 356 357 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) 358 { 359 return false; 360 } 361 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 362 363 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 364 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 365 unsigned long pfn) 366 { 367 #ifdef CONFIG_SPARSEMEM 368 return section_to_usemap(__pfn_to_section(pfn)); 369 #else 370 return page_zone(page)->pageblock_flags; 371 #endif /* CONFIG_SPARSEMEM */ 372 } 373 374 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 375 { 376 #ifdef CONFIG_SPARSEMEM 377 pfn &= (PAGES_PER_SECTION-1); 378 #else 379 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 380 #endif /* CONFIG_SPARSEMEM */ 381 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 382 } 383 384 static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit) 385 { 386 return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS; 387 } 388 389 static __always_inline void 390 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn, 391 unsigned long **bitmap_word, unsigned long *bitidx) 392 { 393 unsigned long *bitmap; 394 unsigned long word_bitidx; 395 396 #ifdef CONFIG_MEMORY_ISOLATION 397 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8); 398 #else 399 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 400 #endif 401 BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK); 402 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 403 404 bitmap = get_pageblock_bitmap(page, pfn); 405 *bitidx = pfn_to_bitidx(page, pfn); 406 word_bitidx = *bitidx / BITS_PER_LONG; 407 *bitidx &= (BITS_PER_LONG - 1); 408 *bitmap_word = &bitmap[word_bitidx]; 409 } 410 411 412 /** 413 * __get_pfnblock_flags_mask - Return the requested group of flags for 414 * a pageblock_nr_pages block of pages 415 * @page: The page within the block of interest 416 * @pfn: The target page frame number 417 * @mask: mask of bits that the caller is interested in 418 * 419 * Return: pageblock_bits flags 420 */ 421 static unsigned long __get_pfnblock_flags_mask(const struct page *page, 422 unsigned long pfn, 423 unsigned long mask) 424 { 425 unsigned long *bitmap_word; 426 unsigned long bitidx; 427 unsigned long word; 428 429 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 430 /* 431 * This races, without locks, with set_pfnblock_migratetype(). Ensure 432 * a consistent read of the memory array, so that results, even though 433 * racy, are not corrupted. 434 */ 435 word = READ_ONCE(*bitmap_word); 436 return (word >> bitidx) & mask; 437 } 438 439 /** 440 * get_pfnblock_bit - Check if a standalone bit of a pageblock is set 441 * @page: The page within the block of interest 442 * @pfn: The target page frame number 443 * @pb_bit: pageblock bit to check 444 * 445 * Return: true if the bit is set, otherwise false 446 */ 447 bool get_pfnblock_bit(const struct page *page, unsigned long pfn, 448 enum pageblock_bits pb_bit) 449 { 450 unsigned long *bitmap_word; 451 unsigned long bitidx; 452 453 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 454 return false; 455 456 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 457 458 return test_bit(bitidx + pb_bit, bitmap_word); 459 } 460 461 /** 462 * get_pfnblock_migratetype - Return the migratetype of a pageblock 463 * @page: The page within the block of interest 464 * @pfn: The target page frame number 465 * 466 * Return: The migratetype of the pageblock 467 * 468 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn 469 * to save a call to page_to_pfn(). 470 */ 471 __always_inline enum migratetype 472 get_pfnblock_migratetype(const struct page *page, unsigned long pfn) 473 { 474 unsigned long mask = MIGRATETYPE_AND_ISO_MASK; 475 unsigned long flags; 476 477 flags = __get_pfnblock_flags_mask(page, pfn, mask); 478 479 #ifdef CONFIG_MEMORY_ISOLATION 480 if (flags & BIT(PB_migrate_isolate)) 481 return MIGRATE_ISOLATE; 482 #endif 483 return flags & MIGRATETYPE_MASK; 484 } 485 486 /** 487 * __set_pfnblock_flags_mask - Set the requested group of flags for 488 * a pageblock_nr_pages block of pages 489 * @page: The page within the block of interest 490 * @pfn: The target page frame number 491 * @flags: The flags to set 492 * @mask: mask of bits that the caller is interested in 493 */ 494 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn, 495 unsigned long flags, unsigned long mask) 496 { 497 unsigned long *bitmap_word; 498 unsigned long bitidx; 499 unsigned long word; 500 501 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 502 503 mask <<= bitidx; 504 flags <<= bitidx; 505 506 word = READ_ONCE(*bitmap_word); 507 do { 508 } while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags)); 509 } 510 511 /** 512 * set_pfnblock_bit - Set a standalone bit of a pageblock 513 * @page: The page within the block of interest 514 * @pfn: The target page frame number 515 * @pb_bit: pageblock bit to set 516 */ 517 void set_pfnblock_bit(const struct page *page, unsigned long pfn, 518 enum pageblock_bits pb_bit) 519 { 520 unsigned long *bitmap_word; 521 unsigned long bitidx; 522 523 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 524 return; 525 526 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 527 528 set_bit(bitidx + pb_bit, bitmap_word); 529 } 530 531 /** 532 * clear_pfnblock_bit - Clear a standalone bit of a pageblock 533 * @page: The page within the block of interest 534 * @pfn: The target page frame number 535 * @pb_bit: pageblock bit to clear 536 */ 537 void clear_pfnblock_bit(const struct page *page, unsigned long pfn, 538 enum pageblock_bits pb_bit) 539 { 540 unsigned long *bitmap_word; 541 unsigned long bitidx; 542 543 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 544 return; 545 546 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 547 548 clear_bit(bitidx + pb_bit, bitmap_word); 549 } 550 551 /** 552 * set_pageblock_migratetype - Set the migratetype of a pageblock 553 * @page: The page within the block of interest 554 * @migratetype: migratetype to set 555 */ 556 static void set_pageblock_migratetype(struct page *page, 557 enum migratetype migratetype) 558 { 559 if (unlikely(page_group_by_mobility_disabled && 560 migratetype < MIGRATE_PCPTYPES)) 561 migratetype = MIGRATE_UNMOVABLE; 562 563 #ifdef CONFIG_MEMORY_ISOLATION 564 if (migratetype == MIGRATE_ISOLATE) { 565 VM_WARN_ONCE(1, 566 "Use set_pageblock_isolate() for pageblock isolation"); 567 return; 568 } 569 VM_WARN_ONCE(get_pageblock_isolate(page), 570 "Use clear_pageblock_isolate() to unisolate pageblock"); 571 /* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */ 572 #endif 573 __set_pfnblock_flags_mask(page, page_to_pfn(page), 574 (unsigned long)migratetype, 575 MIGRATETYPE_AND_ISO_MASK); 576 } 577 578 void __meminit init_pageblock_migratetype(struct page *page, 579 enum migratetype migratetype, 580 bool isolate) 581 { 582 unsigned long flags; 583 584 if (unlikely(page_group_by_mobility_disabled && 585 migratetype < MIGRATE_PCPTYPES)) 586 migratetype = MIGRATE_UNMOVABLE; 587 588 flags = migratetype; 589 590 #ifdef CONFIG_MEMORY_ISOLATION 591 if (migratetype == MIGRATE_ISOLATE) { 592 VM_WARN_ONCE( 593 1, 594 "Set isolate=true to isolate pageblock with a migratetype"); 595 return; 596 } 597 if (isolate) 598 flags |= BIT(PB_migrate_isolate); 599 #endif 600 __set_pfnblock_flags_mask(page, page_to_pfn(page), flags, 601 MIGRATETYPE_AND_ISO_MASK); 602 } 603 604 #ifdef CONFIG_DEBUG_VM 605 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 606 { 607 int ret; 608 unsigned seq; 609 unsigned long pfn = page_to_pfn(page); 610 unsigned long sp, start_pfn; 611 612 do { 613 seq = zone_span_seqbegin(zone); 614 start_pfn = zone->zone_start_pfn; 615 sp = zone->spanned_pages; 616 ret = !zone_spans_pfn(zone, pfn); 617 } while (zone_span_seqretry(zone, seq)); 618 619 if (ret) 620 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 621 pfn, zone_to_nid(zone), zone->name, 622 start_pfn, start_pfn + sp); 623 624 return ret; 625 } 626 627 /* 628 * Temporary debugging check for pages not lying within a given zone. 629 */ 630 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 631 { 632 if (page_outside_zone_boundaries(zone, page)) 633 return true; 634 if (zone != page_zone(page)) 635 return true; 636 637 return false; 638 } 639 #else 640 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 641 { 642 return false; 643 } 644 #endif 645 646 static void bad_page(struct page *page, const char *reason) 647 { 648 static unsigned long resume; 649 static unsigned long nr_shown; 650 static unsigned long nr_unshown; 651 652 /* 653 * Allow a burst of 60 reports, then keep quiet for that minute; 654 * or allow a steady drip of one report per second. 655 */ 656 if (nr_shown == 60) { 657 if (time_before(jiffies, resume)) { 658 nr_unshown++; 659 goto out; 660 } 661 if (nr_unshown) { 662 pr_alert( 663 "BUG: Bad page state: %lu messages suppressed\n", 664 nr_unshown); 665 nr_unshown = 0; 666 } 667 nr_shown = 0; 668 } 669 if (nr_shown++ == 0) 670 resume = jiffies + 60 * HZ; 671 672 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 673 current->comm, page_to_pfn(page)); 674 dump_page(page, reason); 675 676 print_modules(); 677 dump_stack(); 678 out: 679 /* Leave bad fields for debug, except PageBuddy could make trouble */ 680 if (PageBuddy(page)) 681 __ClearPageBuddy(page); 682 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 683 } 684 685 static inline unsigned int order_to_pindex(int migratetype, int order) 686 { 687 688 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 689 bool movable; 690 if (order > PAGE_ALLOC_COSTLY_ORDER) { 691 VM_BUG_ON(order != HPAGE_PMD_ORDER); 692 693 movable = migratetype == MIGRATE_MOVABLE; 694 695 return NR_LOWORDER_PCP_LISTS + movable; 696 } 697 #else 698 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 699 #endif 700 701 return (MIGRATE_PCPTYPES * order) + migratetype; 702 } 703 704 static inline int pindex_to_order(unsigned int pindex) 705 { 706 int order = pindex / MIGRATE_PCPTYPES; 707 708 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 709 if (pindex >= NR_LOWORDER_PCP_LISTS) 710 order = HPAGE_PMD_ORDER; 711 #else 712 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 713 #endif 714 715 return order; 716 } 717 718 static inline bool pcp_allowed_order(unsigned int order) 719 { 720 if (order <= PAGE_ALLOC_COSTLY_ORDER) 721 return true; 722 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 723 if (order == HPAGE_PMD_ORDER) 724 return true; 725 #endif 726 return false; 727 } 728 729 /* 730 * Higher-order pages are called "compound pages". They are structured thusly: 731 * 732 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 733 * 734 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 735 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 736 * 737 * The first tail page's ->compound_order holds the order of allocation. 738 * This usage means that zero-order pages may not be compound. 739 */ 740 741 void prep_compound_page(struct page *page, unsigned int order) 742 { 743 int i; 744 int nr_pages = 1 << order; 745 746 __SetPageHead(page); 747 for (i = 1; i < nr_pages; i++) 748 prep_compound_tail(page, i); 749 750 prep_compound_head(page, order); 751 } 752 753 static inline void set_buddy_order(struct page *page, unsigned int order) 754 { 755 set_page_private(page, order); 756 __SetPageBuddy(page); 757 } 758 759 #ifdef CONFIG_COMPACTION 760 static inline struct capture_control *task_capc(struct zone *zone) 761 { 762 struct capture_control *capc = current->capture_control; 763 764 return unlikely(capc) && 765 !(current->flags & PF_KTHREAD) && 766 !capc->page && 767 capc->cc->zone == zone ? capc : NULL; 768 } 769 770 static inline bool 771 compaction_capture(struct capture_control *capc, struct page *page, 772 int order, int migratetype) 773 { 774 if (!capc || order != capc->cc->order) 775 return false; 776 777 /* Do not accidentally pollute CMA or isolated regions*/ 778 if (is_migrate_cma(migratetype) || 779 is_migrate_isolate(migratetype)) 780 return false; 781 782 /* 783 * Do not let lower order allocations pollute a movable pageblock 784 * unless compaction is also requesting movable pages. 785 * This might let an unmovable request use a reclaimable pageblock 786 * and vice-versa but no more than normal fallback logic which can 787 * have trouble finding a high-order free page. 788 */ 789 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 790 capc->cc->migratetype != MIGRATE_MOVABLE) 791 return false; 792 793 if (migratetype != capc->cc->migratetype) 794 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, 795 capc->cc->migratetype, migratetype); 796 797 capc->page = page; 798 return true; 799 } 800 801 #else 802 static inline struct capture_control *task_capc(struct zone *zone) 803 { 804 return NULL; 805 } 806 807 static inline bool 808 compaction_capture(struct capture_control *capc, struct page *page, 809 int order, int migratetype) 810 { 811 return false; 812 } 813 #endif /* CONFIG_COMPACTION */ 814 815 static inline void account_freepages(struct zone *zone, int nr_pages, 816 int migratetype) 817 { 818 lockdep_assert_held(&zone->lock); 819 820 if (is_migrate_isolate(migratetype)) 821 return; 822 823 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 824 825 if (is_migrate_cma(migratetype)) 826 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 827 else if (migratetype == MIGRATE_HIGHATOMIC) 828 WRITE_ONCE(zone->nr_free_highatomic, 829 zone->nr_free_highatomic + nr_pages); 830 } 831 832 /* Used for pages not on another list */ 833 static inline void __add_to_free_list(struct page *page, struct zone *zone, 834 unsigned int order, int migratetype, 835 bool tail) 836 { 837 struct free_area *area = &zone->free_area[order]; 838 int nr_pages = 1 << order; 839 840 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 841 "page type is %d, passed migratetype is %d (nr=%d)\n", 842 get_pageblock_migratetype(page), migratetype, nr_pages); 843 844 if (tail) 845 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 846 else 847 list_add(&page->buddy_list, &area->free_list[migratetype]); 848 area->nr_free++; 849 850 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 851 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 852 } 853 854 /* 855 * Used for pages which are on another list. Move the pages to the tail 856 * of the list - so the moved pages won't immediately be considered for 857 * allocation again (e.g., optimization for memory onlining). 858 */ 859 static inline void move_to_free_list(struct page *page, struct zone *zone, 860 unsigned int order, int old_mt, int new_mt) 861 { 862 struct free_area *area = &zone->free_area[order]; 863 int nr_pages = 1 << order; 864 865 /* Free page moving can fail, so it happens before the type update */ 866 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, 867 "page type is %d, passed migratetype is %d (nr=%d)\n", 868 get_pageblock_migratetype(page), old_mt, nr_pages); 869 870 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); 871 872 account_freepages(zone, -nr_pages, old_mt); 873 account_freepages(zone, nr_pages, new_mt); 874 875 if (order >= pageblock_order && 876 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { 877 if (!is_migrate_isolate(old_mt)) 878 nr_pages = -nr_pages; 879 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 880 } 881 } 882 883 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, 884 unsigned int order, int migratetype) 885 { 886 int nr_pages = 1 << order; 887 888 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 889 "page type is %d, passed migratetype is %d (nr=%d)\n", 890 get_pageblock_migratetype(page), migratetype, nr_pages); 891 892 /* clear reported state and update reported page count */ 893 if (page_reported(page)) 894 __ClearPageReported(page); 895 896 list_del(&page->buddy_list); 897 __ClearPageBuddy(page); 898 set_page_private(page, 0); 899 zone->free_area[order].nr_free--; 900 901 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 902 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); 903 } 904 905 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 906 unsigned int order, int migratetype) 907 { 908 __del_page_from_free_list(page, zone, order, migratetype); 909 account_freepages(zone, -(1 << order), migratetype); 910 } 911 912 static inline struct page *get_page_from_free_area(struct free_area *area, 913 int migratetype) 914 { 915 return list_first_entry_or_null(&area->free_list[migratetype], 916 struct page, buddy_list); 917 } 918 919 /* 920 * If this is less than the 2nd largest possible page, check if the buddy 921 * of the next-higher order is free. If it is, it's possible 922 * that pages are being freed that will coalesce soon. In case, 923 * that is happening, add the free page to the tail of the list 924 * so it's less likely to be used soon and more likely to be merged 925 * as a 2-level higher order page 926 */ 927 static inline bool 928 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 929 struct page *page, unsigned int order) 930 { 931 unsigned long higher_page_pfn; 932 struct page *higher_page; 933 934 if (order >= MAX_PAGE_ORDER - 1) 935 return false; 936 937 higher_page_pfn = buddy_pfn & pfn; 938 higher_page = page + (higher_page_pfn - pfn); 939 940 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 941 NULL) != NULL; 942 } 943 944 static void change_pageblock_range(struct page *pageblock_page, 945 int start_order, int migratetype) 946 { 947 int nr_pageblocks = 1 << (start_order - pageblock_order); 948 949 while (nr_pageblocks--) { 950 set_pageblock_migratetype(pageblock_page, migratetype); 951 pageblock_page += pageblock_nr_pages; 952 } 953 } 954 955 /* 956 * Freeing function for a buddy system allocator. 957 * 958 * The concept of a buddy system is to maintain direct-mapped table 959 * (containing bit values) for memory blocks of various "orders". 960 * The bottom level table contains the map for the smallest allocatable 961 * units of memory (here, pages), and each level above it describes 962 * pairs of units from the levels below, hence, "buddies". 963 * At a high level, all that happens here is marking the table entry 964 * at the bottom level available, and propagating the changes upward 965 * as necessary, plus some accounting needed to play nicely with other 966 * parts of the VM system. 967 * At each level, we keep a list of pages, which are heads of continuous 968 * free pages of length of (1 << order) and marked with PageBuddy. 969 * Page's order is recorded in page_private(page) field. 970 * So when we are allocating or freeing one, we can derive the state of the 971 * other. That is, if we allocate a small block, and both were 972 * free, the remainder of the region must be split into blocks. 973 * If a block is freed, and its buddy is also free, then this 974 * triggers coalescing into a block of larger size. 975 * 976 * -- nyc 977 */ 978 979 static inline void __free_one_page(struct page *page, 980 unsigned long pfn, 981 struct zone *zone, unsigned int order, 982 int migratetype, fpi_t fpi_flags) 983 { 984 struct capture_control *capc = task_capc(zone); 985 unsigned long buddy_pfn = 0; 986 unsigned long combined_pfn; 987 struct page *buddy; 988 bool to_tail; 989 990 VM_BUG_ON(!zone_is_initialized(zone)); 991 VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page); 992 993 VM_BUG_ON(migratetype == -1); 994 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 995 VM_BUG_ON_PAGE(bad_range(zone, page), page); 996 997 account_freepages(zone, 1 << order, migratetype); 998 999 while (order < MAX_PAGE_ORDER) { 1000 int buddy_mt = migratetype; 1001 1002 if (compaction_capture(capc, page, order, migratetype)) { 1003 account_freepages(zone, -(1 << order), migratetype); 1004 return; 1005 } 1006 1007 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 1008 if (!buddy) 1009 goto done_merging; 1010 1011 if (unlikely(order >= pageblock_order)) { 1012 /* 1013 * We want to prevent merge between freepages on pageblock 1014 * without fallbacks and normal pageblock. Without this, 1015 * pageblock isolation could cause incorrect freepage or CMA 1016 * accounting or HIGHATOMIC accounting. 1017 */ 1018 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 1019 1020 if (migratetype != buddy_mt && 1021 (!migratetype_is_mergeable(migratetype) || 1022 !migratetype_is_mergeable(buddy_mt))) 1023 goto done_merging; 1024 } 1025 1026 /* 1027 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 1028 * merge with it and move up one order. 1029 */ 1030 if (page_is_guard(buddy)) 1031 clear_page_guard(zone, buddy, order); 1032 else 1033 __del_page_from_free_list(buddy, zone, order, buddy_mt); 1034 1035 if (unlikely(buddy_mt != migratetype)) { 1036 /* 1037 * Match buddy type. This ensures that an 1038 * expand() down the line puts the sub-blocks 1039 * on the right freelists. 1040 */ 1041 change_pageblock_range(buddy, order, migratetype); 1042 } 1043 1044 combined_pfn = buddy_pfn & pfn; 1045 page = page + (combined_pfn - pfn); 1046 pfn = combined_pfn; 1047 order++; 1048 } 1049 1050 done_merging: 1051 set_buddy_order(page, order); 1052 1053 if (fpi_flags & FPI_TO_TAIL) 1054 to_tail = true; 1055 else if (is_shuffle_order(order)) 1056 to_tail = shuffle_pick_tail(); 1057 else 1058 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 1059 1060 __add_to_free_list(page, zone, order, migratetype, to_tail); 1061 1062 /* Notify page reporting subsystem of freed page */ 1063 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 1064 page_reporting_notify_free(order); 1065 } 1066 1067 /* 1068 * A bad page could be due to a number of fields. Instead of multiple branches, 1069 * try and check multiple fields with one check. The caller must do a detailed 1070 * check if necessary. 1071 */ 1072 static inline bool page_expected_state(struct page *page, 1073 unsigned long check_flags) 1074 { 1075 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1076 return false; 1077 1078 if (unlikely((unsigned long)page->mapping | 1079 page_ref_count(page) | 1080 #ifdef CONFIG_MEMCG 1081 page->memcg_data | 1082 #endif 1083 page_pool_page_is_pp(page) | 1084 (page->flags.f & check_flags))) 1085 return false; 1086 1087 return true; 1088 } 1089 1090 static const char *page_bad_reason(struct page *page, unsigned long flags) 1091 { 1092 const char *bad_reason = NULL; 1093 1094 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1095 bad_reason = "nonzero mapcount"; 1096 if (unlikely(page->mapping != NULL)) 1097 bad_reason = "non-NULL mapping"; 1098 if (unlikely(page_ref_count(page) != 0)) 1099 bad_reason = "nonzero _refcount"; 1100 if (unlikely(page->flags.f & flags)) { 1101 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 1102 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 1103 else 1104 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 1105 } 1106 #ifdef CONFIG_MEMCG 1107 if (unlikely(page->memcg_data)) 1108 bad_reason = "page still charged to cgroup"; 1109 #endif 1110 if (unlikely(page_pool_page_is_pp(page))) 1111 bad_reason = "page_pool leak"; 1112 return bad_reason; 1113 } 1114 1115 static inline bool free_page_is_bad(struct page *page) 1116 { 1117 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 1118 return false; 1119 1120 /* Something has gone sideways, find it */ 1121 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 1122 return true; 1123 } 1124 1125 static inline bool is_check_pages_enabled(void) 1126 { 1127 return static_branch_unlikely(&check_pages_enabled); 1128 } 1129 1130 static int free_tail_page_prepare(struct page *head_page, struct page *page) 1131 { 1132 struct folio *folio = (struct folio *)head_page; 1133 int ret = 1; 1134 1135 /* 1136 * We rely page->lru.next never has bit 0 set, unless the page 1137 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 1138 */ 1139 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 1140 1141 if (!is_check_pages_enabled()) { 1142 ret = 0; 1143 goto out; 1144 } 1145 switch (page - head_page) { 1146 case 1: 1147 /* the first tail page: these may be in place of ->mapping */ 1148 if (unlikely(folio_large_mapcount(folio))) { 1149 bad_page(page, "nonzero large_mapcount"); 1150 goto out; 1151 } 1152 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && 1153 unlikely(atomic_read(&folio->_nr_pages_mapped))) { 1154 bad_page(page, "nonzero nr_pages_mapped"); 1155 goto out; 1156 } 1157 if (IS_ENABLED(CONFIG_MM_ID)) { 1158 if (unlikely(folio->_mm_id_mapcount[0] != -1)) { 1159 bad_page(page, "nonzero mm mapcount 0"); 1160 goto out; 1161 } 1162 if (unlikely(folio->_mm_id_mapcount[1] != -1)) { 1163 bad_page(page, "nonzero mm mapcount 1"); 1164 goto out; 1165 } 1166 } 1167 if (IS_ENABLED(CONFIG_64BIT)) { 1168 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1169 bad_page(page, "nonzero entire_mapcount"); 1170 goto out; 1171 } 1172 if (unlikely(atomic_read(&folio->_pincount))) { 1173 bad_page(page, "nonzero pincount"); 1174 goto out; 1175 } 1176 } 1177 break; 1178 case 2: 1179 /* the second tail page: deferred_list overlaps ->mapping */ 1180 if (unlikely(!list_empty(&folio->_deferred_list))) { 1181 bad_page(page, "on deferred list"); 1182 goto out; 1183 } 1184 if (!IS_ENABLED(CONFIG_64BIT)) { 1185 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1186 bad_page(page, "nonzero entire_mapcount"); 1187 goto out; 1188 } 1189 if (unlikely(atomic_read(&folio->_pincount))) { 1190 bad_page(page, "nonzero pincount"); 1191 goto out; 1192 } 1193 } 1194 break; 1195 case 3: 1196 /* the third tail page: hugetlb specifics overlap ->mappings */ 1197 if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) 1198 break; 1199 fallthrough; 1200 default: 1201 if (page->mapping != TAIL_MAPPING) { 1202 bad_page(page, "corrupted mapping in tail page"); 1203 goto out; 1204 } 1205 break; 1206 } 1207 if (unlikely(!PageTail(page))) { 1208 bad_page(page, "PageTail not set"); 1209 goto out; 1210 } 1211 if (unlikely(compound_head(page) != head_page)) { 1212 bad_page(page, "compound_head not consistent"); 1213 goto out; 1214 } 1215 ret = 0; 1216 out: 1217 page->mapping = NULL; 1218 clear_compound_head(page); 1219 return ret; 1220 } 1221 1222 /* 1223 * Skip KASAN memory poisoning when either: 1224 * 1225 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1226 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1227 * using page tags instead (see below). 1228 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1229 * that error detection is disabled for accesses via the page address. 1230 * 1231 * Pages will have match-all tags in the following circumstances: 1232 * 1233 * 1. Pages are being initialized for the first time, including during deferred 1234 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1235 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1236 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1237 * 3. The allocation was excluded from being checked due to sampling, 1238 * see the call to kasan_unpoison_pages. 1239 * 1240 * Poisoning pages during deferred memory init will greatly lengthen the 1241 * process and cause problem in large memory systems as the deferred pages 1242 * initialization is done with interrupt disabled. 1243 * 1244 * Assuming that there will be no reference to those newly initialized 1245 * pages before they are ever allocated, this should have no effect on 1246 * KASAN memory tracking as the poison will be properly inserted at page 1247 * allocation time. The only corner case is when pages are allocated by 1248 * on-demand allocation and then freed again before the deferred pages 1249 * initialization is done, but this is not likely to happen. 1250 */ 1251 static inline bool should_skip_kasan_poison(struct page *page) 1252 { 1253 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1254 return deferred_pages_enabled(); 1255 1256 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1257 } 1258 1259 static void kernel_init_pages(struct page *page, int numpages) 1260 { 1261 int i; 1262 1263 /* s390's use of memset() could override KASAN redzones. */ 1264 kasan_disable_current(); 1265 for (i = 0; i < numpages; i++) 1266 clear_highpage_kasan_tagged(page + i); 1267 kasan_enable_current(); 1268 } 1269 1270 #ifdef CONFIG_MEM_ALLOC_PROFILING 1271 1272 /* Should be called only if mem_alloc_profiling_enabled() */ 1273 void __clear_page_tag_ref(struct page *page) 1274 { 1275 union pgtag_ref_handle handle; 1276 union codetag_ref ref; 1277 1278 if (get_page_tag_ref(page, &ref, &handle)) { 1279 set_codetag_empty(&ref); 1280 update_page_tag_ref(handle, &ref); 1281 put_page_tag_ref(handle); 1282 } 1283 } 1284 1285 /* Should be called only if mem_alloc_profiling_enabled() */ 1286 static noinline 1287 void __pgalloc_tag_add(struct page *page, struct task_struct *task, 1288 unsigned int nr) 1289 { 1290 union pgtag_ref_handle handle; 1291 union codetag_ref ref; 1292 1293 if (get_page_tag_ref(page, &ref, &handle)) { 1294 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); 1295 update_page_tag_ref(handle, &ref); 1296 put_page_tag_ref(handle); 1297 } 1298 } 1299 1300 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1301 unsigned int nr) 1302 { 1303 if (mem_alloc_profiling_enabled()) 1304 __pgalloc_tag_add(page, task, nr); 1305 } 1306 1307 /* Should be called only if mem_alloc_profiling_enabled() */ 1308 static noinline 1309 void __pgalloc_tag_sub(struct page *page, unsigned int nr) 1310 { 1311 union pgtag_ref_handle handle; 1312 union codetag_ref ref; 1313 1314 if (get_page_tag_ref(page, &ref, &handle)) { 1315 alloc_tag_sub(&ref, PAGE_SIZE * nr); 1316 update_page_tag_ref(handle, &ref); 1317 put_page_tag_ref(handle); 1318 } 1319 } 1320 1321 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) 1322 { 1323 if (mem_alloc_profiling_enabled()) 1324 __pgalloc_tag_sub(page, nr); 1325 } 1326 1327 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */ 1328 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) 1329 { 1330 if (tag) 1331 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1332 } 1333 1334 #else /* CONFIG_MEM_ALLOC_PROFILING */ 1335 1336 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1337 unsigned int nr) {} 1338 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1339 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} 1340 1341 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1342 1343 __always_inline bool free_pages_prepare(struct page *page, 1344 unsigned int order) 1345 { 1346 int bad = 0; 1347 bool skip_kasan_poison = should_skip_kasan_poison(page); 1348 bool init = want_init_on_free(); 1349 bool compound = PageCompound(page); 1350 struct folio *folio = page_folio(page); 1351 1352 VM_BUG_ON_PAGE(PageTail(page), page); 1353 1354 trace_mm_page_free(page, order); 1355 kmsan_free_page(page, order); 1356 1357 if (memcg_kmem_online() && PageMemcgKmem(page)) 1358 __memcg_kmem_uncharge_page(page, order); 1359 1360 /* 1361 * In rare cases, when truncation or holepunching raced with 1362 * munlock after VM_LOCKED was cleared, Mlocked may still be 1363 * found set here. This does not indicate a problem, unless 1364 * "unevictable_pgs_cleared" appears worryingly large. 1365 */ 1366 if (unlikely(folio_test_mlocked(folio))) { 1367 long nr_pages = folio_nr_pages(folio); 1368 1369 __folio_clear_mlocked(folio); 1370 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1371 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1372 } 1373 1374 if (unlikely(PageHWPoison(page)) && !order) { 1375 /* Do not let hwpoison pages hit pcplists/buddy */ 1376 reset_page_owner(page, order); 1377 page_table_check_free(page, order); 1378 pgalloc_tag_sub(page, 1 << order); 1379 1380 /* 1381 * The page is isolated and accounted for. 1382 * Mark the codetag as empty to avoid accounting error 1383 * when the page is freed by unpoison_memory(). 1384 */ 1385 clear_page_tag_ref(page); 1386 return false; 1387 } 1388 1389 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1390 1391 /* 1392 * Check tail pages before head page information is cleared to 1393 * avoid checking PageCompound for order-0 pages. 1394 */ 1395 if (unlikely(order)) { 1396 int i; 1397 1398 if (compound) { 1399 page[1].flags.f &= ~PAGE_FLAGS_SECOND; 1400 #ifdef NR_PAGES_IN_LARGE_FOLIO 1401 folio->_nr_pages = 0; 1402 #endif 1403 } 1404 for (i = 1; i < (1 << order); i++) { 1405 if (compound) 1406 bad += free_tail_page_prepare(page, page + i); 1407 if (is_check_pages_enabled()) { 1408 if (free_page_is_bad(page + i)) { 1409 bad++; 1410 continue; 1411 } 1412 } 1413 (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1414 } 1415 } 1416 if (folio_test_anon(folio)) { 1417 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1418 folio->mapping = NULL; 1419 } 1420 if (unlikely(page_has_type(page))) 1421 /* Reset the page_type (which overlays _mapcount) */ 1422 page->page_type = UINT_MAX; 1423 1424 if (is_check_pages_enabled()) { 1425 if (free_page_is_bad(page)) 1426 bad++; 1427 if (bad) 1428 return false; 1429 } 1430 1431 page_cpupid_reset_last(page); 1432 page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1433 reset_page_owner(page, order); 1434 page_table_check_free(page, order); 1435 pgalloc_tag_sub(page, 1 << order); 1436 1437 if (!PageHighMem(page)) { 1438 debug_check_no_locks_freed(page_address(page), 1439 PAGE_SIZE << order); 1440 debug_check_no_obj_freed(page_address(page), 1441 PAGE_SIZE << order); 1442 } 1443 1444 kernel_poison_pages(page, 1 << order); 1445 1446 /* 1447 * As memory initialization might be integrated into KASAN, 1448 * KASAN poisoning and memory initialization code must be 1449 * kept together to avoid discrepancies in behavior. 1450 * 1451 * With hardware tag-based KASAN, memory tags must be set before the 1452 * page becomes unavailable via debug_pagealloc or arch_free_page. 1453 */ 1454 if (!skip_kasan_poison) { 1455 kasan_poison_pages(page, order, init); 1456 1457 /* Memory is already initialized if KASAN did it internally. */ 1458 if (kasan_has_integrated_init()) 1459 init = false; 1460 } 1461 if (init) 1462 kernel_init_pages(page, 1 << order); 1463 1464 /* 1465 * arch_free_page() can make the page's contents inaccessible. s390 1466 * does this. So nothing which can access the page's contents should 1467 * happen after this. 1468 */ 1469 arch_free_page(page, order); 1470 1471 debug_pagealloc_unmap_pages(page, 1 << order); 1472 1473 return true; 1474 } 1475 1476 /* 1477 * Frees a number of pages from the PCP lists 1478 * Assumes all pages on list are in same zone. 1479 * count is the number of pages to free. 1480 */ 1481 static void free_pcppages_bulk(struct zone *zone, int count, 1482 struct per_cpu_pages *pcp, 1483 int pindex) 1484 { 1485 unsigned long flags; 1486 unsigned int order; 1487 struct page *page; 1488 1489 /* 1490 * Ensure proper count is passed which otherwise would stuck in the 1491 * below while (list_empty(list)) loop. 1492 */ 1493 count = min(pcp->count, count); 1494 1495 /* Ensure requested pindex is drained first. */ 1496 pindex = pindex - 1; 1497 1498 spin_lock_irqsave(&zone->lock, flags); 1499 1500 while (count > 0) { 1501 struct list_head *list; 1502 int nr_pages; 1503 1504 /* Remove pages from lists in a round-robin fashion. */ 1505 do { 1506 if (++pindex > NR_PCP_LISTS - 1) 1507 pindex = 0; 1508 list = &pcp->lists[pindex]; 1509 } while (list_empty(list)); 1510 1511 order = pindex_to_order(pindex); 1512 nr_pages = 1 << order; 1513 do { 1514 unsigned long pfn; 1515 int mt; 1516 1517 page = list_last_entry(list, struct page, pcp_list); 1518 pfn = page_to_pfn(page); 1519 mt = get_pfnblock_migratetype(page, pfn); 1520 1521 /* must delete to avoid corrupting pcp list */ 1522 list_del(&page->pcp_list); 1523 count -= nr_pages; 1524 pcp->count -= nr_pages; 1525 1526 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); 1527 trace_mm_page_pcpu_drain(page, order, mt); 1528 } while (count > 0 && !list_empty(list)); 1529 } 1530 1531 spin_unlock_irqrestore(&zone->lock, flags); 1532 } 1533 1534 /* Split a multi-block free page into its individual pageblocks. */ 1535 static void split_large_buddy(struct zone *zone, struct page *page, 1536 unsigned long pfn, int order, fpi_t fpi) 1537 { 1538 unsigned long end = pfn + (1 << order); 1539 1540 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); 1541 /* Caller removed page from freelist, buddy info cleared! */ 1542 VM_WARN_ON_ONCE(PageBuddy(page)); 1543 1544 if (order > pageblock_order) 1545 order = pageblock_order; 1546 1547 do { 1548 int mt = get_pfnblock_migratetype(page, pfn); 1549 1550 __free_one_page(page, pfn, zone, order, mt, fpi); 1551 pfn += 1 << order; 1552 if (pfn == end) 1553 break; 1554 page = pfn_to_page(pfn); 1555 } while (1); 1556 } 1557 1558 static void add_page_to_zone_llist(struct zone *zone, struct page *page, 1559 unsigned int order) 1560 { 1561 /* Remember the order */ 1562 page->private = order; 1563 /* Add the page to the free list */ 1564 llist_add(&page->pcp_llist, &zone->trylock_free_pages); 1565 } 1566 1567 static void free_one_page(struct zone *zone, struct page *page, 1568 unsigned long pfn, unsigned int order, 1569 fpi_t fpi_flags) 1570 { 1571 struct llist_head *llhead; 1572 unsigned long flags; 1573 1574 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 1575 if (!spin_trylock_irqsave(&zone->lock, flags)) { 1576 add_page_to_zone_llist(zone, page, order); 1577 return; 1578 } 1579 } else { 1580 spin_lock_irqsave(&zone->lock, flags); 1581 } 1582 1583 /* The lock succeeded. Process deferred pages. */ 1584 llhead = &zone->trylock_free_pages; 1585 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) { 1586 struct llist_node *llnode; 1587 struct page *p, *tmp; 1588 1589 llnode = llist_del_all(llhead); 1590 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) { 1591 unsigned int p_order = p->private; 1592 1593 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags); 1594 __count_vm_events(PGFREE, 1 << p_order); 1595 } 1596 } 1597 split_large_buddy(zone, page, pfn, order, fpi_flags); 1598 spin_unlock_irqrestore(&zone->lock, flags); 1599 1600 __count_vm_events(PGFREE, 1 << order); 1601 } 1602 1603 static void __free_pages_ok(struct page *page, unsigned int order, 1604 fpi_t fpi_flags) 1605 { 1606 unsigned long pfn = page_to_pfn(page); 1607 struct zone *zone = page_zone(page); 1608 1609 if (free_pages_prepare(page, order)) 1610 free_one_page(zone, page, pfn, order, fpi_flags); 1611 } 1612 1613 void __meminit __free_pages_core(struct page *page, unsigned int order, 1614 enum meminit_context context) 1615 { 1616 unsigned int nr_pages = 1 << order; 1617 struct page *p = page; 1618 unsigned int loop; 1619 1620 /* 1621 * When initializing the memmap, __init_single_page() sets the refcount 1622 * of all pages to 1 ("allocated"/"not free"). We have to set the 1623 * refcount of all involved pages to 0. 1624 * 1625 * Note that hotplugged memory pages are initialized to PageOffline(). 1626 * Pages freed from memblock might be marked as reserved. 1627 */ 1628 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1629 unlikely(context == MEMINIT_HOTPLUG)) { 1630 for (loop = 0; loop < nr_pages; loop++, p++) { 1631 VM_WARN_ON_ONCE(PageReserved(p)); 1632 __ClearPageOffline(p); 1633 set_page_count(p, 0); 1634 } 1635 1636 adjust_managed_page_count(page, nr_pages); 1637 } else { 1638 for (loop = 0; loop < nr_pages; loop++, p++) { 1639 __ClearPageReserved(p); 1640 set_page_count(p, 0); 1641 } 1642 1643 /* memblock adjusts totalram_pages() manually. */ 1644 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1645 } 1646 1647 if (page_contains_unaccepted(page, order)) { 1648 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1649 return; 1650 1651 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1652 } 1653 1654 /* 1655 * Bypass PCP and place fresh pages right to the tail, primarily 1656 * relevant for memory onlining. 1657 */ 1658 __free_pages_ok(page, order, FPI_TO_TAIL); 1659 } 1660 1661 /* 1662 * Check that the whole (or subset of) a pageblock given by the interval of 1663 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1664 * with the migration of free compaction scanner. 1665 * 1666 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1667 * 1668 * It's possible on some configurations to have a setup like node0 node1 node0 1669 * i.e. it's possible that all pages within a zones range of pages do not 1670 * belong to a single zone. We assume that a border between node0 and node1 1671 * can occur within a single pageblock, but not a node0 node1 node0 1672 * interleaving within a single pageblock. It is therefore sufficient to check 1673 * the first and last page of a pageblock and avoid checking each individual 1674 * page in a pageblock. 1675 * 1676 * Note: the function may return non-NULL struct page even for a page block 1677 * which contains a memory hole (i.e. there is no physical memory for a subset 1678 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which 1679 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1680 * even though the start pfn is online and valid. This should be safe most of 1681 * the time because struct pages are still initialized via init_unavailable_range() 1682 * and pfn walkers shouldn't touch any physical memory range for which they do 1683 * not recognize any specific metadata in struct pages. 1684 */ 1685 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1686 unsigned long end_pfn, struct zone *zone) 1687 { 1688 struct page *start_page; 1689 struct page *end_page; 1690 1691 /* end_pfn is one past the range we are checking */ 1692 end_pfn--; 1693 1694 if (!pfn_valid(end_pfn)) 1695 return NULL; 1696 1697 start_page = pfn_to_online_page(start_pfn); 1698 if (!start_page) 1699 return NULL; 1700 1701 if (page_zone(start_page) != zone) 1702 return NULL; 1703 1704 end_page = pfn_to_page(end_pfn); 1705 1706 /* This gives a shorter code than deriving page_zone(end_page) */ 1707 if (page_zone_id(start_page) != page_zone_id(end_page)) 1708 return NULL; 1709 1710 return start_page; 1711 } 1712 1713 /* 1714 * The order of subdivision here is critical for the IO subsystem. 1715 * Please do not alter this order without good reasons and regression 1716 * testing. Specifically, as large blocks of memory are subdivided, 1717 * the order in which smaller blocks are delivered depends on the order 1718 * they're subdivided in this function. This is the primary factor 1719 * influencing the order in which pages are delivered to the IO 1720 * subsystem according to empirical testing, and this is also justified 1721 * by considering the behavior of a buddy system containing a single 1722 * large block of memory acted on by a series of small allocations. 1723 * This behavior is a critical factor in sglist merging's success. 1724 * 1725 * -- nyc 1726 */ 1727 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1728 int high, int migratetype) 1729 { 1730 unsigned int size = 1 << high; 1731 unsigned int nr_added = 0; 1732 1733 while (high > low) { 1734 high--; 1735 size >>= 1; 1736 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1737 1738 /* 1739 * Mark as guard pages (or page), that will allow to 1740 * merge back to allocator when buddy will be freed. 1741 * Corresponding page table entries will not be touched, 1742 * pages will stay not present in virtual address space 1743 */ 1744 if (set_page_guard(zone, &page[size], high)) 1745 continue; 1746 1747 __add_to_free_list(&page[size], zone, high, migratetype, false); 1748 set_buddy_order(&page[size], high); 1749 nr_added += size; 1750 } 1751 1752 return nr_added; 1753 } 1754 1755 static __always_inline void page_del_and_expand(struct zone *zone, 1756 struct page *page, int low, 1757 int high, int migratetype) 1758 { 1759 int nr_pages = 1 << high; 1760 1761 __del_page_from_free_list(page, zone, high, migratetype); 1762 nr_pages -= expand(zone, page, low, high, migratetype); 1763 account_freepages(zone, -nr_pages, migratetype); 1764 } 1765 1766 static void check_new_page_bad(struct page *page) 1767 { 1768 if (unlikely(PageHWPoison(page))) { 1769 /* Don't complain about hwpoisoned pages */ 1770 if (PageBuddy(page)) 1771 __ClearPageBuddy(page); 1772 return; 1773 } 1774 1775 bad_page(page, 1776 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1777 } 1778 1779 /* 1780 * This page is about to be returned from the page allocator 1781 */ 1782 static bool check_new_page(struct page *page) 1783 { 1784 if (likely(page_expected_state(page, 1785 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1786 return false; 1787 1788 check_new_page_bad(page); 1789 return true; 1790 } 1791 1792 static inline bool check_new_pages(struct page *page, unsigned int order) 1793 { 1794 if (is_check_pages_enabled()) { 1795 for (int i = 0; i < (1 << order); i++) { 1796 struct page *p = page + i; 1797 1798 if (check_new_page(p)) 1799 return true; 1800 } 1801 } 1802 1803 return false; 1804 } 1805 1806 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1807 { 1808 /* Don't skip if a software KASAN mode is enabled. */ 1809 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1810 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1811 return false; 1812 1813 /* Skip, if hardware tag-based KASAN is not enabled. */ 1814 if (!kasan_hw_tags_enabled()) 1815 return true; 1816 1817 /* 1818 * With hardware tag-based KASAN enabled, skip if this has been 1819 * requested via __GFP_SKIP_KASAN. 1820 */ 1821 return flags & __GFP_SKIP_KASAN; 1822 } 1823 1824 static inline bool should_skip_init(gfp_t flags) 1825 { 1826 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1827 if (!kasan_hw_tags_enabled()) 1828 return false; 1829 1830 /* For hardware tag-based KASAN, skip if requested. */ 1831 return (flags & __GFP_SKIP_ZERO); 1832 } 1833 1834 inline void post_alloc_hook(struct page *page, unsigned int order, 1835 gfp_t gfp_flags) 1836 { 1837 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1838 !should_skip_init(gfp_flags); 1839 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1840 int i; 1841 1842 set_page_private(page, 0); 1843 1844 arch_alloc_page(page, order); 1845 debug_pagealloc_map_pages(page, 1 << order); 1846 1847 /* 1848 * Page unpoisoning must happen before memory initialization. 1849 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1850 * allocations and the page unpoisoning code will complain. 1851 */ 1852 kernel_unpoison_pages(page, 1 << order); 1853 1854 /* 1855 * As memory initialization might be integrated into KASAN, 1856 * KASAN unpoisoning and memory initializion code must be 1857 * kept together to avoid discrepancies in behavior. 1858 */ 1859 1860 /* 1861 * If memory tags should be zeroed 1862 * (which happens only when memory should be initialized as well). 1863 */ 1864 if (zero_tags) 1865 init = !tag_clear_highpages(page, 1 << order); 1866 1867 if (!should_skip_kasan_unpoison(gfp_flags) && 1868 kasan_unpoison_pages(page, order, init)) { 1869 /* Take note that memory was initialized by KASAN. */ 1870 if (kasan_has_integrated_init()) 1871 init = false; 1872 } else { 1873 /* 1874 * If memory tags have not been set by KASAN, reset the page 1875 * tags to ensure page_address() dereferencing does not fault. 1876 */ 1877 for (i = 0; i != 1 << order; ++i) 1878 page_kasan_tag_reset(page + i); 1879 } 1880 /* If memory is still not initialized, initialize it now. */ 1881 if (init) 1882 kernel_init_pages(page, 1 << order); 1883 1884 set_page_owner(page, order, gfp_flags); 1885 page_table_check_alloc(page, order); 1886 pgalloc_tag_add(page, current, 1 << order); 1887 } 1888 1889 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1890 unsigned int alloc_flags) 1891 { 1892 post_alloc_hook(page, order, gfp_flags); 1893 1894 if (order && (gfp_flags & __GFP_COMP)) 1895 prep_compound_page(page, order); 1896 1897 /* 1898 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1899 * allocate the page. The expectation is that the caller is taking 1900 * steps that will free more memory. The caller should avoid the page 1901 * being used for !PFMEMALLOC purposes. 1902 */ 1903 if (alloc_flags & ALLOC_NO_WATERMARKS) 1904 set_page_pfmemalloc(page); 1905 else 1906 clear_page_pfmemalloc(page); 1907 } 1908 1909 /* 1910 * Go through the free lists for the given migratetype and remove 1911 * the smallest available page from the freelists 1912 */ 1913 static __always_inline 1914 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1915 int migratetype) 1916 { 1917 unsigned int current_order; 1918 struct free_area *area; 1919 struct page *page; 1920 1921 /* Find a page of the appropriate size in the preferred list */ 1922 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1923 area = &(zone->free_area[current_order]); 1924 page = get_page_from_free_area(area, migratetype); 1925 if (!page) 1926 continue; 1927 1928 page_del_and_expand(zone, page, order, current_order, 1929 migratetype); 1930 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1931 pcp_allowed_order(order) && 1932 migratetype < MIGRATE_PCPTYPES); 1933 return page; 1934 } 1935 1936 return NULL; 1937 } 1938 1939 1940 /* 1941 * This array describes the order lists are fallen back to when 1942 * the free lists for the desirable migrate type are depleted 1943 * 1944 * The other migratetypes do not have fallbacks. 1945 */ 1946 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1947 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1948 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1949 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1950 }; 1951 1952 #ifdef CONFIG_CMA 1953 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1954 unsigned int order) 1955 { 1956 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1957 } 1958 #else 1959 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1960 unsigned int order) { return NULL; } 1961 #endif 1962 1963 /* 1964 * Move all free pages of a block to new type's freelist. Caller needs to 1965 * change the block type. 1966 */ 1967 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1968 int old_mt, int new_mt) 1969 { 1970 struct page *page; 1971 unsigned long pfn, end_pfn; 1972 unsigned int order; 1973 int pages_moved = 0; 1974 1975 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1976 end_pfn = pageblock_end_pfn(start_pfn); 1977 1978 for (pfn = start_pfn; pfn < end_pfn;) { 1979 page = pfn_to_page(pfn); 1980 if (!PageBuddy(page)) { 1981 pfn++; 1982 continue; 1983 } 1984 1985 /* Make sure we are not inadvertently changing nodes */ 1986 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1987 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1988 1989 order = buddy_order(page); 1990 1991 move_to_free_list(page, zone, order, old_mt, new_mt); 1992 1993 pfn += 1 << order; 1994 pages_moved += 1 << order; 1995 } 1996 1997 return pages_moved; 1998 } 1999 2000 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 2001 unsigned long *start_pfn, 2002 int *num_free, int *num_movable) 2003 { 2004 unsigned long pfn, start, end; 2005 2006 pfn = page_to_pfn(page); 2007 start = pageblock_start_pfn(pfn); 2008 end = pageblock_end_pfn(pfn); 2009 2010 /* 2011 * The caller only has the lock for @zone, don't touch ranges 2012 * that straddle into other zones. While we could move part of 2013 * the range that's inside the zone, this call is usually 2014 * accompanied by other operations such as migratetype updates 2015 * which also should be locked. 2016 */ 2017 if (!zone_spans_pfn(zone, start)) 2018 return false; 2019 if (!zone_spans_pfn(zone, end - 1)) 2020 return false; 2021 2022 *start_pfn = start; 2023 2024 if (num_free) { 2025 *num_free = 0; 2026 *num_movable = 0; 2027 for (pfn = start; pfn < end;) { 2028 page = pfn_to_page(pfn); 2029 if (PageBuddy(page)) { 2030 int nr = 1 << buddy_order(page); 2031 2032 *num_free += nr; 2033 pfn += nr; 2034 continue; 2035 } 2036 /* 2037 * We assume that pages that could be isolated for 2038 * migration are movable. But we don't actually try 2039 * isolating, as that would be expensive. 2040 */ 2041 if (PageLRU(page) || page_has_movable_ops(page)) 2042 (*num_movable)++; 2043 pfn++; 2044 } 2045 } 2046 2047 return true; 2048 } 2049 2050 static int move_freepages_block(struct zone *zone, struct page *page, 2051 int old_mt, int new_mt) 2052 { 2053 unsigned long start_pfn; 2054 int res; 2055 2056 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2057 return -1; 2058 2059 res = __move_freepages_block(zone, start_pfn, old_mt, new_mt); 2060 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 2061 2062 return res; 2063 2064 } 2065 2066 #ifdef CONFIG_MEMORY_ISOLATION 2067 /* Look for a buddy that straddles start_pfn */ 2068 static unsigned long find_large_buddy(unsigned long start_pfn) 2069 { 2070 /* 2071 * If start_pfn is not an order-0 PageBuddy, next PageBuddy containing 2072 * start_pfn has minimal order of __ffs(start_pfn) + 1. Start checking 2073 * the order with __ffs(start_pfn). If start_pfn is order-0 PageBuddy, 2074 * the starting order does not matter. 2075 */ 2076 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER; 2077 struct page *page; 2078 unsigned long pfn = start_pfn; 2079 2080 while (!PageBuddy(page = pfn_to_page(pfn))) { 2081 /* Nothing found */ 2082 if (++order > MAX_PAGE_ORDER) 2083 return start_pfn; 2084 pfn &= ~0UL << order; 2085 } 2086 2087 /* 2088 * Found a preceding buddy, but does it straddle? 2089 */ 2090 if (pfn + (1 << buddy_order(page)) > start_pfn) 2091 return pfn; 2092 2093 /* Nothing found */ 2094 return start_pfn; 2095 } 2096 2097 static inline void toggle_pageblock_isolate(struct page *page, bool isolate) 2098 { 2099 if (isolate) 2100 set_pageblock_isolate(page); 2101 else 2102 clear_pageblock_isolate(page); 2103 } 2104 2105 /** 2106 * __move_freepages_block_isolate - move free pages in block for page isolation 2107 * @zone: the zone 2108 * @page: the pageblock page 2109 * @isolate: to isolate the given pageblock or unisolate it 2110 * 2111 * This is similar to move_freepages_block(), but handles the special 2112 * case encountered in page isolation, where the block of interest 2113 * might be part of a larger buddy spanning multiple pageblocks. 2114 * 2115 * Unlike the regular page allocator path, which moves pages while 2116 * stealing buddies off the freelist, page isolation is interested in 2117 * arbitrary pfn ranges that may have overlapping buddies on both ends. 2118 * 2119 * This function handles that. Straddling buddies are split into 2120 * individual pageblocks. Only the block of interest is moved. 2121 * 2122 * Returns %true if pages could be moved, %false otherwise. 2123 */ 2124 static bool __move_freepages_block_isolate(struct zone *zone, 2125 struct page *page, bool isolate) 2126 { 2127 unsigned long start_pfn, buddy_pfn; 2128 int from_mt; 2129 int to_mt; 2130 struct page *buddy; 2131 2132 if (isolate == get_pageblock_isolate(page)) { 2133 VM_WARN_ONCE(1, "%s a pageblock that is already in that state", 2134 isolate ? "Isolate" : "Unisolate"); 2135 return false; 2136 } 2137 2138 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2139 return false; 2140 2141 /* No splits needed if buddies can't span multiple blocks */ 2142 if (pageblock_order == MAX_PAGE_ORDER) 2143 goto move; 2144 2145 buddy_pfn = find_large_buddy(start_pfn); 2146 buddy = pfn_to_page(buddy_pfn); 2147 /* We're a part of a larger buddy */ 2148 if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) { 2149 int order = buddy_order(buddy); 2150 2151 del_page_from_free_list(buddy, zone, order, 2152 get_pfnblock_migratetype(buddy, buddy_pfn)); 2153 toggle_pageblock_isolate(page, isolate); 2154 split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE); 2155 return true; 2156 } 2157 2158 move: 2159 /* Use MIGRATETYPE_MASK to get non-isolate migratetype */ 2160 if (isolate) { 2161 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2162 MIGRATETYPE_MASK); 2163 to_mt = MIGRATE_ISOLATE; 2164 } else { 2165 from_mt = MIGRATE_ISOLATE; 2166 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2167 MIGRATETYPE_MASK); 2168 } 2169 2170 __move_freepages_block(zone, start_pfn, from_mt, to_mt); 2171 toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate); 2172 2173 return true; 2174 } 2175 2176 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page) 2177 { 2178 return __move_freepages_block_isolate(zone, page, true); 2179 } 2180 2181 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page) 2182 { 2183 return __move_freepages_block_isolate(zone, page, false); 2184 } 2185 2186 #endif /* CONFIG_MEMORY_ISOLATION */ 2187 2188 static inline bool boost_watermark(struct zone *zone) 2189 { 2190 unsigned long max_boost; 2191 2192 if (!watermark_boost_factor) 2193 return false; 2194 /* 2195 * Don't bother in zones that are unlikely to produce results. 2196 * On small machines, including kdump capture kernels running 2197 * in a small area, boosting the watermark can cause an out of 2198 * memory situation immediately. 2199 */ 2200 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2201 return false; 2202 2203 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2204 watermark_boost_factor, 10000); 2205 2206 /* 2207 * high watermark may be uninitialised if fragmentation occurs 2208 * very early in boot so do not boost. We do not fall 2209 * through and boost by pageblock_nr_pages as failing 2210 * allocations that early means that reclaim is not going 2211 * to help and it may even be impossible to reclaim the 2212 * boosted watermark resulting in a hang. 2213 */ 2214 if (!max_boost) 2215 return false; 2216 2217 max_boost = max(pageblock_nr_pages, max_boost); 2218 2219 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2220 max_boost); 2221 2222 return true; 2223 } 2224 2225 /* 2226 * When we are falling back to another migratetype during allocation, should we 2227 * try to claim an entire block to satisfy further allocations, instead of 2228 * polluting multiple pageblocks? 2229 */ 2230 static bool should_try_claim_block(unsigned int order, int start_mt) 2231 { 2232 /* 2233 * Leaving this order check is intended, although there is 2234 * relaxed order check in next check. The reason is that 2235 * we can actually claim the whole pageblock if this condition met, 2236 * but, below check doesn't guarantee it and that is just heuristic 2237 * so could be changed anytime. 2238 */ 2239 if (order >= pageblock_order) 2240 return true; 2241 2242 /* 2243 * Above a certain threshold, always try to claim, as it's likely there 2244 * will be more free pages in the pageblock. 2245 */ 2246 if (order >= pageblock_order / 2) 2247 return true; 2248 2249 /* 2250 * Unmovable/reclaimable allocations would cause permanent 2251 * fragmentations if they fell back to allocating from a movable block 2252 * (polluting it), so we try to claim the whole block regardless of the 2253 * allocation size. Later movable allocations can always steal from this 2254 * block, which is less problematic. 2255 */ 2256 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE) 2257 return true; 2258 2259 if (page_group_by_mobility_disabled) 2260 return true; 2261 2262 /* 2263 * Movable pages won't cause permanent fragmentation, so when you alloc 2264 * small pages, we just need to temporarily steal unmovable or 2265 * reclaimable pages that are closest to the request size. After a 2266 * while, memory compaction may occur to form large contiguous pages, 2267 * and the next movable allocation may not need to steal. 2268 */ 2269 return false; 2270 } 2271 2272 /* 2273 * Check whether there is a suitable fallback freepage with requested order. 2274 * If claimable is true, this function returns fallback_mt only if 2275 * we would do this whole-block claiming. This would help to reduce 2276 * fragmentation due to mixed migratetype pages in one pageblock. 2277 */ 2278 int find_suitable_fallback(struct free_area *area, unsigned int order, 2279 int migratetype, bool claimable) 2280 { 2281 int i; 2282 2283 if (claimable && !should_try_claim_block(order, migratetype)) 2284 return -2; 2285 2286 if (area->nr_free == 0) 2287 return -1; 2288 2289 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 2290 int fallback_mt = fallbacks[migratetype][i]; 2291 2292 if (!free_area_empty(area, fallback_mt)) 2293 return fallback_mt; 2294 } 2295 2296 return -1; 2297 } 2298 2299 /* 2300 * This function implements actual block claiming behaviour. If order is large 2301 * enough, we can claim the whole pageblock for the requested migratetype. If 2302 * not, we check the pageblock for constituent pages; if at least half of the 2303 * pages are free or compatible, we can still claim the whole block, so pages 2304 * freed in the future will be put on the correct free list. 2305 */ 2306 static struct page * 2307 try_to_claim_block(struct zone *zone, struct page *page, 2308 int current_order, int order, int start_type, 2309 int block_type, unsigned int alloc_flags) 2310 { 2311 int free_pages, movable_pages, alike_pages; 2312 unsigned long start_pfn; 2313 2314 /* Take ownership for orders >= pageblock_order */ 2315 if (current_order >= pageblock_order) { 2316 unsigned int nr_added; 2317 2318 del_page_from_free_list(page, zone, current_order, block_type); 2319 change_pageblock_range(page, current_order, start_type); 2320 nr_added = expand(zone, page, order, current_order, start_type); 2321 account_freepages(zone, nr_added, start_type); 2322 return page; 2323 } 2324 2325 /* 2326 * Boost watermarks to increase reclaim pressure to reduce the 2327 * likelihood of future fallbacks. Wake kswapd now as the node 2328 * may be balanced overall and kswapd will not wake naturally. 2329 */ 2330 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2331 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2332 2333 /* moving whole block can fail due to zone boundary conditions */ 2334 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 2335 &movable_pages)) 2336 return NULL; 2337 2338 /* 2339 * Determine how many pages are compatible with our allocation. 2340 * For movable allocation, it's the number of movable pages which 2341 * we just obtained. For other types it's a bit more tricky. 2342 */ 2343 if (start_type == MIGRATE_MOVABLE) { 2344 alike_pages = movable_pages; 2345 } else { 2346 /* 2347 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2348 * to MOVABLE pageblock, consider all non-movable pages as 2349 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2350 * vice versa, be conservative since we can't distinguish the 2351 * exact migratetype of non-movable pages. 2352 */ 2353 if (block_type == MIGRATE_MOVABLE) 2354 alike_pages = pageblock_nr_pages 2355 - (free_pages + movable_pages); 2356 else 2357 alike_pages = 0; 2358 } 2359 /* 2360 * If a sufficient number of pages in the block are either free or of 2361 * compatible migratability as our allocation, claim the whole block. 2362 */ 2363 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2364 page_group_by_mobility_disabled) { 2365 __move_freepages_block(zone, start_pfn, block_type, start_type); 2366 set_pageblock_migratetype(pfn_to_page(start_pfn), start_type); 2367 return __rmqueue_smallest(zone, order, start_type); 2368 } 2369 2370 return NULL; 2371 } 2372 2373 /* 2374 * Try to allocate from some fallback migratetype by claiming the entire block, 2375 * i.e. converting it to the allocation's start migratetype. 2376 * 2377 * The use of signed ints for order and current_order is a deliberate 2378 * deviation from the rest of this file, to make the for loop 2379 * condition simpler. 2380 */ 2381 static __always_inline struct page * 2382 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, 2383 unsigned int alloc_flags) 2384 { 2385 struct free_area *area; 2386 int current_order; 2387 int min_order = order; 2388 struct page *page; 2389 int fallback_mt; 2390 2391 /* 2392 * Do not steal pages from freelists belonging to other pageblocks 2393 * i.e. orders < pageblock_order. If there are no local zones free, 2394 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2395 */ 2396 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2397 min_order = pageblock_order; 2398 2399 /* 2400 * Find the largest available free page in the other list. This roughly 2401 * approximates finding the pageblock with the most free pages, which 2402 * would be too costly to do exactly. 2403 */ 2404 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2405 --current_order) { 2406 area = &(zone->free_area[current_order]); 2407 fallback_mt = find_suitable_fallback(area, current_order, 2408 start_migratetype, true); 2409 2410 /* No block in that order */ 2411 if (fallback_mt == -1) 2412 continue; 2413 2414 /* Advanced into orders too low to claim, abort */ 2415 if (fallback_mt == -2) 2416 break; 2417 2418 page = get_page_from_free_area(area, fallback_mt); 2419 page = try_to_claim_block(zone, page, current_order, order, 2420 start_migratetype, fallback_mt, 2421 alloc_flags); 2422 if (page) { 2423 trace_mm_page_alloc_extfrag(page, order, current_order, 2424 start_migratetype, fallback_mt); 2425 return page; 2426 } 2427 } 2428 2429 return NULL; 2430 } 2431 2432 /* 2433 * Try to steal a single page from some fallback migratetype. Leave the rest of 2434 * the block as its current migratetype, potentially causing fragmentation. 2435 */ 2436 static __always_inline struct page * 2437 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) 2438 { 2439 struct free_area *area; 2440 int current_order; 2441 struct page *page; 2442 int fallback_mt; 2443 2444 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2445 area = &(zone->free_area[current_order]); 2446 fallback_mt = find_suitable_fallback(area, current_order, 2447 start_migratetype, false); 2448 if (fallback_mt == -1) 2449 continue; 2450 2451 page = get_page_from_free_area(area, fallback_mt); 2452 page_del_and_expand(zone, page, order, current_order, fallback_mt); 2453 trace_mm_page_alloc_extfrag(page, order, current_order, 2454 start_migratetype, fallback_mt); 2455 return page; 2456 } 2457 2458 return NULL; 2459 } 2460 2461 enum rmqueue_mode { 2462 RMQUEUE_NORMAL, 2463 RMQUEUE_CMA, 2464 RMQUEUE_CLAIM, 2465 RMQUEUE_STEAL, 2466 }; 2467 2468 /* 2469 * Do the hard work of removing an element from the buddy allocator. 2470 * Call me with the zone->lock already held. 2471 */ 2472 static __always_inline struct page * 2473 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2474 unsigned int alloc_flags, enum rmqueue_mode *mode) 2475 { 2476 struct page *page; 2477 2478 if (IS_ENABLED(CONFIG_CMA)) { 2479 /* 2480 * Balance movable allocations between regular and CMA areas by 2481 * allocating from CMA when over half of the zone's free memory 2482 * is in the CMA area. 2483 */ 2484 if (alloc_flags & ALLOC_CMA && 2485 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2486 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2487 page = __rmqueue_cma_fallback(zone, order); 2488 if (page) 2489 return page; 2490 } 2491 } 2492 2493 /* 2494 * First try the freelists of the requested migratetype, then try 2495 * fallbacks modes with increasing levels of fragmentation risk. 2496 * 2497 * The fallback logic is expensive and rmqueue_bulk() calls in 2498 * a loop with the zone->lock held, meaning the freelists are 2499 * not subject to any outside changes. Remember in *mode where 2500 * we found pay dirt, to save us the search on the next call. 2501 */ 2502 switch (*mode) { 2503 case RMQUEUE_NORMAL: 2504 page = __rmqueue_smallest(zone, order, migratetype); 2505 if (page) 2506 return page; 2507 fallthrough; 2508 case RMQUEUE_CMA: 2509 if (alloc_flags & ALLOC_CMA) { 2510 page = __rmqueue_cma_fallback(zone, order); 2511 if (page) { 2512 *mode = RMQUEUE_CMA; 2513 return page; 2514 } 2515 } 2516 fallthrough; 2517 case RMQUEUE_CLAIM: 2518 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); 2519 if (page) { 2520 /* Replenished preferred freelist, back to normal mode. */ 2521 *mode = RMQUEUE_NORMAL; 2522 return page; 2523 } 2524 fallthrough; 2525 case RMQUEUE_STEAL: 2526 if (!(alloc_flags & ALLOC_NOFRAGMENT)) { 2527 page = __rmqueue_steal(zone, order, migratetype); 2528 if (page) { 2529 *mode = RMQUEUE_STEAL; 2530 return page; 2531 } 2532 } 2533 } 2534 return NULL; 2535 } 2536 2537 /* 2538 * Obtain a specified number of elements from the buddy allocator, all under 2539 * a single hold of the lock, for efficiency. Add them to the supplied list. 2540 * Returns the number of new pages which were placed at *list. 2541 */ 2542 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2543 unsigned long count, struct list_head *list, 2544 int migratetype, unsigned int alloc_flags) 2545 { 2546 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 2547 unsigned long flags; 2548 int i; 2549 2550 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 2551 if (!spin_trylock_irqsave(&zone->lock, flags)) 2552 return 0; 2553 } else { 2554 spin_lock_irqsave(&zone->lock, flags); 2555 } 2556 for (i = 0; i < count; ++i) { 2557 struct page *page = __rmqueue(zone, order, migratetype, 2558 alloc_flags, &rmqm); 2559 if (unlikely(page == NULL)) 2560 break; 2561 2562 /* 2563 * Split buddy pages returned by expand() are received here in 2564 * physical page order. The page is added to the tail of 2565 * caller's list. From the callers perspective, the linked list 2566 * is ordered by page number under some conditions. This is 2567 * useful for IO devices that can forward direction from the 2568 * head, thus also in the physical page order. This is useful 2569 * for IO devices that can merge IO requests if the physical 2570 * pages are ordered properly. 2571 */ 2572 list_add_tail(&page->pcp_list, list); 2573 } 2574 spin_unlock_irqrestore(&zone->lock, flags); 2575 2576 return i; 2577 } 2578 2579 /* 2580 * Called from the vmstat counter updater to decay the PCP high. 2581 * Return whether there are addition works to do. 2582 */ 2583 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) 2584 { 2585 int high_min, to_drain, to_drain_batched, batch; 2586 unsigned long UP_flags; 2587 bool todo = false; 2588 2589 high_min = READ_ONCE(pcp->high_min); 2590 batch = READ_ONCE(pcp->batch); 2591 /* 2592 * Decrease pcp->high periodically to try to free possible 2593 * idle PCP pages. And, avoid to free too many pages to 2594 * control latency. This caps pcp->high decrement too. 2595 */ 2596 if (pcp->high > high_min) { 2597 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2598 pcp->high - (pcp->high >> 3), high_min); 2599 if (pcp->high > high_min) 2600 todo = true; 2601 } 2602 2603 to_drain = pcp->count - pcp->high; 2604 while (to_drain > 0) { 2605 to_drain_batched = min(to_drain, batch); 2606 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 2607 free_pcppages_bulk(zone, to_drain_batched, pcp, 0); 2608 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 2609 todo = true; 2610 2611 to_drain -= to_drain_batched; 2612 } 2613 2614 return todo; 2615 } 2616 2617 #ifdef CONFIG_NUMA 2618 /* 2619 * Called from the vmstat counter updater to drain pagesets of this 2620 * currently executing processor on remote nodes after they have 2621 * expired. 2622 */ 2623 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2624 { 2625 unsigned long UP_flags; 2626 int to_drain, batch; 2627 2628 batch = READ_ONCE(pcp->batch); 2629 to_drain = min(pcp->count, batch); 2630 if (to_drain > 0) { 2631 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 2632 free_pcppages_bulk(zone, to_drain, pcp, 0); 2633 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 2634 } 2635 } 2636 #endif 2637 2638 /* 2639 * Drain pcplists of the indicated processor and zone. 2640 */ 2641 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2642 { 2643 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2644 unsigned long UP_flags; 2645 int count; 2646 2647 do { 2648 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 2649 count = pcp->count; 2650 if (count) { 2651 int to_drain = min(count, 2652 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2653 2654 free_pcppages_bulk(zone, to_drain, pcp, 0); 2655 count -= to_drain; 2656 } 2657 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 2658 } while (count); 2659 } 2660 2661 /* 2662 * Drain pcplists of all zones on the indicated processor. 2663 */ 2664 static void drain_pages(unsigned int cpu) 2665 { 2666 struct zone *zone; 2667 2668 for_each_populated_zone(zone) { 2669 drain_pages_zone(cpu, zone); 2670 } 2671 } 2672 2673 /* 2674 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2675 */ 2676 void drain_local_pages(struct zone *zone) 2677 { 2678 int cpu = smp_processor_id(); 2679 2680 if (zone) 2681 drain_pages_zone(cpu, zone); 2682 else 2683 drain_pages(cpu); 2684 } 2685 2686 /* 2687 * The implementation of drain_all_pages(), exposing an extra parameter to 2688 * drain on all cpus. 2689 * 2690 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2691 * not empty. The check for non-emptiness can however race with a free to 2692 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2693 * that need the guarantee that every CPU has drained can disable the 2694 * optimizing racy check. 2695 */ 2696 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2697 { 2698 int cpu; 2699 2700 /* 2701 * Allocate in the BSS so we won't require allocation in 2702 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2703 */ 2704 static cpumask_t cpus_with_pcps; 2705 2706 /* 2707 * Do not drain if one is already in progress unless it's specific to 2708 * a zone. Such callers are primarily CMA and memory hotplug and need 2709 * the drain to be complete when the call returns. 2710 */ 2711 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2712 if (!zone) 2713 return; 2714 mutex_lock(&pcpu_drain_mutex); 2715 } 2716 2717 /* 2718 * We don't care about racing with CPU hotplug event 2719 * as offline notification will cause the notified 2720 * cpu to drain that CPU pcps and on_each_cpu_mask 2721 * disables preemption as part of its processing 2722 */ 2723 for_each_online_cpu(cpu) { 2724 struct per_cpu_pages *pcp; 2725 struct zone *z; 2726 bool has_pcps = false; 2727 2728 if (force_all_cpus) { 2729 /* 2730 * The pcp.count check is racy, some callers need a 2731 * guarantee that no cpu is missed. 2732 */ 2733 has_pcps = true; 2734 } else if (zone) { 2735 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2736 if (pcp->count) 2737 has_pcps = true; 2738 } else { 2739 for_each_populated_zone(z) { 2740 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2741 if (pcp->count) { 2742 has_pcps = true; 2743 break; 2744 } 2745 } 2746 } 2747 2748 if (has_pcps) 2749 cpumask_set_cpu(cpu, &cpus_with_pcps); 2750 else 2751 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2752 } 2753 2754 for_each_cpu(cpu, &cpus_with_pcps) { 2755 if (zone) 2756 drain_pages_zone(cpu, zone); 2757 else 2758 drain_pages(cpu); 2759 } 2760 2761 mutex_unlock(&pcpu_drain_mutex); 2762 } 2763 2764 /* 2765 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2766 * 2767 * When zone parameter is non-NULL, spill just the single zone's pages. 2768 */ 2769 void drain_all_pages(struct zone *zone) 2770 { 2771 __drain_all_pages(zone, false); 2772 } 2773 2774 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2775 { 2776 int min_nr_free, max_nr_free; 2777 2778 /* Free as much as possible if batch freeing high-order pages. */ 2779 if (unlikely(free_high)) 2780 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2781 2782 /* Check for PCP disabled or boot pageset */ 2783 if (unlikely(high < batch)) 2784 return 1; 2785 2786 /* Leave at least pcp->batch pages on the list */ 2787 min_nr_free = batch; 2788 max_nr_free = high - batch; 2789 2790 /* 2791 * Increase the batch number to the number of the consecutive 2792 * freed pages to reduce zone lock contention. 2793 */ 2794 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2795 2796 return batch; 2797 } 2798 2799 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2800 int batch, bool free_high) 2801 { 2802 int high, high_min, high_max; 2803 2804 high_min = READ_ONCE(pcp->high_min); 2805 high_max = READ_ONCE(pcp->high_max); 2806 high = pcp->high = clamp(pcp->high, high_min, high_max); 2807 2808 if (unlikely(!high)) 2809 return 0; 2810 2811 if (unlikely(free_high)) { 2812 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2813 high_min); 2814 return 0; 2815 } 2816 2817 /* 2818 * If reclaim is active, limit the number of pages that can be 2819 * stored on pcp lists 2820 */ 2821 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2822 int free_count = max_t(int, pcp->free_count, batch); 2823 2824 pcp->high = max(high - free_count, high_min); 2825 return min(batch << 2, pcp->high); 2826 } 2827 2828 if (high_min == high_max) 2829 return high; 2830 2831 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2832 int free_count = max_t(int, pcp->free_count, batch); 2833 2834 pcp->high = max(high - free_count, high_min); 2835 high = max(pcp->count, high_min); 2836 } else if (pcp->count >= high) { 2837 int need_high = pcp->free_count + batch; 2838 2839 /* pcp->high should be large enough to hold batch freed pages */ 2840 if (pcp->high < need_high) 2841 pcp->high = clamp(need_high, high_min, high_max); 2842 } 2843 2844 return high; 2845 } 2846 2847 /* 2848 * Tune pcp alloc factor and adjust count & free_count. Free pages to bring the 2849 * pcp's watermarks below high. 2850 * 2851 * May return a freed pcp, if during page freeing the pcp spinlock cannot be 2852 * reacquired. Return true if pcp is locked, false otherwise. 2853 */ 2854 static bool free_frozen_page_commit(struct zone *zone, 2855 struct per_cpu_pages *pcp, struct page *page, int migratetype, 2856 unsigned int order, fpi_t fpi_flags, unsigned long *UP_flags) 2857 { 2858 int high, batch; 2859 int to_free, to_free_batched; 2860 int pindex; 2861 int cpu = smp_processor_id(); 2862 int ret = true; 2863 bool free_high = false; 2864 2865 /* 2866 * On freeing, reduce the number of pages that are batch allocated. 2867 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2868 * allocations. 2869 */ 2870 pcp->alloc_factor >>= 1; 2871 __count_vm_events(PGFREE, 1 << order); 2872 pindex = order_to_pindex(migratetype, order); 2873 list_add(&page->pcp_list, &pcp->lists[pindex]); 2874 pcp->count += 1 << order; 2875 2876 batch = READ_ONCE(pcp->batch); 2877 /* 2878 * As high-order pages other than THP's stored on PCP can contribute 2879 * to fragmentation, limit the number stored when PCP is heavily 2880 * freeing without allocation. The remainder after bulk freeing 2881 * stops will be drained from vmstat refresh context. 2882 */ 2883 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2884 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) && 2885 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2886 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2887 pcp->count >= batch)); 2888 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2889 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2890 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2891 } 2892 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2893 pcp->free_count += (1 << order); 2894 2895 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 2896 /* 2897 * Do not attempt to take a zone lock. Let pcp->count get 2898 * over high mark temporarily. 2899 */ 2900 return true; 2901 } 2902 2903 high = nr_pcp_high(pcp, zone, batch, free_high); 2904 if (pcp->count < high) 2905 return true; 2906 2907 to_free = nr_pcp_free(pcp, batch, high, free_high); 2908 while (to_free > 0 && pcp->count > 0) { 2909 to_free_batched = min(to_free, batch); 2910 free_pcppages_bulk(zone, to_free_batched, pcp, pindex); 2911 to_free -= to_free_batched; 2912 2913 if (to_free == 0 || pcp->count == 0) 2914 break; 2915 2916 pcp_spin_unlock(pcp, *UP_flags); 2917 2918 pcp = pcp_spin_trylock(zone->per_cpu_pageset, *UP_flags); 2919 if (!pcp) { 2920 ret = false; 2921 break; 2922 } 2923 2924 /* 2925 * Check if this thread has been migrated to a different CPU. 2926 * If that is the case, give up and indicate that the pcp is 2927 * returned in an unlocked state. 2928 */ 2929 if (smp_processor_id() != cpu) { 2930 pcp_spin_unlock(pcp, *UP_flags); 2931 ret = false; 2932 break; 2933 } 2934 } 2935 2936 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2937 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2938 ZONE_MOVABLE, 0)) { 2939 struct pglist_data *pgdat = zone->zone_pgdat; 2940 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2941 2942 /* 2943 * Assume that memory pressure on this node is gone and may be 2944 * in a reclaimable state. If a memory fallback node exists, 2945 * direct reclaim may not have been triggered, causing a 2946 * 'hopeless node' to stay in that state for a while. Let 2947 * kswapd work again by resetting kswapd_failures. 2948 */ 2949 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES && 2950 next_memory_node(pgdat->node_id) < MAX_NUMNODES) 2951 atomic_set(&pgdat->kswapd_failures, 0); 2952 } 2953 return ret; 2954 } 2955 2956 /* 2957 * Free a pcp page 2958 */ 2959 static void __free_frozen_pages(struct page *page, unsigned int order, 2960 fpi_t fpi_flags) 2961 { 2962 unsigned long UP_flags; 2963 struct per_cpu_pages *pcp; 2964 struct zone *zone; 2965 unsigned long pfn = page_to_pfn(page); 2966 int migratetype; 2967 2968 if (!pcp_allowed_order(order)) { 2969 __free_pages_ok(page, order, fpi_flags); 2970 return; 2971 } 2972 2973 if (!free_pages_prepare(page, order)) 2974 return; 2975 2976 /* 2977 * We only track unmovable, reclaimable and movable on pcp lists. 2978 * Place ISOLATE pages on the isolated list because they are being 2979 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2980 * get those areas back if necessary. Otherwise, we may have to free 2981 * excessively into the page allocator 2982 */ 2983 zone = page_zone(page); 2984 migratetype = get_pfnblock_migratetype(page, pfn); 2985 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2986 if (unlikely(is_migrate_isolate(migratetype))) { 2987 free_one_page(zone, page, pfn, order, fpi_flags); 2988 return; 2989 } 2990 migratetype = MIGRATE_MOVABLE; 2991 } 2992 2993 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT) 2994 && (in_nmi() || in_hardirq()))) { 2995 add_page_to_zone_llist(zone, page, order); 2996 return; 2997 } 2998 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 2999 if (pcp) { 3000 if (!free_frozen_page_commit(zone, pcp, page, migratetype, 3001 order, fpi_flags, &UP_flags)) 3002 return; 3003 pcp_spin_unlock(pcp, UP_flags); 3004 } else { 3005 free_one_page(zone, page, pfn, order, fpi_flags); 3006 } 3007 } 3008 3009 void free_frozen_pages(struct page *page, unsigned int order) 3010 { 3011 __free_frozen_pages(page, order, FPI_NONE); 3012 } 3013 3014 void free_frozen_pages_nolock(struct page *page, unsigned int order) 3015 { 3016 __free_frozen_pages(page, order, FPI_TRYLOCK); 3017 } 3018 3019 /* 3020 * Free a batch of folios 3021 */ 3022 void free_unref_folios(struct folio_batch *folios) 3023 { 3024 unsigned long UP_flags; 3025 struct per_cpu_pages *pcp = NULL; 3026 struct zone *locked_zone = NULL; 3027 int i, j; 3028 3029 /* Prepare folios for freeing */ 3030 for (i = 0, j = 0; i < folios->nr; i++) { 3031 struct folio *folio = folios->folios[i]; 3032 unsigned long pfn = folio_pfn(folio); 3033 unsigned int order = folio_order(folio); 3034 3035 if (!free_pages_prepare(&folio->page, order)) 3036 continue; 3037 /* 3038 * Free orders not handled on the PCP directly to the 3039 * allocator. 3040 */ 3041 if (!pcp_allowed_order(order)) { 3042 free_one_page(folio_zone(folio), &folio->page, 3043 pfn, order, FPI_NONE); 3044 continue; 3045 } 3046 folio->private = (void *)(unsigned long)order; 3047 if (j != i) 3048 folios->folios[j] = folio; 3049 j++; 3050 } 3051 folios->nr = j; 3052 3053 for (i = 0; i < folios->nr; i++) { 3054 struct folio *folio = folios->folios[i]; 3055 struct zone *zone = folio_zone(folio); 3056 unsigned long pfn = folio_pfn(folio); 3057 unsigned int order = (unsigned long)folio->private; 3058 int migratetype; 3059 3060 folio->private = NULL; 3061 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 3062 3063 /* Different zone requires a different pcp lock */ 3064 if (zone != locked_zone || 3065 is_migrate_isolate(migratetype)) { 3066 if (pcp) { 3067 pcp_spin_unlock(pcp, UP_flags); 3068 locked_zone = NULL; 3069 pcp = NULL; 3070 } 3071 3072 /* 3073 * Free isolated pages directly to the 3074 * allocator, see comment in free_frozen_pages. 3075 */ 3076 if (is_migrate_isolate(migratetype)) { 3077 free_one_page(zone, &folio->page, pfn, 3078 order, FPI_NONE); 3079 continue; 3080 } 3081 3082 /* 3083 * trylock is necessary as folios may be getting freed 3084 * from IRQ or SoftIRQ context after an IO completion. 3085 */ 3086 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 3087 if (unlikely(!pcp)) { 3088 free_one_page(zone, &folio->page, pfn, 3089 order, FPI_NONE); 3090 continue; 3091 } 3092 locked_zone = zone; 3093 } 3094 3095 /* 3096 * Non-isolated types over MIGRATE_PCPTYPES get added 3097 * to the MIGRATE_MOVABLE pcp list. 3098 */ 3099 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3100 migratetype = MIGRATE_MOVABLE; 3101 3102 trace_mm_page_free_batched(&folio->page); 3103 if (!free_frozen_page_commit(zone, pcp, &folio->page, 3104 migratetype, order, FPI_NONE, &UP_flags)) { 3105 pcp = NULL; 3106 locked_zone = NULL; 3107 } 3108 } 3109 3110 if (pcp) 3111 pcp_spin_unlock(pcp, UP_flags); 3112 folio_batch_reinit(folios); 3113 } 3114 3115 /* 3116 * split_page takes a non-compound higher-order page, and splits it into 3117 * n (1<<order) sub-pages: page[0..n] 3118 * Each sub-page must be freed individually. 3119 * 3120 * Note: this is probably too low level an operation for use in drivers. 3121 * Please consult with lkml before using this in your driver. 3122 */ 3123 void split_page(struct page *page, unsigned int order) 3124 { 3125 int i; 3126 3127 VM_BUG_ON_PAGE(PageCompound(page), page); 3128 VM_BUG_ON_PAGE(!page_count(page), page); 3129 3130 for (i = 1; i < (1 << order); i++) 3131 set_page_refcounted(page + i); 3132 split_page_owner(page, order, 0); 3133 pgalloc_tag_split(page_folio(page), order, 0); 3134 split_page_memcg(page, order); 3135 } 3136 EXPORT_SYMBOL_GPL(split_page); 3137 3138 int __isolate_free_page(struct page *page, unsigned int order) 3139 { 3140 struct zone *zone = page_zone(page); 3141 int mt = get_pageblock_migratetype(page); 3142 3143 if (!is_migrate_isolate(mt)) { 3144 unsigned long watermark; 3145 /* 3146 * Obey watermarks as if the page was being allocated. We can 3147 * emulate a high-order watermark check with a raised order-0 3148 * watermark, because we already know our high-order page 3149 * exists. 3150 */ 3151 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3152 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3153 return 0; 3154 } 3155 3156 del_page_from_free_list(page, zone, order, mt); 3157 3158 /* 3159 * Set the pageblock if the isolated page is at least half of a 3160 * pageblock 3161 */ 3162 if (order >= pageblock_order - 1) { 3163 struct page *endpage = page + (1 << order) - 1; 3164 for (; page < endpage; page += pageblock_nr_pages) { 3165 int mt = get_pageblock_migratetype(page); 3166 /* 3167 * Only change normal pageblocks (i.e., they can merge 3168 * with others) 3169 */ 3170 if (migratetype_is_mergeable(mt)) 3171 move_freepages_block(zone, page, mt, 3172 MIGRATE_MOVABLE); 3173 } 3174 } 3175 3176 return 1UL << order; 3177 } 3178 3179 /** 3180 * __putback_isolated_page - Return a now-isolated page back where we got it 3181 * @page: Page that was isolated 3182 * @order: Order of the isolated page 3183 * @mt: The page's pageblock's migratetype 3184 * 3185 * This function is meant to return a page pulled from the free lists via 3186 * __isolate_free_page back to the free lists they were pulled from. 3187 */ 3188 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3189 { 3190 struct zone *zone = page_zone(page); 3191 3192 /* zone lock should be held when this function is called */ 3193 lockdep_assert_held(&zone->lock); 3194 3195 /* Return isolated page to tail of freelist. */ 3196 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3197 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3198 } 3199 3200 /* 3201 * Update NUMA hit/miss statistics 3202 */ 3203 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3204 long nr_account) 3205 { 3206 #ifdef CONFIG_NUMA 3207 enum numa_stat_item local_stat = NUMA_LOCAL; 3208 3209 /* skip numa counters update if numa stats is disabled */ 3210 if (!static_branch_likely(&vm_numa_stat_key)) 3211 return; 3212 3213 if (zone_to_nid(z) != numa_node_id()) 3214 local_stat = NUMA_OTHER; 3215 3216 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3217 __count_numa_events(z, NUMA_HIT, nr_account); 3218 else { 3219 __count_numa_events(z, NUMA_MISS, nr_account); 3220 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3221 } 3222 __count_numa_events(z, local_stat, nr_account); 3223 #endif 3224 } 3225 3226 static __always_inline 3227 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 3228 unsigned int order, unsigned int alloc_flags, 3229 int migratetype) 3230 { 3231 struct page *page; 3232 unsigned long flags; 3233 3234 do { 3235 page = NULL; 3236 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 3237 if (!spin_trylock_irqsave(&zone->lock, flags)) 3238 return NULL; 3239 } else { 3240 spin_lock_irqsave(&zone->lock, flags); 3241 } 3242 if (alloc_flags & ALLOC_HIGHATOMIC) 3243 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3244 if (!page) { 3245 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 3246 3247 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); 3248 3249 /* 3250 * If the allocation fails, allow OOM handling and 3251 * order-0 (atomic) allocs access to HIGHATOMIC 3252 * reserves as failing now is worse than failing a 3253 * high-order atomic allocation in the future. 3254 */ 3255 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 3256 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3257 3258 if (!page) { 3259 spin_unlock_irqrestore(&zone->lock, flags); 3260 return NULL; 3261 } 3262 } 3263 spin_unlock_irqrestore(&zone->lock, flags); 3264 } while (check_new_pages(page, order)); 3265 3266 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3267 zone_statistics(preferred_zone, zone, 1); 3268 3269 return page; 3270 } 3271 3272 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 3273 { 3274 int high, base_batch, batch, max_nr_alloc; 3275 int high_max, high_min; 3276 3277 base_batch = READ_ONCE(pcp->batch); 3278 high_min = READ_ONCE(pcp->high_min); 3279 high_max = READ_ONCE(pcp->high_max); 3280 high = pcp->high = clamp(pcp->high, high_min, high_max); 3281 3282 /* Check for PCP disabled or boot pageset */ 3283 if (unlikely(high < base_batch)) 3284 return 1; 3285 3286 if (order) 3287 batch = base_batch; 3288 else 3289 batch = (base_batch << pcp->alloc_factor); 3290 3291 /* 3292 * If we had larger pcp->high, we could avoid to allocate from 3293 * zone. 3294 */ 3295 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3296 high = pcp->high = min(high + batch, high_max); 3297 3298 if (!order) { 3299 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 3300 /* 3301 * Double the number of pages allocated each time there is 3302 * subsequent allocation of order-0 pages without any freeing. 3303 */ 3304 if (batch <= max_nr_alloc && 3305 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 3306 pcp->alloc_factor++; 3307 batch = min(batch, max_nr_alloc); 3308 } 3309 3310 /* 3311 * Scale batch relative to order if batch implies free pages 3312 * can be stored on the PCP. Batch can be 1 for small zones or 3313 * for boot pagesets which should never store free pages as 3314 * the pages may belong to arbitrary zones. 3315 */ 3316 if (batch > 1) 3317 batch = max(batch >> order, 2); 3318 3319 return batch; 3320 } 3321 3322 /* Remove page from the per-cpu list, caller must protect the list */ 3323 static inline 3324 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3325 int migratetype, 3326 unsigned int alloc_flags, 3327 struct per_cpu_pages *pcp, 3328 struct list_head *list) 3329 { 3330 struct page *page; 3331 3332 do { 3333 if (list_empty(list)) { 3334 int batch = nr_pcp_alloc(pcp, zone, order); 3335 int alloced; 3336 3337 alloced = rmqueue_bulk(zone, order, 3338 batch, list, 3339 migratetype, alloc_flags); 3340 3341 pcp->count += alloced << order; 3342 if (unlikely(list_empty(list))) 3343 return NULL; 3344 } 3345 3346 page = list_first_entry(list, struct page, pcp_list); 3347 list_del(&page->pcp_list); 3348 pcp->count -= 1 << order; 3349 } while (check_new_pages(page, order)); 3350 3351 return page; 3352 } 3353 3354 /* Lock and remove page from the per-cpu list */ 3355 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3356 struct zone *zone, unsigned int order, 3357 int migratetype, unsigned int alloc_flags) 3358 { 3359 struct per_cpu_pages *pcp; 3360 struct list_head *list; 3361 struct page *page; 3362 unsigned long UP_flags; 3363 3364 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 3365 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 3366 if (!pcp) 3367 return NULL; 3368 3369 /* 3370 * On allocation, reduce the number of pages that are batch freed. 3371 * See nr_pcp_free() where free_factor is increased for subsequent 3372 * frees. 3373 */ 3374 pcp->free_count >>= 1; 3375 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3376 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3377 pcp_spin_unlock(pcp, UP_flags); 3378 if (page) { 3379 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3380 zone_statistics(preferred_zone, zone, 1); 3381 } 3382 return page; 3383 } 3384 3385 /* 3386 * Allocate a page from the given zone. 3387 * Use pcplists for THP or "cheap" high-order allocations. 3388 */ 3389 3390 /* 3391 * Do not instrument rmqueue() with KMSAN. This function may call 3392 * __msan_poison_alloca() through a call to set_pfnblock_migratetype(). 3393 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3394 * may call rmqueue() again, which will result in a deadlock. 3395 */ 3396 __no_sanitize_memory 3397 static inline 3398 struct page *rmqueue(struct zone *preferred_zone, 3399 struct zone *zone, unsigned int order, 3400 gfp_t gfp_flags, unsigned int alloc_flags, 3401 int migratetype) 3402 { 3403 struct page *page; 3404 3405 if (likely(pcp_allowed_order(order))) { 3406 page = rmqueue_pcplist(preferred_zone, zone, order, 3407 migratetype, alloc_flags); 3408 if (likely(page)) 3409 goto out; 3410 } 3411 3412 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3413 migratetype); 3414 3415 out: 3416 /* Separate test+clear to avoid unnecessary atomics */ 3417 if ((alloc_flags & ALLOC_KSWAPD) && 3418 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3419 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3420 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3421 } 3422 3423 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3424 return page; 3425 } 3426 3427 /* 3428 * Reserve the pageblock(s) surrounding an allocation request for 3429 * exclusive use of high-order atomic allocations if there are no 3430 * empty page blocks that contain a page with a suitable order 3431 */ 3432 static void reserve_highatomic_pageblock(struct page *page, int order, 3433 struct zone *zone) 3434 { 3435 int mt; 3436 unsigned long max_managed, flags; 3437 3438 /* 3439 * The number reserved as: minimum is 1 pageblock, maximum is 3440 * roughly 1% of a zone. But if 1% of a zone falls below a 3441 * pageblock size, then don't reserve any pageblocks. 3442 * Check is race-prone but harmless. 3443 */ 3444 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 3445 return; 3446 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 3447 if (zone->nr_reserved_highatomic >= max_managed) 3448 return; 3449 3450 spin_lock_irqsave(&zone->lock, flags); 3451 3452 /* Recheck the nr_reserved_highatomic limit under the lock */ 3453 if (zone->nr_reserved_highatomic >= max_managed) 3454 goto out_unlock; 3455 3456 /* Yoink! */ 3457 mt = get_pageblock_migratetype(page); 3458 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 3459 if (!migratetype_is_mergeable(mt)) 3460 goto out_unlock; 3461 3462 if (order < pageblock_order) { 3463 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 3464 goto out_unlock; 3465 zone->nr_reserved_highatomic += pageblock_nr_pages; 3466 } else { 3467 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 3468 zone->nr_reserved_highatomic += 1 << order; 3469 } 3470 3471 out_unlock: 3472 spin_unlock_irqrestore(&zone->lock, flags); 3473 } 3474 3475 /* 3476 * Used when an allocation is about to fail under memory pressure. This 3477 * potentially hurts the reliability of high-order allocations when under 3478 * intense memory pressure but failed atomic allocations should be easier 3479 * to recover from than an OOM. 3480 * 3481 * If @force is true, try to unreserve pageblocks even though highatomic 3482 * pageblock is exhausted. 3483 */ 3484 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 3485 bool force) 3486 { 3487 struct zonelist *zonelist = ac->zonelist; 3488 unsigned long flags; 3489 struct zoneref *z; 3490 struct zone *zone; 3491 struct page *page; 3492 int order; 3493 int ret; 3494 3495 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 3496 ac->nodemask) { 3497 /* 3498 * Preserve at least one pageblock unless memory pressure 3499 * is really high. 3500 */ 3501 if (!force && zone->nr_reserved_highatomic <= 3502 pageblock_nr_pages) 3503 continue; 3504 3505 spin_lock_irqsave(&zone->lock, flags); 3506 for (order = 0; order < NR_PAGE_ORDERS; order++) { 3507 struct free_area *area = &(zone->free_area[order]); 3508 unsigned long size; 3509 3510 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 3511 if (!page) 3512 continue; 3513 3514 size = max(pageblock_nr_pages, 1UL << order); 3515 /* 3516 * It should never happen but changes to 3517 * locking could inadvertently allow a per-cpu 3518 * drain to add pages to MIGRATE_HIGHATOMIC 3519 * while unreserving so be safe and watch for 3520 * underflows. 3521 */ 3522 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) 3523 size = zone->nr_reserved_highatomic; 3524 zone->nr_reserved_highatomic -= size; 3525 3526 /* 3527 * Convert to ac->migratetype and avoid the normal 3528 * pageblock stealing heuristics. Minimally, the caller 3529 * is doing the work and needs the pages. More 3530 * importantly, if the block was always converted to 3531 * MIGRATE_UNMOVABLE or another type then the number 3532 * of pageblocks that cannot be completely freed 3533 * may increase. 3534 */ 3535 if (order < pageblock_order) 3536 ret = move_freepages_block(zone, page, 3537 MIGRATE_HIGHATOMIC, 3538 ac->migratetype); 3539 else { 3540 move_to_free_list(page, zone, order, 3541 MIGRATE_HIGHATOMIC, 3542 ac->migratetype); 3543 change_pageblock_range(page, order, 3544 ac->migratetype); 3545 ret = 1; 3546 } 3547 /* 3548 * Reserving the block(s) already succeeded, 3549 * so this should not fail on zone boundaries. 3550 */ 3551 WARN_ON_ONCE(ret == -1); 3552 if (ret > 0) { 3553 spin_unlock_irqrestore(&zone->lock, flags); 3554 return ret; 3555 } 3556 } 3557 spin_unlock_irqrestore(&zone->lock, flags); 3558 } 3559 3560 return false; 3561 } 3562 3563 static inline long __zone_watermark_unusable_free(struct zone *z, 3564 unsigned int order, unsigned int alloc_flags) 3565 { 3566 long unusable_free = (1 << order) - 1; 3567 3568 /* 3569 * If the caller does not have rights to reserves below the min 3570 * watermark then subtract the free pages reserved for highatomic. 3571 */ 3572 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3573 unusable_free += READ_ONCE(z->nr_free_highatomic); 3574 3575 #ifdef CONFIG_CMA 3576 /* If allocation can't use CMA areas don't use free CMA pages */ 3577 if (!(alloc_flags & ALLOC_CMA)) 3578 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3579 #endif 3580 3581 return unusable_free; 3582 } 3583 3584 /* 3585 * Return true if free base pages are above 'mark'. For high-order checks it 3586 * will return true of the order-0 watermark is reached and there is at least 3587 * one free page of a suitable size. Checking now avoids taking the zone lock 3588 * to check in the allocation paths if no pages are free. 3589 */ 3590 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3591 int highest_zoneidx, unsigned int alloc_flags, 3592 long free_pages) 3593 { 3594 long min = mark; 3595 int o; 3596 3597 /* free_pages may go negative - that's OK */ 3598 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3599 3600 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3601 /* 3602 * __GFP_HIGH allows access to 50% of the min reserve as well 3603 * as OOM. 3604 */ 3605 if (alloc_flags & ALLOC_MIN_RESERVE) { 3606 min -= min / 2; 3607 3608 /* 3609 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3610 * access more reserves than just __GFP_HIGH. Other 3611 * non-blocking allocations requests such as GFP_NOWAIT 3612 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3613 * access to the min reserve. 3614 */ 3615 if (alloc_flags & ALLOC_NON_BLOCK) 3616 min -= min / 4; 3617 } 3618 3619 /* 3620 * OOM victims can try even harder than the normal reserve 3621 * users on the grounds that it's definitely going to be in 3622 * the exit path shortly and free memory. Any allocation it 3623 * makes during the free path will be small and short-lived. 3624 */ 3625 if (alloc_flags & ALLOC_OOM) 3626 min -= min / 2; 3627 } 3628 3629 /* 3630 * Check watermarks for an order-0 allocation request. If these 3631 * are not met, then a high-order request also cannot go ahead 3632 * even if a suitable page happened to be free. 3633 */ 3634 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3635 return false; 3636 3637 /* If this is an order-0 request then the watermark is fine */ 3638 if (!order) 3639 return true; 3640 3641 /* For a high-order request, check at least one suitable page is free */ 3642 for (o = order; o < NR_PAGE_ORDERS; o++) { 3643 struct free_area *area = &z->free_area[o]; 3644 int mt; 3645 3646 if (!area->nr_free) 3647 continue; 3648 3649 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3650 if (!free_area_empty(area, mt)) 3651 return true; 3652 } 3653 3654 #ifdef CONFIG_CMA 3655 if ((alloc_flags & ALLOC_CMA) && 3656 !free_area_empty(area, MIGRATE_CMA)) { 3657 return true; 3658 } 3659 #endif 3660 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3661 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3662 return true; 3663 } 3664 } 3665 return false; 3666 } 3667 3668 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3669 int highest_zoneidx, unsigned int alloc_flags) 3670 { 3671 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3672 zone_page_state(z, NR_FREE_PAGES)); 3673 } 3674 3675 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3676 unsigned long mark, int highest_zoneidx, 3677 unsigned int alloc_flags, gfp_t gfp_mask) 3678 { 3679 long free_pages; 3680 3681 free_pages = zone_page_state(z, NR_FREE_PAGES); 3682 3683 /* 3684 * Fast check for order-0 only. If this fails then the reserves 3685 * need to be calculated. 3686 */ 3687 if (!order) { 3688 long usable_free; 3689 long reserved; 3690 3691 usable_free = free_pages; 3692 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3693 3694 /* reserved may over estimate high-atomic reserves. */ 3695 usable_free -= min(usable_free, reserved); 3696 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3697 return true; 3698 } 3699 3700 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3701 free_pages)) 3702 return true; 3703 3704 /* 3705 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3706 * when checking the min watermark. The min watermark is the 3707 * point where boosting is ignored so that kswapd is woken up 3708 * when below the low watermark. 3709 */ 3710 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3711 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3712 mark = z->_watermark[WMARK_MIN]; 3713 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3714 alloc_flags, free_pages); 3715 } 3716 3717 return false; 3718 } 3719 3720 #ifdef CONFIG_NUMA 3721 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3722 3723 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3724 { 3725 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3726 node_reclaim_distance; 3727 } 3728 #else /* CONFIG_NUMA */ 3729 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3730 { 3731 return true; 3732 } 3733 #endif /* CONFIG_NUMA */ 3734 3735 /* 3736 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3737 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3738 * premature use of a lower zone may cause lowmem pressure problems that 3739 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3740 * probably too small. It only makes sense to spread allocations to avoid 3741 * fragmentation between the Normal and DMA32 zones. 3742 */ 3743 static inline unsigned int 3744 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3745 { 3746 unsigned int alloc_flags; 3747 3748 /* 3749 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3750 * to save a branch. 3751 */ 3752 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3753 3754 if (defrag_mode) { 3755 alloc_flags |= ALLOC_NOFRAGMENT; 3756 return alloc_flags; 3757 } 3758 3759 #ifdef CONFIG_ZONE_DMA32 3760 if (!zone) 3761 return alloc_flags; 3762 3763 if (zone_idx(zone) != ZONE_NORMAL) 3764 return alloc_flags; 3765 3766 /* 3767 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3768 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3769 * on UMA that if Normal is populated then so is DMA32. 3770 */ 3771 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3772 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3773 return alloc_flags; 3774 3775 alloc_flags |= ALLOC_NOFRAGMENT; 3776 #endif /* CONFIG_ZONE_DMA32 */ 3777 return alloc_flags; 3778 } 3779 3780 /* Must be called after current_gfp_context() which can change gfp_mask */ 3781 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3782 unsigned int alloc_flags) 3783 { 3784 #ifdef CONFIG_CMA 3785 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3786 alloc_flags |= ALLOC_CMA; 3787 #endif 3788 return alloc_flags; 3789 } 3790 3791 /* 3792 * get_page_from_freelist goes through the zonelist trying to allocate 3793 * a page. 3794 */ 3795 static struct page * 3796 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3797 const struct alloc_context *ac) 3798 { 3799 struct zoneref *z; 3800 struct zone *zone; 3801 struct pglist_data *last_pgdat = NULL; 3802 bool last_pgdat_dirty_ok = false; 3803 bool no_fallback; 3804 bool skip_kswapd_nodes = nr_online_nodes > 1; 3805 bool skipped_kswapd_nodes = false; 3806 3807 retry: 3808 /* 3809 * Scan zonelist, looking for a zone with enough free. 3810 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c. 3811 */ 3812 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3813 z = ac->preferred_zoneref; 3814 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3815 ac->nodemask) { 3816 struct page *page; 3817 unsigned long mark; 3818 3819 if (cpusets_enabled() && 3820 (alloc_flags & ALLOC_CPUSET) && 3821 !__cpuset_zone_allowed(zone, gfp_mask)) 3822 continue; 3823 /* 3824 * When allocating a page cache page for writing, we 3825 * want to get it from a node that is within its dirty 3826 * limit, such that no single node holds more than its 3827 * proportional share of globally allowed dirty pages. 3828 * The dirty limits take into account the node's 3829 * lowmem reserves and high watermark so that kswapd 3830 * should be able to balance it without having to 3831 * write pages from its LRU list. 3832 * 3833 * XXX: For now, allow allocations to potentially 3834 * exceed the per-node dirty limit in the slowpath 3835 * (spread_dirty_pages unset) before going into reclaim, 3836 * which is important when on a NUMA setup the allowed 3837 * nodes are together not big enough to reach the 3838 * global limit. The proper fix for these situations 3839 * will require awareness of nodes in the 3840 * dirty-throttling and the flusher threads. 3841 */ 3842 if (ac->spread_dirty_pages) { 3843 if (last_pgdat != zone->zone_pgdat) { 3844 last_pgdat = zone->zone_pgdat; 3845 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3846 } 3847 3848 if (!last_pgdat_dirty_ok) 3849 continue; 3850 } 3851 3852 if (no_fallback && !defrag_mode && nr_online_nodes > 1 && 3853 zone != zonelist_zone(ac->preferred_zoneref)) { 3854 int local_nid; 3855 3856 /* 3857 * If moving to a remote node, retry but allow 3858 * fragmenting fallbacks. Locality is more important 3859 * than fragmentation avoidance. 3860 */ 3861 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3862 if (zone_to_nid(zone) != local_nid) { 3863 alloc_flags &= ~ALLOC_NOFRAGMENT; 3864 goto retry; 3865 } 3866 } 3867 3868 /* 3869 * If kswapd is already active on a node, keep looking 3870 * for other nodes that might be idle. This can happen 3871 * if another process has NUMA bindings and is causing 3872 * kswapd wakeups on only some nodes. Avoid accidental 3873 * "node_reclaim_mode"-like behavior in this case. 3874 */ 3875 if (skip_kswapd_nodes && 3876 !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) { 3877 skipped_kswapd_nodes = true; 3878 continue; 3879 } 3880 3881 cond_accept_memory(zone, order, alloc_flags); 3882 3883 /* 3884 * Detect whether the number of free pages is below high 3885 * watermark. If so, we will decrease pcp->high and free 3886 * PCP pages in free path to reduce the possibility of 3887 * premature page reclaiming. Detection is done here to 3888 * avoid to do that in hotter free path. 3889 */ 3890 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3891 goto check_alloc_wmark; 3892 3893 mark = high_wmark_pages(zone); 3894 if (zone_watermark_fast(zone, order, mark, 3895 ac->highest_zoneidx, alloc_flags, 3896 gfp_mask)) 3897 goto try_this_zone; 3898 else 3899 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3900 3901 check_alloc_wmark: 3902 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3903 if (!zone_watermark_fast(zone, order, mark, 3904 ac->highest_zoneidx, alloc_flags, 3905 gfp_mask)) { 3906 int ret; 3907 3908 if (cond_accept_memory(zone, order, alloc_flags)) 3909 goto try_this_zone; 3910 3911 /* 3912 * Watermark failed for this zone, but see if we can 3913 * grow this zone if it contains deferred pages. 3914 */ 3915 if (deferred_pages_enabled()) { 3916 if (_deferred_grow_zone(zone, order)) 3917 goto try_this_zone; 3918 } 3919 /* Checked here to keep the fast path fast */ 3920 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3921 if (alloc_flags & ALLOC_NO_WATERMARKS) 3922 goto try_this_zone; 3923 3924 if (!node_reclaim_enabled() || 3925 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3926 continue; 3927 3928 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3929 switch (ret) { 3930 case NODE_RECLAIM_NOSCAN: 3931 /* did not scan */ 3932 continue; 3933 case NODE_RECLAIM_FULL: 3934 /* scanned but unreclaimable */ 3935 continue; 3936 default: 3937 /* did we reclaim enough */ 3938 if (zone_watermark_ok(zone, order, mark, 3939 ac->highest_zoneidx, alloc_flags)) 3940 goto try_this_zone; 3941 3942 continue; 3943 } 3944 } 3945 3946 try_this_zone: 3947 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3948 gfp_mask, alloc_flags, ac->migratetype); 3949 if (page) { 3950 prep_new_page(page, order, gfp_mask, alloc_flags); 3951 3952 /* 3953 * If this is a high-order atomic allocation then check 3954 * if the pageblock should be reserved for the future 3955 */ 3956 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3957 reserve_highatomic_pageblock(page, order, zone); 3958 3959 return page; 3960 } else { 3961 if (cond_accept_memory(zone, order, alloc_flags)) 3962 goto try_this_zone; 3963 3964 /* Try again if zone has deferred pages */ 3965 if (deferred_pages_enabled()) { 3966 if (_deferred_grow_zone(zone, order)) 3967 goto try_this_zone; 3968 } 3969 } 3970 } 3971 3972 /* 3973 * If we skipped over nodes with active kswapds and found no 3974 * idle nodes, retry and place anywhere the watermarks permit. 3975 */ 3976 if (skip_kswapd_nodes && skipped_kswapd_nodes) { 3977 skip_kswapd_nodes = false; 3978 goto retry; 3979 } 3980 3981 /* 3982 * It's possible on a UMA machine to get through all zones that are 3983 * fragmented. If avoiding fragmentation, reset and try again. 3984 */ 3985 if (no_fallback && !defrag_mode) { 3986 alloc_flags &= ~ALLOC_NOFRAGMENT; 3987 goto retry; 3988 } 3989 3990 return NULL; 3991 } 3992 3993 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3994 { 3995 unsigned int filter = SHOW_MEM_FILTER_NODES; 3996 3997 /* 3998 * This documents exceptions given to allocations in certain 3999 * contexts that are allowed to allocate outside current's set 4000 * of allowed nodes. 4001 */ 4002 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4003 if (tsk_is_oom_victim(current) || 4004 (current->flags & (PF_MEMALLOC | PF_EXITING))) 4005 filter &= ~SHOW_MEM_FILTER_NODES; 4006 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 4007 filter &= ~SHOW_MEM_FILTER_NODES; 4008 4009 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 4010 mem_cgroup_show_protected_memory(NULL); 4011 } 4012 4013 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 4014 { 4015 struct va_format vaf; 4016 va_list args; 4017 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 4018 4019 if ((gfp_mask & __GFP_NOWARN) || 4020 !__ratelimit(&nopage_rs) || 4021 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 4022 return; 4023 4024 va_start(args, fmt); 4025 vaf.fmt = fmt; 4026 vaf.va = &args; 4027 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4028 current->comm, &vaf, gfp_mask, &gfp_mask, 4029 nodemask_pr_args(nodemask)); 4030 va_end(args); 4031 4032 cpuset_print_current_mems_allowed(); 4033 pr_cont("\n"); 4034 dump_stack(); 4035 warn_alloc_show_mem(gfp_mask, nodemask); 4036 } 4037 4038 static inline struct page * 4039 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4040 unsigned int alloc_flags, 4041 const struct alloc_context *ac) 4042 { 4043 struct page *page; 4044 4045 page = get_page_from_freelist(gfp_mask, order, 4046 alloc_flags|ALLOC_CPUSET, ac); 4047 /* 4048 * fallback to ignore cpuset restriction if our nodes 4049 * are depleted 4050 */ 4051 if (!page) 4052 page = get_page_from_freelist(gfp_mask, order, 4053 alloc_flags, ac); 4054 return page; 4055 } 4056 4057 static inline struct page * 4058 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4059 const struct alloc_context *ac, unsigned long *did_some_progress) 4060 { 4061 struct oom_control oc = { 4062 .zonelist = ac->zonelist, 4063 .nodemask = ac->nodemask, 4064 .memcg = NULL, 4065 .gfp_mask = gfp_mask, 4066 .order = order, 4067 }; 4068 struct page *page; 4069 4070 *did_some_progress = 0; 4071 4072 /* 4073 * Acquire the oom lock. If that fails, somebody else is 4074 * making progress for us. 4075 */ 4076 if (!mutex_trylock(&oom_lock)) { 4077 *did_some_progress = 1; 4078 schedule_timeout_uninterruptible(1); 4079 return NULL; 4080 } 4081 4082 /* 4083 * Go through the zonelist yet one more time, keep very high watermark 4084 * here, this is only to catch a parallel oom killing, we must fail if 4085 * we're still under heavy pressure. But make sure that this reclaim 4086 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4087 * allocation which will never fail due to oom_lock already held. 4088 */ 4089 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4090 ~__GFP_DIRECT_RECLAIM, order, 4091 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4092 if (page) 4093 goto out; 4094 4095 /* Coredumps can quickly deplete all memory reserves */ 4096 if (current->flags & PF_DUMPCORE) 4097 goto out; 4098 /* The OOM killer will not help higher order allocs */ 4099 if (order > PAGE_ALLOC_COSTLY_ORDER) 4100 goto out; 4101 /* 4102 * We have already exhausted all our reclaim opportunities without any 4103 * success so it is time to admit defeat. We will skip the OOM killer 4104 * because it is very likely that the caller has a more reasonable 4105 * fallback than shooting a random task. 4106 * 4107 * The OOM killer may not free memory on a specific node. 4108 */ 4109 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4110 goto out; 4111 /* The OOM killer does not needlessly kill tasks for lowmem */ 4112 if (ac->highest_zoneidx < ZONE_NORMAL) 4113 goto out; 4114 if (pm_suspended_storage()) 4115 goto out; 4116 /* 4117 * XXX: GFP_NOFS allocations should rather fail than rely on 4118 * other request to make a forward progress. 4119 * We are in an unfortunate situation where out_of_memory cannot 4120 * do much for this context but let's try it to at least get 4121 * access to memory reserved if the current task is killed (see 4122 * out_of_memory). Once filesystems are ready to handle allocation 4123 * failures more gracefully we should just bail out here. 4124 */ 4125 4126 /* Exhausted what can be done so it's blame time */ 4127 if (out_of_memory(&oc) || 4128 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 4129 *did_some_progress = 1; 4130 4131 /* 4132 * Help non-failing allocations by giving them access to memory 4133 * reserves 4134 */ 4135 if (gfp_mask & __GFP_NOFAIL) 4136 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4137 ALLOC_NO_WATERMARKS, ac); 4138 } 4139 out: 4140 mutex_unlock(&oom_lock); 4141 return page; 4142 } 4143 4144 /* 4145 * Maximum number of compaction retries with a progress before OOM 4146 * killer is consider as the only way to move forward. 4147 */ 4148 #define MAX_COMPACT_RETRIES 16 4149 4150 #ifdef CONFIG_COMPACTION 4151 /* Try memory compaction for high-order allocations before reclaim */ 4152 static struct page * 4153 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4154 unsigned int alloc_flags, const struct alloc_context *ac, 4155 enum compact_priority prio, enum compact_result *compact_result) 4156 { 4157 struct page *page = NULL; 4158 unsigned long pflags; 4159 unsigned int noreclaim_flag; 4160 4161 if (!order) 4162 return NULL; 4163 4164 psi_memstall_enter(&pflags); 4165 delayacct_compact_start(); 4166 noreclaim_flag = memalloc_noreclaim_save(); 4167 4168 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4169 prio, &page); 4170 4171 memalloc_noreclaim_restore(noreclaim_flag); 4172 psi_memstall_leave(&pflags); 4173 delayacct_compact_end(); 4174 4175 if (*compact_result == COMPACT_SKIPPED) 4176 return NULL; 4177 /* 4178 * At least in one zone compaction wasn't deferred or skipped, so let's 4179 * count a compaction stall 4180 */ 4181 count_vm_event(COMPACTSTALL); 4182 4183 /* Prep a captured page if available */ 4184 if (page) 4185 prep_new_page(page, order, gfp_mask, alloc_flags); 4186 4187 /* Try get a page from the freelist if available */ 4188 if (!page) 4189 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4190 4191 if (page) { 4192 struct zone *zone = page_zone(page); 4193 4194 zone->compact_blockskip_flush = false; 4195 compaction_defer_reset(zone, order, true); 4196 count_vm_event(COMPACTSUCCESS); 4197 return page; 4198 } 4199 4200 /* 4201 * It's bad if compaction run occurs and fails. The most likely reason 4202 * is that pages exist, but not enough to satisfy watermarks. 4203 */ 4204 count_vm_event(COMPACTFAIL); 4205 4206 cond_resched(); 4207 4208 return NULL; 4209 } 4210 4211 static inline bool 4212 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4213 enum compact_result compact_result, 4214 enum compact_priority *compact_priority, 4215 int *compaction_retries) 4216 { 4217 int max_retries = MAX_COMPACT_RETRIES; 4218 int min_priority; 4219 bool ret = false; 4220 int retries = *compaction_retries; 4221 enum compact_priority priority = *compact_priority; 4222 4223 if (!order) 4224 return false; 4225 4226 if (fatal_signal_pending(current)) 4227 return false; 4228 4229 /* 4230 * Compaction was skipped due to a lack of free order-0 4231 * migration targets. Continue if reclaim can help. 4232 */ 4233 if (compact_result == COMPACT_SKIPPED) { 4234 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4235 goto out; 4236 } 4237 4238 /* 4239 * Compaction managed to coalesce some page blocks, but the 4240 * allocation failed presumably due to a race. Retry some. 4241 */ 4242 if (compact_result == COMPACT_SUCCESS) { 4243 /* 4244 * !costly requests are much more important than 4245 * __GFP_RETRY_MAYFAIL costly ones because they are de 4246 * facto nofail and invoke OOM killer to move on while 4247 * costly can fail and users are ready to cope with 4248 * that. 1/4 retries is rather arbitrary but we would 4249 * need much more detailed feedback from compaction to 4250 * make a better decision. 4251 */ 4252 if (order > PAGE_ALLOC_COSTLY_ORDER) 4253 max_retries /= 4; 4254 4255 if (++(*compaction_retries) <= max_retries) { 4256 ret = true; 4257 goto out; 4258 } 4259 } 4260 4261 /* 4262 * Compaction failed. Retry with increasing priority. 4263 */ 4264 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 4265 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4266 4267 if (*compact_priority > min_priority) { 4268 (*compact_priority)--; 4269 *compaction_retries = 0; 4270 ret = true; 4271 } 4272 out: 4273 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4274 return ret; 4275 } 4276 #else 4277 static inline struct page * 4278 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4279 unsigned int alloc_flags, const struct alloc_context *ac, 4280 enum compact_priority prio, enum compact_result *compact_result) 4281 { 4282 *compact_result = COMPACT_SKIPPED; 4283 return NULL; 4284 } 4285 4286 static inline bool 4287 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4288 enum compact_result compact_result, 4289 enum compact_priority *compact_priority, 4290 int *compaction_retries) 4291 { 4292 struct zone *zone; 4293 struct zoneref *z; 4294 4295 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4296 return false; 4297 4298 /* 4299 * There are setups with compaction disabled which would prefer to loop 4300 * inside the allocator rather than hit the oom killer prematurely. 4301 * Let's give them a good hope and keep retrying while the order-0 4302 * watermarks are OK. 4303 */ 4304 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4305 ac->highest_zoneidx, ac->nodemask) { 4306 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4307 ac->highest_zoneidx, alloc_flags)) 4308 return true; 4309 } 4310 return false; 4311 } 4312 #endif /* CONFIG_COMPACTION */ 4313 4314 #ifdef CONFIG_LOCKDEP 4315 static struct lockdep_map __fs_reclaim_map = 4316 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4317 4318 static bool __need_reclaim(gfp_t gfp_mask) 4319 { 4320 /* no reclaim without waiting on it */ 4321 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4322 return false; 4323 4324 /* this guy won't enter reclaim */ 4325 if (current->flags & PF_MEMALLOC) 4326 return false; 4327 4328 if (gfp_mask & __GFP_NOLOCKDEP) 4329 return false; 4330 4331 return true; 4332 } 4333 4334 void __fs_reclaim_acquire(unsigned long ip) 4335 { 4336 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4337 } 4338 4339 void __fs_reclaim_release(unsigned long ip) 4340 { 4341 lock_release(&__fs_reclaim_map, ip); 4342 } 4343 4344 void fs_reclaim_acquire(gfp_t gfp_mask) 4345 { 4346 gfp_mask = current_gfp_context(gfp_mask); 4347 4348 if (__need_reclaim(gfp_mask)) { 4349 if (gfp_mask & __GFP_FS) 4350 __fs_reclaim_acquire(_RET_IP_); 4351 4352 #ifdef CONFIG_MMU_NOTIFIER 4353 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4354 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4355 #endif 4356 4357 } 4358 } 4359 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4360 4361 void fs_reclaim_release(gfp_t gfp_mask) 4362 { 4363 gfp_mask = current_gfp_context(gfp_mask); 4364 4365 if (__need_reclaim(gfp_mask)) { 4366 if (gfp_mask & __GFP_FS) 4367 __fs_reclaim_release(_RET_IP_); 4368 } 4369 } 4370 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4371 #endif 4372 4373 /* 4374 * Zonelists may change due to hotplug during allocation. Detect when zonelists 4375 * have been rebuilt so allocation retries. Reader side does not lock and 4376 * retries the allocation if zonelist changes. Writer side is protected by the 4377 * embedded spin_lock. 4378 */ 4379 static DEFINE_SEQLOCK(zonelist_update_seq); 4380 4381 static unsigned int zonelist_iter_begin(void) 4382 { 4383 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4384 return read_seqbegin(&zonelist_update_seq); 4385 4386 return 0; 4387 } 4388 4389 static unsigned int check_retry_zonelist(unsigned int seq) 4390 { 4391 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4392 return read_seqretry(&zonelist_update_seq, seq); 4393 4394 return seq; 4395 } 4396 4397 /* Perform direct synchronous page reclaim */ 4398 static unsigned long 4399 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4400 const struct alloc_context *ac) 4401 { 4402 unsigned int noreclaim_flag; 4403 unsigned long progress; 4404 4405 cond_resched(); 4406 4407 /* We now go into synchronous reclaim */ 4408 cpuset_memory_pressure_bump(); 4409 fs_reclaim_acquire(gfp_mask); 4410 noreclaim_flag = memalloc_noreclaim_save(); 4411 4412 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4413 ac->nodemask); 4414 4415 memalloc_noreclaim_restore(noreclaim_flag); 4416 fs_reclaim_release(gfp_mask); 4417 4418 cond_resched(); 4419 4420 return progress; 4421 } 4422 4423 /* The really slow allocator path where we enter direct reclaim */ 4424 static inline struct page * 4425 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4426 unsigned int alloc_flags, const struct alloc_context *ac, 4427 unsigned long *did_some_progress) 4428 { 4429 struct page *page = NULL; 4430 unsigned long pflags; 4431 bool drained = false; 4432 4433 psi_memstall_enter(&pflags); 4434 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4435 if (unlikely(!(*did_some_progress))) 4436 goto out; 4437 4438 retry: 4439 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4440 4441 /* 4442 * If an allocation failed after direct reclaim, it could be because 4443 * pages are pinned on the per-cpu lists or in high alloc reserves. 4444 * Shrink them and try again 4445 */ 4446 if (!page && !drained) { 4447 unreserve_highatomic_pageblock(ac, false); 4448 drain_all_pages(NULL); 4449 drained = true; 4450 goto retry; 4451 } 4452 out: 4453 psi_memstall_leave(&pflags); 4454 4455 return page; 4456 } 4457 4458 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4459 const struct alloc_context *ac) 4460 { 4461 struct zoneref *z; 4462 struct zone *zone; 4463 pg_data_t *last_pgdat = NULL; 4464 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4465 unsigned int reclaim_order; 4466 4467 if (defrag_mode) 4468 reclaim_order = max(order, pageblock_order); 4469 else 4470 reclaim_order = order; 4471 4472 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4473 ac->nodemask) { 4474 if (!managed_zone(zone)) 4475 continue; 4476 if (last_pgdat == zone->zone_pgdat) 4477 continue; 4478 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); 4479 last_pgdat = zone->zone_pgdat; 4480 } 4481 } 4482 4483 static inline unsigned int 4484 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4485 { 4486 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4487 4488 /* 4489 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4490 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4491 * to save two branches. 4492 */ 4493 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4494 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4495 4496 /* 4497 * The caller may dip into page reserves a bit more if the caller 4498 * cannot run direct reclaim, or if the caller has realtime scheduling 4499 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4500 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 4501 */ 4502 alloc_flags |= (__force int) 4503 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4504 4505 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4506 /* 4507 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4508 * if it can't schedule. 4509 */ 4510 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4511 alloc_flags |= ALLOC_NON_BLOCK; 4512 4513 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE)) 4514 alloc_flags |= ALLOC_HIGHATOMIC; 4515 } 4516 4517 /* 4518 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4519 * GFP_ATOMIC) rather than fail, see the comment for 4520 * cpuset_current_node_allowed(). 4521 */ 4522 if (alloc_flags & ALLOC_MIN_RESERVE) 4523 alloc_flags &= ~ALLOC_CPUSET; 4524 } else if (unlikely(rt_or_dl_task(current)) && in_task()) 4525 alloc_flags |= ALLOC_MIN_RESERVE; 4526 4527 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4528 4529 if (defrag_mode) 4530 alloc_flags |= ALLOC_NOFRAGMENT; 4531 4532 return alloc_flags; 4533 } 4534 4535 static bool oom_reserves_allowed(struct task_struct *tsk) 4536 { 4537 if (!tsk_is_oom_victim(tsk)) 4538 return false; 4539 4540 /* 4541 * !MMU doesn't have oom reaper so give access to memory reserves 4542 * only to the thread with TIF_MEMDIE set 4543 */ 4544 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4545 return false; 4546 4547 return true; 4548 } 4549 4550 /* 4551 * Distinguish requests which really need access to full memory 4552 * reserves from oom victims which can live with a portion of it 4553 */ 4554 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4555 { 4556 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4557 return 0; 4558 if (gfp_mask & __GFP_MEMALLOC) 4559 return ALLOC_NO_WATERMARKS; 4560 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4561 return ALLOC_NO_WATERMARKS; 4562 if (!in_interrupt()) { 4563 if (current->flags & PF_MEMALLOC) 4564 return ALLOC_NO_WATERMARKS; 4565 else if (oom_reserves_allowed(current)) 4566 return ALLOC_OOM; 4567 } 4568 4569 return 0; 4570 } 4571 4572 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4573 { 4574 return !!__gfp_pfmemalloc_flags(gfp_mask); 4575 } 4576 4577 /* 4578 * Checks whether it makes sense to retry the reclaim to make a forward progress 4579 * for the given allocation request. 4580 * 4581 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4582 * without success, or when we couldn't even meet the watermark if we 4583 * reclaimed all remaining pages on the LRU lists. 4584 * 4585 * Returns true if a retry is viable or false to enter the oom path. 4586 */ 4587 static inline bool 4588 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4589 struct alloc_context *ac, int alloc_flags, 4590 bool did_some_progress, int *no_progress_loops) 4591 { 4592 struct zone *zone; 4593 struct zoneref *z; 4594 bool ret = false; 4595 4596 /* 4597 * Costly allocations might have made a progress but this doesn't mean 4598 * their order will become available due to high fragmentation so 4599 * always increment the no progress counter for them 4600 */ 4601 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4602 *no_progress_loops = 0; 4603 else 4604 (*no_progress_loops)++; 4605 4606 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4607 goto out; 4608 4609 4610 /* 4611 * Keep reclaiming pages while there is a chance this will lead 4612 * somewhere. If none of the target zones can satisfy our allocation 4613 * request even if all reclaimable pages are considered then we are 4614 * screwed and have to go OOM. 4615 */ 4616 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4617 ac->highest_zoneidx, ac->nodemask) { 4618 unsigned long available; 4619 unsigned long reclaimable; 4620 unsigned long min_wmark = min_wmark_pages(zone); 4621 bool wmark; 4622 4623 if (cpusets_enabled() && 4624 (alloc_flags & ALLOC_CPUSET) && 4625 !__cpuset_zone_allowed(zone, gfp_mask)) 4626 continue; 4627 4628 available = reclaimable = zone_reclaimable_pages(zone); 4629 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4630 4631 /* 4632 * Would the allocation succeed if we reclaimed all 4633 * reclaimable pages? 4634 */ 4635 wmark = __zone_watermark_ok(zone, order, min_wmark, 4636 ac->highest_zoneidx, alloc_flags, available); 4637 trace_reclaim_retry_zone(z, order, reclaimable, 4638 available, min_wmark, *no_progress_loops, wmark); 4639 if (wmark) { 4640 ret = true; 4641 break; 4642 } 4643 } 4644 4645 /* 4646 * Memory allocation/reclaim might be called from a WQ context and the 4647 * current implementation of the WQ concurrency control doesn't 4648 * recognize that a particular WQ is congested if the worker thread is 4649 * looping without ever sleeping. Therefore we have to do a short sleep 4650 * here rather than calling cond_resched(). 4651 */ 4652 if (current->flags & PF_WQ_WORKER) 4653 schedule_timeout_uninterruptible(1); 4654 else 4655 cond_resched(); 4656 out: 4657 /* Before OOM, exhaust highatomic_reserve */ 4658 if (!ret) 4659 return unreserve_highatomic_pageblock(ac, true); 4660 4661 return ret; 4662 } 4663 4664 static inline bool 4665 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4666 { 4667 /* 4668 * It's possible that cpuset's mems_allowed and the nodemask from 4669 * mempolicy don't intersect. This should be normally dealt with by 4670 * policy_nodemask(), but it's possible to race with cpuset update in 4671 * such a way the check therein was true, and then it became false 4672 * before we got our cpuset_mems_cookie here. 4673 * This assumes that for all allocations, ac->nodemask can come only 4674 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4675 * when it does not intersect with the cpuset restrictions) or the 4676 * caller can deal with a violated nodemask. 4677 */ 4678 if (cpusets_enabled() && ac->nodemask && 4679 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4680 ac->nodemask = NULL; 4681 return true; 4682 } 4683 4684 /* 4685 * When updating a task's mems_allowed or mempolicy nodemask, it is 4686 * possible to race with parallel threads in such a way that our 4687 * allocation can fail while the mask is being updated. If we are about 4688 * to fail, check if the cpuset changed during allocation and if so, 4689 * retry. 4690 */ 4691 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4692 return true; 4693 4694 return false; 4695 } 4696 4697 static inline struct page * 4698 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4699 struct alloc_context *ac) 4700 { 4701 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4702 bool can_compact = gfp_compaction_allowed(gfp_mask); 4703 bool nofail = gfp_mask & __GFP_NOFAIL; 4704 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4705 struct page *page = NULL; 4706 unsigned int alloc_flags; 4707 unsigned long did_some_progress; 4708 enum compact_priority compact_priority; 4709 enum compact_result compact_result; 4710 int compaction_retries; 4711 int no_progress_loops; 4712 unsigned int cpuset_mems_cookie; 4713 unsigned int zonelist_iter_cookie; 4714 int reserve_flags; 4715 4716 if (unlikely(nofail)) { 4717 /* 4718 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM, 4719 * otherwise, we may result in lockup. 4720 */ 4721 WARN_ON_ONCE(!can_direct_reclaim); 4722 /* 4723 * PF_MEMALLOC request from this context is rather bizarre 4724 * because we cannot reclaim anything and only can loop waiting 4725 * for somebody to do a work for us. 4726 */ 4727 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4728 } 4729 4730 restart: 4731 compaction_retries = 0; 4732 no_progress_loops = 0; 4733 compact_result = COMPACT_SKIPPED; 4734 compact_priority = DEF_COMPACT_PRIORITY; 4735 cpuset_mems_cookie = read_mems_allowed_begin(); 4736 zonelist_iter_cookie = zonelist_iter_begin(); 4737 4738 /* 4739 * The fast path uses conservative alloc_flags to succeed only until 4740 * kswapd needs to be woken up, and to avoid the cost of setting up 4741 * alloc_flags precisely. So we do that now. 4742 */ 4743 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4744 4745 /* 4746 * We need to recalculate the starting point for the zonelist iterator 4747 * because we might have used different nodemask in the fast path, or 4748 * there was a cpuset modification and we are retrying - otherwise we 4749 * could end up iterating over non-eligible zones endlessly. 4750 */ 4751 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4752 ac->highest_zoneidx, ac->nodemask); 4753 if (!zonelist_zone(ac->preferred_zoneref)) 4754 goto nopage; 4755 4756 /* 4757 * Check for insane configurations where the cpuset doesn't contain 4758 * any suitable zone to satisfy the request - e.g. non-movable 4759 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4760 */ 4761 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4762 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4763 ac->highest_zoneidx, 4764 &cpuset_current_mems_allowed); 4765 if (!zonelist_zone(z)) 4766 goto nopage; 4767 } 4768 4769 if (alloc_flags & ALLOC_KSWAPD) 4770 wake_all_kswapds(order, gfp_mask, ac); 4771 4772 /* 4773 * The adjusted alloc_flags might result in immediate success, so try 4774 * that first 4775 */ 4776 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4777 if (page) 4778 goto got_pg; 4779 4780 /* 4781 * For costly allocations, try direct compaction first, as it's likely 4782 * that we have enough base pages and don't need to reclaim. For non- 4783 * movable high-order allocations, do that as well, as compaction will 4784 * try prevent permanent fragmentation by migrating from blocks of the 4785 * same migratetype. 4786 * Don't try this for allocations that are allowed to ignore 4787 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4788 */ 4789 if (can_direct_reclaim && can_compact && 4790 (costly_order || 4791 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4792 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4793 page = __alloc_pages_direct_compact(gfp_mask, order, 4794 alloc_flags, ac, 4795 INIT_COMPACT_PRIORITY, 4796 &compact_result); 4797 if (page) 4798 goto got_pg; 4799 4800 /* 4801 * Checks for costly allocations with __GFP_NORETRY, which 4802 * includes some THP page fault allocations 4803 */ 4804 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4805 /* 4806 * If allocating entire pageblock(s) and compaction 4807 * failed because all zones are below low watermarks 4808 * or is prohibited because it recently failed at this 4809 * order, fail immediately unless the allocator has 4810 * requested compaction and reclaim retry. 4811 * 4812 * Reclaim is 4813 * - potentially very expensive because zones are far 4814 * below their low watermarks or this is part of very 4815 * bursty high order allocations, 4816 * - not guaranteed to help because isolate_freepages() 4817 * may not iterate over freed pages as part of its 4818 * linear scan, and 4819 * - unlikely to make entire pageblocks free on its 4820 * own. 4821 */ 4822 if (compact_result == COMPACT_SKIPPED || 4823 compact_result == COMPACT_DEFERRED) 4824 goto nopage; 4825 4826 /* 4827 * Looks like reclaim/compaction is worth trying, but 4828 * sync compaction could be very expensive, so keep 4829 * using async compaction. 4830 */ 4831 compact_priority = INIT_COMPACT_PRIORITY; 4832 } 4833 } 4834 4835 retry: 4836 /* 4837 * Deal with possible cpuset update races or zonelist updates to avoid 4838 * infinite retries. 4839 */ 4840 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4841 check_retry_zonelist(zonelist_iter_cookie)) 4842 goto restart; 4843 4844 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4845 if (alloc_flags & ALLOC_KSWAPD) 4846 wake_all_kswapds(order, gfp_mask, ac); 4847 4848 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4849 if (reserve_flags) 4850 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4851 (alloc_flags & ALLOC_KSWAPD); 4852 4853 /* 4854 * Reset the nodemask and zonelist iterators if memory policies can be 4855 * ignored. These allocations are high priority and system rather than 4856 * user oriented. 4857 */ 4858 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4859 ac->nodemask = NULL; 4860 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4861 ac->highest_zoneidx, ac->nodemask); 4862 } 4863 4864 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4865 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4866 if (page) 4867 goto got_pg; 4868 4869 /* Caller is not willing to reclaim, we can't balance anything */ 4870 if (!can_direct_reclaim) 4871 goto nopage; 4872 4873 /* Avoid recursion of direct reclaim */ 4874 if (current->flags & PF_MEMALLOC) 4875 goto nopage; 4876 4877 /* Try direct reclaim and then allocating */ 4878 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4879 &did_some_progress); 4880 if (page) 4881 goto got_pg; 4882 4883 /* Try direct compaction and then allocating */ 4884 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4885 compact_priority, &compact_result); 4886 if (page) 4887 goto got_pg; 4888 4889 /* Do not loop if specifically requested */ 4890 if (gfp_mask & __GFP_NORETRY) 4891 goto nopage; 4892 4893 /* 4894 * Do not retry costly high order allocations unless they are 4895 * __GFP_RETRY_MAYFAIL and we can compact 4896 */ 4897 if (costly_order && (!can_compact || 4898 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4899 goto nopage; 4900 4901 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4902 did_some_progress > 0, &no_progress_loops)) 4903 goto retry; 4904 4905 /* 4906 * It doesn't make any sense to retry for the compaction if the order-0 4907 * reclaim is not able to make any progress because the current 4908 * implementation of the compaction depends on the sufficient amount 4909 * of free memory (see __compaction_suitable) 4910 */ 4911 if (did_some_progress > 0 && can_compact && 4912 should_compact_retry(ac, order, alloc_flags, 4913 compact_result, &compact_priority, 4914 &compaction_retries)) 4915 goto retry; 4916 4917 /* Reclaim/compaction failed to prevent the fallback */ 4918 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) { 4919 alloc_flags &= ~ALLOC_NOFRAGMENT; 4920 goto retry; 4921 } 4922 4923 /* 4924 * Deal with possible cpuset update races or zonelist updates to avoid 4925 * a unnecessary OOM kill. 4926 */ 4927 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4928 check_retry_zonelist(zonelist_iter_cookie)) 4929 goto restart; 4930 4931 /* Reclaim has failed us, start killing things */ 4932 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4933 if (page) 4934 goto got_pg; 4935 4936 /* Avoid allocations with no watermarks from looping endlessly */ 4937 if (tsk_is_oom_victim(current) && 4938 (alloc_flags & ALLOC_OOM || 4939 (gfp_mask & __GFP_NOMEMALLOC))) 4940 goto nopage; 4941 4942 /* Retry as long as the OOM killer is making progress */ 4943 if (did_some_progress) { 4944 no_progress_loops = 0; 4945 goto retry; 4946 } 4947 4948 nopage: 4949 /* 4950 * Deal with possible cpuset update races or zonelist updates to avoid 4951 * a unnecessary OOM kill. 4952 */ 4953 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4954 check_retry_zonelist(zonelist_iter_cookie)) 4955 goto restart; 4956 4957 /* 4958 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4959 * we always retry 4960 */ 4961 if (unlikely(nofail)) { 4962 /* 4963 * Lacking direct_reclaim we can't do anything to reclaim memory, 4964 * we disregard these unreasonable nofail requests and still 4965 * return NULL 4966 */ 4967 if (!can_direct_reclaim) 4968 goto fail; 4969 4970 /* 4971 * Help non-failing allocations by giving some access to memory 4972 * reserves normally used for high priority non-blocking 4973 * allocations but do not use ALLOC_NO_WATERMARKS because this 4974 * could deplete whole memory reserves which would just make 4975 * the situation worse. 4976 */ 4977 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4978 if (page) 4979 goto got_pg; 4980 4981 cond_resched(); 4982 goto retry; 4983 } 4984 fail: 4985 warn_alloc(gfp_mask, ac->nodemask, 4986 "page allocation failure: order:%u", order); 4987 got_pg: 4988 return page; 4989 } 4990 4991 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4992 int preferred_nid, nodemask_t *nodemask, 4993 struct alloc_context *ac, gfp_t *alloc_gfp, 4994 unsigned int *alloc_flags) 4995 { 4996 ac->highest_zoneidx = gfp_zone(gfp_mask); 4997 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4998 ac->nodemask = nodemask; 4999 ac->migratetype = gfp_migratetype(gfp_mask); 5000 5001 if (cpusets_enabled()) { 5002 *alloc_gfp |= __GFP_HARDWALL; 5003 /* 5004 * When we are in the interrupt context, it is irrelevant 5005 * to the current task context. It means that any node ok. 5006 */ 5007 if (in_task() && !ac->nodemask) 5008 ac->nodemask = &cpuset_current_mems_allowed; 5009 else 5010 *alloc_flags |= ALLOC_CPUSET; 5011 } 5012 5013 might_alloc(gfp_mask); 5014 5015 /* 5016 * Don't invoke should_fail logic, since it may call 5017 * get_random_u32() and printk() which need to spin_lock. 5018 */ 5019 if (!(*alloc_flags & ALLOC_TRYLOCK) && 5020 should_fail_alloc_page(gfp_mask, order)) 5021 return false; 5022 5023 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 5024 5025 /* Dirty zone balancing only done in the fast path */ 5026 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 5027 5028 /* 5029 * The preferred zone is used for statistics but crucially it is 5030 * also used as the starting point for the zonelist iterator. It 5031 * may get reset for allocations that ignore memory policies. 5032 */ 5033 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5034 ac->highest_zoneidx, ac->nodemask); 5035 5036 return true; 5037 } 5038 5039 /* 5040 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array 5041 * @gfp: GFP flags for the allocation 5042 * @preferred_nid: The preferred NUMA node ID to allocate from 5043 * @nodemask: Set of nodes to allocate from, may be NULL 5044 * @nr_pages: The number of pages desired in the array 5045 * @page_array: Array to store the pages 5046 * 5047 * This is a batched version of the page allocator that attempts to allocate 5048 * @nr_pages quickly. Pages are added to @page_array. 5049 * 5050 * Note that only the elements in @page_array that were cleared to %NULL on 5051 * entry are populated with newly allocated pages. @nr_pages is the maximum 5052 * number of pages that will be stored in the array. 5053 * 5054 * Returns the number of pages in @page_array, including ones already 5055 * allocated on entry. This can be less than the number requested in @nr_pages, 5056 * but all empty slots are filled from the beginning. I.e., if all slots in 5057 * @page_array were set to %NULL on entry, the slots from 0 to the return value 5058 * - 1 will be filled. 5059 */ 5060 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 5061 nodemask_t *nodemask, int nr_pages, 5062 struct page **page_array) 5063 { 5064 struct page *page; 5065 unsigned long UP_flags; 5066 struct zone *zone; 5067 struct zoneref *z; 5068 struct per_cpu_pages *pcp; 5069 struct list_head *pcp_list; 5070 struct alloc_context ac; 5071 gfp_t alloc_gfp; 5072 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5073 int nr_populated = 0, nr_account = 0; 5074 5075 /* 5076 * Skip populated array elements to determine if any pages need 5077 * to be allocated before disabling IRQs. 5078 */ 5079 while (nr_populated < nr_pages && page_array[nr_populated]) 5080 nr_populated++; 5081 5082 /* No pages requested? */ 5083 if (unlikely(nr_pages <= 0)) 5084 goto out; 5085 5086 /* Already populated array? */ 5087 if (unlikely(nr_pages - nr_populated == 0)) 5088 goto out; 5089 5090 /* Bulk allocator does not support memcg accounting. */ 5091 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 5092 goto failed; 5093 5094 /* Use the single page allocator for one page. */ 5095 if (nr_pages - nr_populated == 1) 5096 goto failed; 5097 5098 #ifdef CONFIG_PAGE_OWNER 5099 /* 5100 * PAGE_OWNER may recurse into the allocator to allocate space to 5101 * save the stack with pagesets.lock held. Releasing/reacquiring 5102 * removes much of the performance benefit of bulk allocation so 5103 * force the caller to allocate one page at a time as it'll have 5104 * similar performance to added complexity to the bulk allocator. 5105 */ 5106 if (static_branch_unlikely(&page_owner_inited)) 5107 goto failed; 5108 #endif 5109 5110 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5111 gfp &= gfp_allowed_mask; 5112 alloc_gfp = gfp; 5113 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5114 goto out; 5115 gfp = alloc_gfp; 5116 5117 /* Find an allowed local zone that meets the low watermark. */ 5118 z = ac.preferred_zoneref; 5119 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 5120 unsigned long mark; 5121 5122 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5123 !__cpuset_zone_allowed(zone, gfp)) { 5124 continue; 5125 } 5126 5127 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 5128 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 5129 goto failed; 5130 } 5131 5132 cond_accept_memory(zone, 0, alloc_flags); 5133 retry_this_zone: 5134 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 5135 if (zone_watermark_fast(zone, 0, mark, 5136 zonelist_zone_idx(ac.preferred_zoneref), 5137 alloc_flags, gfp)) { 5138 break; 5139 } 5140 5141 if (cond_accept_memory(zone, 0, alloc_flags)) 5142 goto retry_this_zone; 5143 5144 /* Try again if zone has deferred pages */ 5145 if (deferred_pages_enabled()) { 5146 if (_deferred_grow_zone(zone, 0)) 5147 goto retry_this_zone; 5148 } 5149 } 5150 5151 /* 5152 * If there are no allowed local zones that meets the watermarks then 5153 * try to allocate a single page and reclaim if necessary. 5154 */ 5155 if (unlikely(!zone)) 5156 goto failed; 5157 5158 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 5159 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 5160 if (!pcp) 5161 goto failed; 5162 5163 /* Attempt the batch allocation */ 5164 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5165 while (nr_populated < nr_pages) { 5166 5167 /* Skip existing pages */ 5168 if (page_array[nr_populated]) { 5169 nr_populated++; 5170 continue; 5171 } 5172 5173 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5174 pcp, pcp_list); 5175 if (unlikely(!page)) { 5176 /* Try and allocate at least one page */ 5177 if (!nr_account) { 5178 pcp_spin_unlock(pcp, UP_flags); 5179 goto failed; 5180 } 5181 break; 5182 } 5183 nr_account++; 5184 5185 prep_new_page(page, 0, gfp, 0); 5186 set_page_refcounted(page); 5187 page_array[nr_populated++] = page; 5188 } 5189 5190 pcp_spin_unlock(pcp, UP_flags); 5191 5192 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5193 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 5194 5195 out: 5196 return nr_populated; 5197 5198 failed: 5199 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 5200 if (page) 5201 page_array[nr_populated++] = page; 5202 goto out; 5203 } 5204 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 5205 5206 /* 5207 * This is the 'heart' of the zoned buddy allocator. 5208 */ 5209 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, 5210 int preferred_nid, nodemask_t *nodemask) 5211 { 5212 struct page *page; 5213 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5214 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5215 struct alloc_context ac = { }; 5216 5217 /* 5218 * There are several places where we assume that the order value is sane 5219 * so bail out early if the request is out of bound. 5220 */ 5221 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 5222 return NULL; 5223 5224 gfp &= gfp_allowed_mask; 5225 /* 5226 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 5227 * resp. GFP_NOIO which has to be inherited for all allocation requests 5228 * from a particular context which has been marked by 5229 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5230 * movable zones are not used during allocation. 5231 */ 5232 gfp = current_gfp_context(gfp); 5233 alloc_gfp = gfp; 5234 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5235 &alloc_gfp, &alloc_flags)) 5236 return NULL; 5237 5238 /* 5239 * Forbid the first pass from falling back to types that fragment 5240 * memory until all local zones are considered. 5241 */ 5242 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 5243 5244 /* First allocation attempt */ 5245 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5246 if (likely(page)) 5247 goto out; 5248 5249 alloc_gfp = gfp; 5250 ac.spread_dirty_pages = false; 5251 5252 /* 5253 * Restore the original nodemask if it was potentially replaced with 5254 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 5255 */ 5256 ac.nodemask = nodemask; 5257 5258 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5259 5260 out: 5261 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 5262 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5263 free_frozen_pages(page, order); 5264 page = NULL; 5265 } 5266 5267 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5268 kmsan_alloc_page(page, order, alloc_gfp); 5269 5270 return page; 5271 } 5272 EXPORT_SYMBOL(__alloc_frozen_pages_noprof); 5273 5274 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 5275 int preferred_nid, nodemask_t *nodemask) 5276 { 5277 struct page *page; 5278 5279 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); 5280 if (page) 5281 set_page_refcounted(page); 5282 return page; 5283 } 5284 EXPORT_SYMBOL(__alloc_pages_noprof); 5285 5286 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 5287 nodemask_t *nodemask) 5288 { 5289 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 5290 preferred_nid, nodemask); 5291 return page_rmappable_folio(page); 5292 } 5293 EXPORT_SYMBOL(__folio_alloc_noprof); 5294 5295 /* 5296 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5297 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5298 * you need to access high mem. 5299 */ 5300 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 5301 { 5302 struct page *page; 5303 5304 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 5305 if (!page) 5306 return 0; 5307 return (unsigned long) page_address(page); 5308 } 5309 EXPORT_SYMBOL(get_free_pages_noprof); 5310 5311 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 5312 { 5313 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 5314 } 5315 EXPORT_SYMBOL(get_zeroed_page_noprof); 5316 5317 static void ___free_pages(struct page *page, unsigned int order, 5318 fpi_t fpi_flags) 5319 { 5320 /* get PageHead before we drop reference */ 5321 int head = PageHead(page); 5322 /* get alloc tag in case the page is released by others */ 5323 struct alloc_tag *tag = pgalloc_tag_get(page); 5324 5325 if (put_page_testzero(page)) 5326 __free_frozen_pages(page, order, fpi_flags); 5327 else if (!head) { 5328 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 5329 while (order-- > 0) { 5330 /* 5331 * The "tail" pages of this non-compound high-order 5332 * page will have no code tags, so to avoid warnings 5333 * mark them as empty. 5334 */ 5335 clear_page_tag_ref(page + (1 << order)); 5336 __free_frozen_pages(page + (1 << order), order, 5337 fpi_flags); 5338 } 5339 } 5340 } 5341 5342 /** 5343 * __free_pages - Free pages allocated with alloc_pages(). 5344 * @page: The page pointer returned from alloc_pages(). 5345 * @order: The order of the allocation. 5346 * 5347 * This function can free multi-page allocations that are not compound 5348 * pages. It does not check that the @order passed in matches that of 5349 * the allocation, so it is easy to leak memory. Freeing more memory 5350 * than was allocated will probably emit a warning. 5351 * 5352 * If the last reference to this page is speculative, it will be released 5353 * by put_page() which only frees the first page of a non-compound 5354 * allocation. To prevent the remaining pages from being leaked, we free 5355 * the subsequent pages here. If you want to use the page's reference 5356 * count to decide when to free the allocation, you should allocate a 5357 * compound page, and use put_page() instead of __free_pages(). 5358 * 5359 * Context: May be called in interrupt context or while holding a normal 5360 * spinlock, but not in NMI context or while holding a raw spinlock. 5361 */ 5362 void __free_pages(struct page *page, unsigned int order) 5363 { 5364 ___free_pages(page, order, FPI_NONE); 5365 } 5366 EXPORT_SYMBOL(__free_pages); 5367 5368 /* 5369 * Can be called while holding raw_spin_lock or from IRQ and NMI for any 5370 * page type (not only those that came from alloc_pages_nolock) 5371 */ 5372 void free_pages_nolock(struct page *page, unsigned int order) 5373 { 5374 ___free_pages(page, order, FPI_TRYLOCK); 5375 } 5376 5377 /** 5378 * free_pages - Free pages allocated with __get_free_pages(). 5379 * @addr: The virtual address tied to a page returned from __get_free_pages(). 5380 * @order: The order of the allocation. 5381 * 5382 * This function behaves the same as __free_pages(). Use this function 5383 * to free pages when you only have a valid virtual address. If you have 5384 * the page, call __free_pages() instead. 5385 */ 5386 void free_pages(unsigned long addr, unsigned int order) 5387 { 5388 if (addr != 0) { 5389 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5390 __free_pages(virt_to_page((void *)addr), order); 5391 } 5392 } 5393 5394 EXPORT_SYMBOL(free_pages); 5395 5396 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5397 size_t size) 5398 { 5399 if (addr) { 5400 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 5401 struct page *page = virt_to_page((void *)addr); 5402 struct page *last = page + nr; 5403 5404 split_page_owner(page, order, 0); 5405 pgalloc_tag_split(page_folio(page), order, 0); 5406 split_page_memcg(page, order); 5407 while (page < --last) 5408 set_page_refcounted(last); 5409 5410 last = page + (1UL << order); 5411 for (page += nr; page < last; page++) 5412 __free_pages_ok(page, 0, FPI_TO_TAIL); 5413 } 5414 return (void *)addr; 5415 } 5416 5417 /** 5418 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5419 * @size: the number of bytes to allocate 5420 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5421 * 5422 * This function is similar to alloc_pages(), except that it allocates the 5423 * minimum number of pages to satisfy the request. alloc_pages() can only 5424 * allocate memory in power-of-two pages. 5425 * 5426 * This function is also limited by MAX_PAGE_ORDER. 5427 * 5428 * Memory allocated by this function must be released by free_pages_exact(). 5429 * 5430 * Return: pointer to the allocated area or %NULL in case of error. 5431 */ 5432 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 5433 { 5434 unsigned int order = get_order(size); 5435 unsigned long addr; 5436 5437 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5438 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5439 5440 addr = get_free_pages_noprof(gfp_mask, order); 5441 return make_alloc_exact(addr, order, size); 5442 } 5443 EXPORT_SYMBOL(alloc_pages_exact_noprof); 5444 5445 /** 5446 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5447 * pages on a node. 5448 * @nid: the preferred node ID where memory should be allocated 5449 * @size: the number of bytes to allocate 5450 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5451 * 5452 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5453 * back. 5454 * 5455 * Return: pointer to the allocated area or %NULL in case of error. 5456 */ 5457 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5458 { 5459 unsigned int order = get_order(size); 5460 struct page *p; 5461 5462 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5463 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5464 5465 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5466 if (!p) 5467 return NULL; 5468 return make_alloc_exact((unsigned long)page_address(p), order, size); 5469 } 5470 5471 /** 5472 * free_pages_exact - release memory allocated via alloc_pages_exact() 5473 * @virt: the value returned by alloc_pages_exact. 5474 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5475 * 5476 * Release the memory allocated by a previous call to alloc_pages_exact. 5477 */ 5478 void free_pages_exact(void *virt, size_t size) 5479 { 5480 unsigned long addr = (unsigned long)virt; 5481 unsigned long end = addr + PAGE_ALIGN(size); 5482 5483 while (addr < end) { 5484 free_page(addr); 5485 addr += PAGE_SIZE; 5486 } 5487 } 5488 EXPORT_SYMBOL(free_pages_exact); 5489 5490 /** 5491 * nr_free_zone_pages - count number of pages beyond high watermark 5492 * @offset: The zone index of the highest zone 5493 * 5494 * nr_free_zone_pages() counts the number of pages which are beyond the 5495 * high watermark within all zones at or below a given zone index. For each 5496 * zone, the number of pages is calculated as: 5497 * 5498 * nr_free_zone_pages = managed_pages - high_pages 5499 * 5500 * Return: number of pages beyond high watermark. 5501 */ 5502 static unsigned long nr_free_zone_pages(int offset) 5503 { 5504 struct zoneref *z; 5505 struct zone *zone; 5506 5507 /* Just pick one node, since fallback list is circular */ 5508 unsigned long sum = 0; 5509 5510 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5511 5512 for_each_zone_zonelist(zone, z, zonelist, offset) { 5513 unsigned long size = zone_managed_pages(zone); 5514 unsigned long high = high_wmark_pages(zone); 5515 if (size > high) 5516 sum += size - high; 5517 } 5518 5519 return sum; 5520 } 5521 5522 /** 5523 * nr_free_buffer_pages - count number of pages beyond high watermark 5524 * 5525 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5526 * watermark within ZONE_DMA and ZONE_NORMAL. 5527 * 5528 * Return: number of pages beyond high watermark within ZONE_DMA and 5529 * ZONE_NORMAL. 5530 */ 5531 unsigned long nr_free_buffer_pages(void) 5532 { 5533 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5534 } 5535 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5536 5537 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5538 { 5539 zoneref->zone = zone; 5540 zoneref->zone_idx = zone_idx(zone); 5541 } 5542 5543 /* 5544 * Builds allocation fallback zone lists. 5545 * 5546 * Add all populated zones of a node to the zonelist. 5547 */ 5548 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5549 { 5550 struct zone *zone; 5551 enum zone_type zone_type = MAX_NR_ZONES; 5552 int nr_zones = 0; 5553 5554 do { 5555 zone_type--; 5556 zone = pgdat->node_zones + zone_type; 5557 if (populated_zone(zone)) { 5558 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5559 check_highest_zone(zone_type); 5560 } 5561 } while (zone_type); 5562 5563 return nr_zones; 5564 } 5565 5566 #ifdef CONFIG_NUMA 5567 5568 static int __parse_numa_zonelist_order(char *s) 5569 { 5570 /* 5571 * We used to support different zonelists modes but they turned 5572 * out to be just not useful. Let's keep the warning in place 5573 * if somebody still use the cmd line parameter so that we do 5574 * not fail it silently 5575 */ 5576 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5577 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5578 return -EINVAL; 5579 } 5580 return 0; 5581 } 5582 5583 static char numa_zonelist_order[] = "Node"; 5584 #define NUMA_ZONELIST_ORDER_LEN 16 5585 /* 5586 * sysctl handler for numa_zonelist_order 5587 */ 5588 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5589 void *buffer, size_t *length, loff_t *ppos) 5590 { 5591 if (write) 5592 return __parse_numa_zonelist_order(buffer); 5593 return proc_dostring(table, write, buffer, length, ppos); 5594 } 5595 5596 static int node_load[MAX_NUMNODES]; 5597 5598 /** 5599 * find_next_best_node - find the next node that should appear in a given node's fallback list 5600 * @node: node whose fallback list we're appending 5601 * @used_node_mask: nodemask_t of already used nodes 5602 * 5603 * We use a number of factors to determine which is the next node that should 5604 * appear on a given node's fallback list. The node should not have appeared 5605 * already in @node's fallback list, and it should be the next closest node 5606 * according to the distance array (which contains arbitrary distance values 5607 * from each node to each node in the system), and should also prefer nodes 5608 * with no CPUs, since presumably they'll have very little allocation pressure 5609 * on them otherwise. 5610 * 5611 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5612 */ 5613 int find_next_best_node(int node, nodemask_t *used_node_mask) 5614 { 5615 int n, val; 5616 int min_val = INT_MAX; 5617 int best_node = NUMA_NO_NODE; 5618 5619 /* 5620 * Use the local node if we haven't already, but for memoryless local 5621 * node, we should skip it and fall back to other nodes. 5622 */ 5623 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5624 node_set(node, *used_node_mask); 5625 return node; 5626 } 5627 5628 for_each_node_state(n, N_MEMORY) { 5629 5630 /* Don't want a node to appear more than once */ 5631 if (node_isset(n, *used_node_mask)) 5632 continue; 5633 5634 /* Use the distance array to find the distance */ 5635 val = node_distance(node, n); 5636 5637 /* Penalize nodes under us ("prefer the next node") */ 5638 val += (n < node); 5639 5640 /* Give preference to headless and unused nodes */ 5641 if (!cpumask_empty(cpumask_of_node(n))) 5642 val += PENALTY_FOR_NODE_WITH_CPUS; 5643 5644 /* Slight preference for less loaded node */ 5645 val *= MAX_NUMNODES; 5646 val += node_load[n]; 5647 5648 if (val < min_val) { 5649 min_val = val; 5650 best_node = n; 5651 } 5652 } 5653 5654 if (best_node >= 0) 5655 node_set(best_node, *used_node_mask); 5656 5657 return best_node; 5658 } 5659 5660 5661 /* 5662 * Build zonelists ordered by node and zones within node. 5663 * This results in maximum locality--normal zone overflows into local 5664 * DMA zone, if any--but risks exhausting DMA zone. 5665 */ 5666 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5667 unsigned nr_nodes) 5668 { 5669 struct zoneref *zonerefs; 5670 int i; 5671 5672 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5673 5674 for (i = 0; i < nr_nodes; i++) { 5675 int nr_zones; 5676 5677 pg_data_t *node = NODE_DATA(node_order[i]); 5678 5679 nr_zones = build_zonerefs_node(node, zonerefs); 5680 zonerefs += nr_zones; 5681 } 5682 zonerefs->zone = NULL; 5683 zonerefs->zone_idx = 0; 5684 } 5685 5686 /* 5687 * Build __GFP_THISNODE zonelists 5688 */ 5689 static void build_thisnode_zonelists(pg_data_t *pgdat) 5690 { 5691 struct zoneref *zonerefs; 5692 int nr_zones; 5693 5694 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5695 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5696 zonerefs += nr_zones; 5697 zonerefs->zone = NULL; 5698 zonerefs->zone_idx = 0; 5699 } 5700 5701 static void build_zonelists(pg_data_t *pgdat) 5702 { 5703 static int node_order[MAX_NUMNODES]; 5704 int node, nr_nodes = 0; 5705 nodemask_t used_mask = NODE_MASK_NONE; 5706 int local_node, prev_node; 5707 5708 /* NUMA-aware ordering of nodes */ 5709 local_node = pgdat->node_id; 5710 prev_node = local_node; 5711 5712 memset(node_order, 0, sizeof(node_order)); 5713 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5714 /* 5715 * We don't want to pressure a particular node. 5716 * So adding penalty to the first node in same 5717 * distance group to make it round-robin. 5718 */ 5719 if (node_distance(local_node, node) != 5720 node_distance(local_node, prev_node)) 5721 node_load[node] += 1; 5722 5723 node_order[nr_nodes++] = node; 5724 prev_node = node; 5725 } 5726 5727 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5728 build_thisnode_zonelists(pgdat); 5729 pr_info("Fallback order for Node %d: ", local_node); 5730 for (node = 0; node < nr_nodes; node++) 5731 pr_cont("%d ", node_order[node]); 5732 pr_cont("\n"); 5733 } 5734 5735 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5736 /* 5737 * Return node id of node used for "local" allocations. 5738 * I.e., first node id of first zone in arg node's generic zonelist. 5739 * Used for initializing percpu 'numa_mem', which is used primarily 5740 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5741 */ 5742 int local_memory_node(int node) 5743 { 5744 struct zoneref *z; 5745 5746 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5747 gfp_zone(GFP_KERNEL), 5748 NULL); 5749 return zonelist_node_idx(z); 5750 } 5751 #endif 5752 5753 static void setup_min_unmapped_ratio(void); 5754 static void setup_min_slab_ratio(void); 5755 #else /* CONFIG_NUMA */ 5756 5757 static void build_zonelists(pg_data_t *pgdat) 5758 { 5759 struct zoneref *zonerefs; 5760 int nr_zones; 5761 5762 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5763 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5764 zonerefs += nr_zones; 5765 5766 zonerefs->zone = NULL; 5767 zonerefs->zone_idx = 0; 5768 } 5769 5770 #endif /* CONFIG_NUMA */ 5771 5772 /* 5773 * Boot pageset table. One per cpu which is going to be used for all 5774 * zones and all nodes. The parameters will be set in such a way 5775 * that an item put on a list will immediately be handed over to 5776 * the buddy list. This is safe since pageset manipulation is done 5777 * with interrupts disabled. 5778 * 5779 * The boot_pagesets must be kept even after bootup is complete for 5780 * unused processors and/or zones. They do play a role for bootstrapping 5781 * hotplugged processors. 5782 * 5783 * zoneinfo_show() and maybe other functions do 5784 * not check if the processor is online before following the pageset pointer. 5785 * Other parts of the kernel may not check if the zone is available. 5786 */ 5787 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5788 /* These effectively disable the pcplists in the boot pageset completely */ 5789 #define BOOT_PAGESET_HIGH 0 5790 #define BOOT_PAGESET_BATCH 1 5791 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5792 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5793 5794 static void __build_all_zonelists(void *data) 5795 { 5796 int nid; 5797 int __maybe_unused cpu; 5798 pg_data_t *self = data; 5799 unsigned long flags; 5800 5801 /* 5802 * The zonelist_update_seq must be acquired with irqsave because the 5803 * reader can be invoked from IRQ with GFP_ATOMIC. 5804 */ 5805 write_seqlock_irqsave(&zonelist_update_seq, flags); 5806 /* 5807 * Also disable synchronous printk() to prevent any printk() from 5808 * trying to hold port->lock, for 5809 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5810 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5811 */ 5812 printk_deferred_enter(); 5813 5814 #ifdef CONFIG_NUMA 5815 memset(node_load, 0, sizeof(node_load)); 5816 #endif 5817 5818 /* 5819 * This node is hotadded and no memory is yet present. So just 5820 * building zonelists is fine - no need to touch other nodes. 5821 */ 5822 if (self && !node_online(self->node_id)) { 5823 build_zonelists(self); 5824 } else { 5825 /* 5826 * All possible nodes have pgdat preallocated 5827 * in free_area_init 5828 */ 5829 for_each_node(nid) { 5830 pg_data_t *pgdat = NODE_DATA(nid); 5831 5832 build_zonelists(pgdat); 5833 } 5834 5835 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5836 /* 5837 * We now know the "local memory node" for each node-- 5838 * i.e., the node of the first zone in the generic zonelist. 5839 * Set up numa_mem percpu variable for on-line cpus. During 5840 * boot, only the boot cpu should be on-line; we'll init the 5841 * secondary cpus' numa_mem as they come on-line. During 5842 * node/memory hotplug, we'll fixup all on-line cpus. 5843 */ 5844 for_each_online_cpu(cpu) 5845 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5846 #endif 5847 } 5848 5849 printk_deferred_exit(); 5850 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5851 } 5852 5853 static noinline void __init 5854 build_all_zonelists_init(void) 5855 { 5856 int cpu; 5857 5858 __build_all_zonelists(NULL); 5859 5860 /* 5861 * Initialize the boot_pagesets that are going to be used 5862 * for bootstrapping processors. The real pagesets for 5863 * each zone will be allocated later when the per cpu 5864 * allocator is available. 5865 * 5866 * boot_pagesets are used also for bootstrapping offline 5867 * cpus if the system is already booted because the pagesets 5868 * are needed to initialize allocators on a specific cpu too. 5869 * F.e. the percpu allocator needs the page allocator which 5870 * needs the percpu allocator in order to allocate its pagesets 5871 * (a chicken-egg dilemma). 5872 */ 5873 for_each_possible_cpu(cpu) 5874 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5875 5876 mminit_verify_zonelist(); 5877 cpuset_init_current_mems_allowed(); 5878 } 5879 5880 /* 5881 * unless system_state == SYSTEM_BOOTING. 5882 * 5883 * __ref due to call of __init annotated helper build_all_zonelists_init 5884 * [protected by SYSTEM_BOOTING]. 5885 */ 5886 void __ref build_all_zonelists(pg_data_t *pgdat) 5887 { 5888 unsigned long vm_total_pages; 5889 5890 if (system_state == SYSTEM_BOOTING) { 5891 build_all_zonelists_init(); 5892 } else { 5893 __build_all_zonelists(pgdat); 5894 /* cpuset refresh routine should be here */ 5895 } 5896 /* Get the number of free pages beyond high watermark in all zones. */ 5897 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5898 /* 5899 * Disable grouping by mobility if the number of pages in the 5900 * system is too low to allow the mechanism to work. It would be 5901 * more accurate, but expensive to check per-zone. This check is 5902 * made on memory-hotadd so a system can start with mobility 5903 * disabled and enable it later 5904 */ 5905 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5906 page_group_by_mobility_disabled = 1; 5907 else 5908 page_group_by_mobility_disabled = 0; 5909 5910 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5911 nr_online_nodes, 5912 str_off_on(page_group_by_mobility_disabled), 5913 vm_total_pages); 5914 #ifdef CONFIG_NUMA 5915 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5916 #endif 5917 } 5918 5919 static int zone_batchsize(struct zone *zone) 5920 { 5921 #ifdef CONFIG_MMU 5922 int batch; 5923 5924 /* 5925 * The number of pages to batch allocate is either ~0.025% 5926 * of the zone or 256KB, whichever is smaller. The batch 5927 * size is striking a balance between allocation latency 5928 * and zone lock contention. 5929 */ 5930 batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE); 5931 if (batch <= 1) 5932 return 1; 5933 5934 /* 5935 * Clamp the batch to a 2^n - 1 value. Having a power 5936 * of 2 value was found to be more likely to have 5937 * suboptimal cache aliasing properties in some cases. 5938 * 5939 * For example if 2 tasks are alternately allocating 5940 * batches of pages, one task can end up with a lot 5941 * of pages of one half of the possible page colors 5942 * and the other with pages of the other colors. 5943 */ 5944 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5945 5946 return batch; 5947 5948 #else 5949 /* The deferral and batching of frees should be suppressed under NOMMU 5950 * conditions. 5951 * 5952 * The problem is that NOMMU needs to be able to allocate large chunks 5953 * of contiguous memory as there's no hardware page translation to 5954 * assemble apparent contiguous memory from discontiguous pages. 5955 * 5956 * Queueing large contiguous runs of pages for batching, however, 5957 * causes the pages to actually be freed in smaller chunks. As there 5958 * can be a significant delay between the individual batches being 5959 * recycled, this leads to the once large chunks of space being 5960 * fragmented and becoming unavailable for high-order allocations. 5961 */ 5962 return 1; 5963 #endif 5964 } 5965 5966 static int percpu_pagelist_high_fraction; 5967 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5968 int high_fraction) 5969 { 5970 #ifdef CONFIG_MMU 5971 int high; 5972 int nr_split_cpus; 5973 unsigned long total_pages; 5974 5975 if (!high_fraction) { 5976 /* 5977 * By default, the high value of the pcp is based on the zone 5978 * low watermark so that if they are full then background 5979 * reclaim will not be started prematurely. 5980 */ 5981 total_pages = low_wmark_pages(zone); 5982 } else { 5983 /* 5984 * If percpu_pagelist_high_fraction is configured, the high 5985 * value is based on a fraction of the managed pages in the 5986 * zone. 5987 */ 5988 total_pages = zone_managed_pages(zone) / high_fraction; 5989 } 5990 5991 /* 5992 * Split the high value across all online CPUs local to the zone. Note 5993 * that early in boot that CPUs may not be online yet and that during 5994 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5995 * onlined. For memory nodes that have no CPUs, split the high value 5996 * across all online CPUs to mitigate the risk that reclaim is triggered 5997 * prematurely due to pages stored on pcp lists. 5998 */ 5999 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 6000 if (!nr_split_cpus) 6001 nr_split_cpus = num_online_cpus(); 6002 high = total_pages / nr_split_cpus; 6003 6004 /* 6005 * Ensure high is at least batch*4. The multiple is based on the 6006 * historical relationship between high and batch. 6007 */ 6008 high = max(high, batch << 2); 6009 6010 return high; 6011 #else 6012 return 0; 6013 #endif 6014 } 6015 6016 /* 6017 * pcp->high and pcp->batch values are related and generally batch is lower 6018 * than high. They are also related to pcp->count such that count is lower 6019 * than high, and as soon as it reaches high, the pcplist is flushed. 6020 * 6021 * However, guaranteeing these relations at all times would require e.g. write 6022 * barriers here but also careful usage of read barriers at the read side, and 6023 * thus be prone to error and bad for performance. Thus the update only prevents 6024 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 6025 * should ensure they can cope with those fields changing asynchronously, and 6026 * fully trust only the pcp->count field on the local CPU with interrupts 6027 * disabled. 6028 * 6029 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 6030 * outside of boot time (or some other assurance that no concurrent updaters 6031 * exist). 6032 */ 6033 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 6034 unsigned long high_max, unsigned long batch) 6035 { 6036 WRITE_ONCE(pcp->batch, batch); 6037 WRITE_ONCE(pcp->high_min, high_min); 6038 WRITE_ONCE(pcp->high_max, high_max); 6039 } 6040 6041 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 6042 { 6043 int pindex; 6044 6045 memset(pcp, 0, sizeof(*pcp)); 6046 memset(pzstats, 0, sizeof(*pzstats)); 6047 6048 spin_lock_init(&pcp->lock); 6049 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 6050 INIT_LIST_HEAD(&pcp->lists[pindex]); 6051 6052 /* 6053 * Set batch and high values safe for a boot pageset. A true percpu 6054 * pageset's initialization will update them subsequently. Here we don't 6055 * need to be as careful as pageset_update() as nobody can access the 6056 * pageset yet. 6057 */ 6058 pcp->high_min = BOOT_PAGESET_HIGH; 6059 pcp->high_max = BOOT_PAGESET_HIGH; 6060 pcp->batch = BOOT_PAGESET_BATCH; 6061 } 6062 6063 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 6064 unsigned long high_max, unsigned long batch) 6065 { 6066 struct per_cpu_pages *pcp; 6067 int cpu; 6068 6069 for_each_possible_cpu(cpu) { 6070 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6071 pageset_update(pcp, high_min, high_max, batch); 6072 } 6073 } 6074 6075 /* 6076 * Calculate and set new high and batch values for all per-cpu pagesets of a 6077 * zone based on the zone's size. 6078 */ 6079 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 6080 { 6081 int new_high_min, new_high_max, new_batch; 6082 6083 new_batch = zone_batchsize(zone); 6084 if (percpu_pagelist_high_fraction) { 6085 new_high_min = zone_highsize(zone, new_batch, cpu_online, 6086 percpu_pagelist_high_fraction); 6087 /* 6088 * PCP high is tuned manually, disable auto-tuning via 6089 * setting high_min and high_max to the manual value. 6090 */ 6091 new_high_max = new_high_min; 6092 } else { 6093 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 6094 new_high_max = zone_highsize(zone, new_batch, cpu_online, 6095 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 6096 } 6097 6098 if (zone->pageset_high_min == new_high_min && 6099 zone->pageset_high_max == new_high_max && 6100 zone->pageset_batch == new_batch) 6101 return; 6102 6103 zone->pageset_high_min = new_high_min; 6104 zone->pageset_high_max = new_high_max; 6105 zone->pageset_batch = new_batch; 6106 6107 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 6108 new_batch); 6109 } 6110 6111 void __meminit setup_zone_pageset(struct zone *zone) 6112 { 6113 int cpu; 6114 6115 /* Size may be 0 on !SMP && !NUMA */ 6116 if (sizeof(struct per_cpu_zonestat) > 0) 6117 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 6118 6119 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 6120 for_each_possible_cpu(cpu) { 6121 struct per_cpu_pages *pcp; 6122 struct per_cpu_zonestat *pzstats; 6123 6124 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6125 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6126 per_cpu_pages_init(pcp, pzstats); 6127 } 6128 6129 zone_set_pageset_high_and_batch(zone, 0); 6130 } 6131 6132 /* 6133 * The zone indicated has a new number of managed_pages; batch sizes and percpu 6134 * page high values need to be recalculated. 6135 */ 6136 static void zone_pcp_update(struct zone *zone, int cpu_online) 6137 { 6138 mutex_lock(&pcp_batch_high_lock); 6139 zone_set_pageset_high_and_batch(zone, cpu_online); 6140 mutex_unlock(&pcp_batch_high_lock); 6141 } 6142 6143 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 6144 { 6145 struct per_cpu_pages *pcp; 6146 struct cpu_cacheinfo *cci; 6147 unsigned long UP_flags; 6148 6149 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6150 cci = get_cpu_cacheinfo(cpu); 6151 /* 6152 * If data cache slice of CPU is large enough, "pcp->batch" 6153 * pages can be preserved in PCP before draining PCP for 6154 * consecutive high-order pages freeing without allocation. 6155 * This can reduce zone lock contention without hurting 6156 * cache-hot pages sharing. 6157 */ 6158 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 6159 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 6160 pcp->flags |= PCPF_FREE_HIGH_BATCH; 6161 else 6162 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 6163 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 6164 } 6165 6166 void setup_pcp_cacheinfo(unsigned int cpu) 6167 { 6168 struct zone *zone; 6169 6170 for_each_populated_zone(zone) 6171 zone_pcp_update_cacheinfo(zone, cpu); 6172 } 6173 6174 /* 6175 * Allocate per cpu pagesets and initialize them. 6176 * Before this call only boot pagesets were available. 6177 */ 6178 void __init setup_per_cpu_pageset(void) 6179 { 6180 struct pglist_data *pgdat; 6181 struct zone *zone; 6182 int __maybe_unused cpu; 6183 6184 for_each_populated_zone(zone) 6185 setup_zone_pageset(zone); 6186 6187 #ifdef CONFIG_NUMA 6188 /* 6189 * Unpopulated zones continue using the boot pagesets. 6190 * The numa stats for these pagesets need to be reset. 6191 * Otherwise, they will end up skewing the stats of 6192 * the nodes these zones are associated with. 6193 */ 6194 for_each_possible_cpu(cpu) { 6195 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 6196 memset(pzstats->vm_numa_event, 0, 6197 sizeof(pzstats->vm_numa_event)); 6198 } 6199 #endif 6200 6201 for_each_online_pgdat(pgdat) 6202 pgdat->per_cpu_nodestats = 6203 alloc_percpu(struct per_cpu_nodestat); 6204 } 6205 6206 __meminit void zone_pcp_init(struct zone *zone) 6207 { 6208 /* 6209 * per cpu subsystem is not up at this point. The following code 6210 * relies on the ability of the linker to provide the 6211 * offset of a (static) per cpu variable into the per cpu area. 6212 */ 6213 zone->per_cpu_pageset = &boot_pageset; 6214 zone->per_cpu_zonestats = &boot_zonestats; 6215 zone->pageset_high_min = BOOT_PAGESET_HIGH; 6216 zone->pageset_high_max = BOOT_PAGESET_HIGH; 6217 zone->pageset_batch = BOOT_PAGESET_BATCH; 6218 6219 if (populated_zone(zone)) 6220 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 6221 zone->present_pages, zone_batchsize(zone)); 6222 } 6223 6224 static void setup_per_zone_lowmem_reserve(void); 6225 6226 void adjust_managed_page_count(struct page *page, long count) 6227 { 6228 atomic_long_add(count, &page_zone(page)->managed_pages); 6229 totalram_pages_add(count); 6230 setup_per_zone_lowmem_reserve(); 6231 } 6232 EXPORT_SYMBOL(adjust_managed_page_count); 6233 6234 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 6235 { 6236 void *pos; 6237 unsigned long pages = 0; 6238 6239 start = (void *)PAGE_ALIGN((unsigned long)start); 6240 end = (void *)((unsigned long)end & PAGE_MASK); 6241 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6242 struct page *page = virt_to_page(pos); 6243 void *direct_map_addr; 6244 6245 /* 6246 * 'direct_map_addr' might be different from 'pos' 6247 * because some architectures' virt_to_page() 6248 * work with aliases. Getting the direct map 6249 * address ensures that we get a _writeable_ 6250 * alias for the memset(). 6251 */ 6252 direct_map_addr = page_address(page); 6253 /* 6254 * Perform a kasan-unchecked memset() since this memory 6255 * has not been initialized. 6256 */ 6257 direct_map_addr = kasan_reset_tag(direct_map_addr); 6258 if ((unsigned int)poison <= 0xFF) 6259 memset(direct_map_addr, poison, PAGE_SIZE); 6260 6261 free_reserved_page(page); 6262 } 6263 6264 if (pages && s) 6265 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 6266 6267 return pages; 6268 } 6269 6270 void free_reserved_page(struct page *page) 6271 { 6272 clear_page_tag_ref(page); 6273 ClearPageReserved(page); 6274 init_page_count(page); 6275 __free_page(page); 6276 adjust_managed_page_count(page, 1); 6277 } 6278 EXPORT_SYMBOL(free_reserved_page); 6279 6280 static int page_alloc_cpu_dead(unsigned int cpu) 6281 { 6282 struct zone *zone; 6283 6284 lru_add_drain_cpu(cpu); 6285 mlock_drain_remote(cpu); 6286 drain_pages(cpu); 6287 6288 /* 6289 * Spill the event counters of the dead processor 6290 * into the current processors event counters. 6291 * This artificially elevates the count of the current 6292 * processor. 6293 */ 6294 vm_events_fold_cpu(cpu); 6295 6296 /* 6297 * Zero the differential counters of the dead processor 6298 * so that the vm statistics are consistent. 6299 * 6300 * This is only okay since the processor is dead and cannot 6301 * race with what we are doing. 6302 */ 6303 cpu_vm_stats_fold(cpu); 6304 6305 for_each_populated_zone(zone) 6306 zone_pcp_update(zone, 0); 6307 6308 return 0; 6309 } 6310 6311 static int page_alloc_cpu_online(unsigned int cpu) 6312 { 6313 struct zone *zone; 6314 6315 for_each_populated_zone(zone) 6316 zone_pcp_update(zone, 1); 6317 return 0; 6318 } 6319 6320 void __init page_alloc_init_cpuhp(void) 6321 { 6322 int ret; 6323 6324 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 6325 "mm/page_alloc:pcp", 6326 page_alloc_cpu_online, 6327 page_alloc_cpu_dead); 6328 WARN_ON(ret < 0); 6329 } 6330 6331 /* 6332 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6333 * or min_free_kbytes changes. 6334 */ 6335 static void calculate_totalreserve_pages(void) 6336 { 6337 struct pglist_data *pgdat; 6338 unsigned long reserve_pages = 0; 6339 enum zone_type i, j; 6340 6341 for_each_online_pgdat(pgdat) { 6342 6343 pgdat->totalreserve_pages = 0; 6344 6345 for (i = 0; i < MAX_NR_ZONES; i++) { 6346 struct zone *zone = pgdat->node_zones + i; 6347 long max = 0; 6348 unsigned long managed_pages = zone_managed_pages(zone); 6349 6350 /* 6351 * lowmem_reserve[j] is monotonically non-decreasing 6352 * in j for a given zone (see 6353 * setup_per_zone_lowmem_reserve()). The maximum 6354 * valid reserve lives at the highest index with a 6355 * non-zero value, so scan backwards and stop at the 6356 * first hit. 6357 */ 6358 for (j = MAX_NR_ZONES - 1; j > i; j--) { 6359 if (!zone->lowmem_reserve[j]) 6360 continue; 6361 6362 max = zone->lowmem_reserve[j]; 6363 break; 6364 } 6365 /* we treat the high watermark as reserved pages. */ 6366 max += high_wmark_pages(zone); 6367 6368 max = min_t(unsigned long, max, managed_pages); 6369 6370 pgdat->totalreserve_pages += max; 6371 6372 reserve_pages += max; 6373 } 6374 } 6375 totalreserve_pages = reserve_pages; 6376 trace_mm_calculate_totalreserve_pages(totalreserve_pages); 6377 } 6378 6379 /* 6380 * setup_per_zone_lowmem_reserve - called whenever 6381 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6382 * has a correct pages reserved value, so an adequate number of 6383 * pages are left in the zone after a successful __alloc_pages(). 6384 */ 6385 static void setup_per_zone_lowmem_reserve(void) 6386 { 6387 struct pglist_data *pgdat; 6388 enum zone_type i, j; 6389 /* 6390 * For a given zone node_zones[i], lowmem_reserve[j] (j > i) 6391 * represents how many pages in zone i must effectively be kept 6392 * in reserve when deciding whether an allocation class that is 6393 * allowed to allocate from zones up to j may fall back into 6394 * zone i. 6395 * 6396 * As j increases, the allocation class can use a strictly larger 6397 * set of fallback zones and therefore must not be allowed to 6398 * deplete low zones more aggressively than a less flexible one. 6399 * As a result, lowmem_reserve[j] is required to be monotonically 6400 * non-decreasing in j for each zone i. Callers such as 6401 * calculate_totalreserve_pages() rely on this monotonicity when 6402 * selecting the maximum reserve entry. 6403 */ 6404 for_each_online_pgdat(pgdat) { 6405 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 6406 struct zone *zone = &pgdat->node_zones[i]; 6407 int ratio = sysctl_lowmem_reserve_ratio[i]; 6408 bool clear = !ratio || !zone_managed_pages(zone); 6409 unsigned long managed_pages = 0; 6410 6411 for (j = i + 1; j < MAX_NR_ZONES; j++) { 6412 struct zone *upper_zone = &pgdat->node_zones[j]; 6413 6414 managed_pages += zone_managed_pages(upper_zone); 6415 6416 if (clear) 6417 zone->lowmem_reserve[j] = 0; 6418 else 6419 zone->lowmem_reserve[j] = managed_pages / ratio; 6420 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, 6421 zone->lowmem_reserve[j]); 6422 } 6423 } 6424 } 6425 6426 /* update totalreserve_pages */ 6427 calculate_totalreserve_pages(); 6428 } 6429 6430 static void __setup_per_zone_wmarks(void) 6431 { 6432 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6433 unsigned long lowmem_pages = 0; 6434 struct zone *zone; 6435 unsigned long flags; 6436 6437 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 6438 for_each_zone(zone) { 6439 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 6440 lowmem_pages += zone_managed_pages(zone); 6441 } 6442 6443 for_each_zone(zone) { 6444 u64 tmp; 6445 6446 spin_lock_irqsave(&zone->lock, flags); 6447 tmp = (u64)pages_min * zone_managed_pages(zone); 6448 tmp = div64_ul(tmp, lowmem_pages); 6449 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 6450 /* 6451 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6452 * need highmem and movable zones pages, so cap pages_min 6453 * to a small value here. 6454 * 6455 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6456 * deltas control async page reclaim, and so should 6457 * not be capped for highmem and movable zones. 6458 */ 6459 unsigned long min_pages; 6460 6461 min_pages = zone_managed_pages(zone) / 1024; 6462 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6463 zone->_watermark[WMARK_MIN] = min_pages; 6464 } else { 6465 /* 6466 * If it's a lowmem zone, reserve a number of pages 6467 * proportionate to the zone's size. 6468 */ 6469 zone->_watermark[WMARK_MIN] = tmp; 6470 } 6471 6472 /* 6473 * Set the kswapd watermarks distance according to the 6474 * scale factor in proportion to available memory, but 6475 * ensure a minimum size on small systems. 6476 */ 6477 tmp = max_t(u64, tmp >> 2, 6478 mult_frac(zone_managed_pages(zone), 6479 watermark_scale_factor, 10000)); 6480 6481 zone->watermark_boost = 0; 6482 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6483 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 6484 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 6485 trace_mm_setup_per_zone_wmarks(zone); 6486 6487 spin_unlock_irqrestore(&zone->lock, flags); 6488 } 6489 6490 /* update totalreserve_pages */ 6491 calculate_totalreserve_pages(); 6492 } 6493 6494 /** 6495 * setup_per_zone_wmarks - called when min_free_kbytes changes 6496 * or when memory is hot-{added|removed} 6497 * 6498 * Ensures that the watermark[min,low,high] values for each zone are set 6499 * correctly with respect to min_free_kbytes. 6500 */ 6501 void setup_per_zone_wmarks(void) 6502 { 6503 struct zone *zone; 6504 static DEFINE_SPINLOCK(lock); 6505 6506 spin_lock(&lock); 6507 __setup_per_zone_wmarks(); 6508 spin_unlock(&lock); 6509 6510 /* 6511 * The watermark size have changed so update the pcpu batch 6512 * and high limits or the limits may be inappropriate. 6513 */ 6514 for_each_zone(zone) 6515 zone_pcp_update(zone, 0); 6516 } 6517 6518 /* 6519 * Initialise min_free_kbytes. 6520 * 6521 * For small machines we want it small (128k min). For large machines 6522 * we want it large (256MB max). But it is not linear, because network 6523 * bandwidth does not increase linearly with machine size. We use 6524 * 6525 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6526 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6527 * 6528 * which yields 6529 * 6530 * 16MB: 512k 6531 * 32MB: 724k 6532 * 64MB: 1024k 6533 * 128MB: 1448k 6534 * 256MB: 2048k 6535 * 512MB: 2896k 6536 * 1024MB: 4096k 6537 * 2048MB: 5792k 6538 * 4096MB: 8192k 6539 * 8192MB: 11584k 6540 * 16384MB: 16384k 6541 */ 6542 void calculate_min_free_kbytes(void) 6543 { 6544 unsigned long lowmem_kbytes; 6545 int new_min_free_kbytes; 6546 6547 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6548 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6549 6550 if (new_min_free_kbytes > user_min_free_kbytes) 6551 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6552 else 6553 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6554 new_min_free_kbytes, user_min_free_kbytes); 6555 6556 } 6557 6558 int __meminit init_per_zone_wmark_min(void) 6559 { 6560 calculate_min_free_kbytes(); 6561 setup_per_zone_wmarks(); 6562 refresh_zone_stat_thresholds(); 6563 setup_per_zone_lowmem_reserve(); 6564 6565 #ifdef CONFIG_NUMA 6566 setup_min_unmapped_ratio(); 6567 setup_min_slab_ratio(); 6568 #endif 6569 6570 khugepaged_min_free_kbytes_update(); 6571 6572 return 0; 6573 } 6574 postcore_initcall(init_per_zone_wmark_min) 6575 6576 /* 6577 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6578 * that we can call two helper functions whenever min_free_kbytes 6579 * changes. 6580 */ 6581 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6582 void *buffer, size_t *length, loff_t *ppos) 6583 { 6584 int rc; 6585 6586 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6587 if (rc) 6588 return rc; 6589 6590 if (write) { 6591 user_min_free_kbytes = min_free_kbytes; 6592 setup_per_zone_wmarks(); 6593 } 6594 return 0; 6595 } 6596 6597 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6598 void *buffer, size_t *length, loff_t *ppos) 6599 { 6600 int rc; 6601 6602 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6603 if (rc) 6604 return rc; 6605 6606 if (write) 6607 setup_per_zone_wmarks(); 6608 6609 return 0; 6610 } 6611 6612 #ifdef CONFIG_NUMA 6613 static void setup_min_unmapped_ratio(void) 6614 { 6615 pg_data_t *pgdat; 6616 struct zone *zone; 6617 6618 for_each_online_pgdat(pgdat) 6619 pgdat->min_unmapped_pages = 0; 6620 6621 for_each_zone(zone) 6622 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6623 sysctl_min_unmapped_ratio) / 100; 6624 } 6625 6626 6627 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6628 void *buffer, size_t *length, loff_t *ppos) 6629 { 6630 int rc; 6631 6632 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6633 if (rc) 6634 return rc; 6635 6636 setup_min_unmapped_ratio(); 6637 6638 return 0; 6639 } 6640 6641 static void setup_min_slab_ratio(void) 6642 { 6643 pg_data_t *pgdat; 6644 struct zone *zone; 6645 6646 for_each_online_pgdat(pgdat) 6647 pgdat->min_slab_pages = 0; 6648 6649 for_each_zone(zone) 6650 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6651 sysctl_min_slab_ratio) / 100; 6652 } 6653 6654 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6655 void *buffer, size_t *length, loff_t *ppos) 6656 { 6657 int rc; 6658 6659 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6660 if (rc) 6661 return rc; 6662 6663 setup_min_slab_ratio(); 6664 6665 return 0; 6666 } 6667 #endif 6668 6669 /* 6670 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6671 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 6672 * whenever sysctl_lowmem_reserve_ratio changes. 6673 * 6674 * The reserve ratio obviously has absolutely no relation with the 6675 * minimum watermarks. The lowmem reserve ratio can only make sense 6676 * if in function of the boot time zone sizes. 6677 */ 6678 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6679 int write, void *buffer, size_t *length, loff_t *ppos) 6680 { 6681 int i; 6682 6683 proc_dointvec_minmax(table, write, buffer, length, ppos); 6684 6685 for (i = 0; i < MAX_NR_ZONES; i++) { 6686 if (sysctl_lowmem_reserve_ratio[i] < 1) 6687 sysctl_lowmem_reserve_ratio[i] = 0; 6688 } 6689 6690 setup_per_zone_lowmem_reserve(); 6691 return 0; 6692 } 6693 6694 /* 6695 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6696 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6697 * pagelist can have before it gets flushed back to buddy allocator. 6698 */ 6699 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6700 int write, void *buffer, size_t *length, loff_t *ppos) 6701 { 6702 struct zone *zone; 6703 int old_percpu_pagelist_high_fraction; 6704 int ret; 6705 6706 /* 6707 * Avoid using pcp_batch_high_lock for reads as the value is read 6708 * atomically and a race with offlining is harmless. 6709 */ 6710 6711 if (!write) 6712 return proc_dointvec_minmax(table, write, buffer, length, ppos); 6713 6714 mutex_lock(&pcp_batch_high_lock); 6715 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6716 6717 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6718 if (ret < 0) 6719 goto out; 6720 6721 /* Sanity checking to avoid pcp imbalance */ 6722 if (percpu_pagelist_high_fraction && 6723 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6724 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6725 ret = -EINVAL; 6726 goto out; 6727 } 6728 6729 /* No change? */ 6730 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6731 goto out; 6732 6733 for_each_populated_zone(zone) 6734 zone_set_pageset_high_and_batch(zone, 0); 6735 out: 6736 mutex_unlock(&pcp_batch_high_lock); 6737 return ret; 6738 } 6739 6740 static const struct ctl_table page_alloc_sysctl_table[] = { 6741 { 6742 .procname = "min_free_kbytes", 6743 .data = &min_free_kbytes, 6744 .maxlen = sizeof(min_free_kbytes), 6745 .mode = 0644, 6746 .proc_handler = min_free_kbytes_sysctl_handler, 6747 .extra1 = SYSCTL_ZERO, 6748 }, 6749 { 6750 .procname = "watermark_boost_factor", 6751 .data = &watermark_boost_factor, 6752 .maxlen = sizeof(watermark_boost_factor), 6753 .mode = 0644, 6754 .proc_handler = proc_dointvec_minmax, 6755 .extra1 = SYSCTL_ZERO, 6756 }, 6757 { 6758 .procname = "watermark_scale_factor", 6759 .data = &watermark_scale_factor, 6760 .maxlen = sizeof(watermark_scale_factor), 6761 .mode = 0644, 6762 .proc_handler = watermark_scale_factor_sysctl_handler, 6763 .extra1 = SYSCTL_ONE, 6764 .extra2 = SYSCTL_THREE_THOUSAND, 6765 }, 6766 { 6767 .procname = "defrag_mode", 6768 .data = &defrag_mode, 6769 .maxlen = sizeof(defrag_mode), 6770 .mode = 0644, 6771 .proc_handler = proc_dointvec_minmax, 6772 .extra1 = SYSCTL_ZERO, 6773 .extra2 = SYSCTL_ONE, 6774 }, 6775 { 6776 .procname = "percpu_pagelist_high_fraction", 6777 .data = &percpu_pagelist_high_fraction, 6778 .maxlen = sizeof(percpu_pagelist_high_fraction), 6779 .mode = 0644, 6780 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6781 .extra1 = SYSCTL_ZERO, 6782 }, 6783 { 6784 .procname = "lowmem_reserve_ratio", 6785 .data = &sysctl_lowmem_reserve_ratio, 6786 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6787 .mode = 0644, 6788 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6789 }, 6790 #ifdef CONFIG_NUMA 6791 { 6792 .procname = "numa_zonelist_order", 6793 .data = &numa_zonelist_order, 6794 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6795 .mode = 0644, 6796 .proc_handler = numa_zonelist_order_handler, 6797 }, 6798 { 6799 .procname = "min_unmapped_ratio", 6800 .data = &sysctl_min_unmapped_ratio, 6801 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6802 .mode = 0644, 6803 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6804 .extra1 = SYSCTL_ZERO, 6805 .extra2 = SYSCTL_ONE_HUNDRED, 6806 }, 6807 { 6808 .procname = "min_slab_ratio", 6809 .data = &sysctl_min_slab_ratio, 6810 .maxlen = sizeof(sysctl_min_slab_ratio), 6811 .mode = 0644, 6812 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6813 .extra1 = SYSCTL_ZERO, 6814 .extra2 = SYSCTL_ONE_HUNDRED, 6815 }, 6816 #endif 6817 }; 6818 6819 void __init page_alloc_sysctl_init(void) 6820 { 6821 register_sysctl_init("vm", page_alloc_sysctl_table); 6822 } 6823 6824 #ifdef CONFIG_CONTIG_ALLOC 6825 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6826 static void alloc_contig_dump_pages(struct list_head *page_list) 6827 { 6828 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6829 6830 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 6831 struct page *page; 6832 6833 dump_stack(); 6834 list_for_each_entry(page, page_list, lru) 6835 dump_page(page, "migration failure"); 6836 } 6837 } 6838 6839 /* [start, end) must belong to a single zone. */ 6840 static int __alloc_contig_migrate_range(struct compact_control *cc, 6841 unsigned long start, unsigned long end) 6842 { 6843 /* This function is based on compact_zone() from compaction.c. */ 6844 unsigned int nr_reclaimed; 6845 unsigned long pfn = start; 6846 unsigned int tries = 0; 6847 int ret = 0; 6848 struct migration_target_control mtc = { 6849 .nid = zone_to_nid(cc->zone), 6850 .gfp_mask = cc->gfp_mask, 6851 .reason = MR_CONTIG_RANGE, 6852 }; 6853 6854 lru_cache_disable(); 6855 6856 while (pfn < end || !list_empty(&cc->migratepages)) { 6857 if (fatal_signal_pending(current)) { 6858 ret = -EINTR; 6859 break; 6860 } 6861 6862 if (list_empty(&cc->migratepages)) { 6863 cc->nr_migratepages = 0; 6864 ret = isolate_migratepages_range(cc, pfn, end); 6865 if (ret && ret != -EAGAIN) 6866 break; 6867 pfn = cc->migrate_pfn; 6868 tries = 0; 6869 } else if (++tries == 5) { 6870 ret = -EBUSY; 6871 break; 6872 } 6873 6874 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6875 &cc->migratepages); 6876 cc->nr_migratepages -= nr_reclaimed; 6877 6878 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6879 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6880 6881 /* 6882 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6883 * to retry again over this error, so do the same here. 6884 */ 6885 if (ret == -ENOMEM) 6886 break; 6887 } 6888 6889 lru_cache_enable(); 6890 if (ret < 0) { 6891 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6892 alloc_contig_dump_pages(&cc->migratepages); 6893 putback_movable_pages(&cc->migratepages); 6894 } 6895 6896 return (ret < 0) ? ret : 0; 6897 } 6898 6899 static void split_free_pages(struct list_head *list, gfp_t gfp_mask) 6900 { 6901 int order; 6902 6903 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6904 struct page *page, *next; 6905 int nr_pages = 1 << order; 6906 6907 list_for_each_entry_safe(page, next, &list[order], lru) { 6908 int i; 6909 6910 post_alloc_hook(page, order, gfp_mask); 6911 set_page_refcounted(page); 6912 if (!order) 6913 continue; 6914 6915 split_page(page, order); 6916 6917 /* Add all subpages to the order-0 head, in sequence. */ 6918 list_del(&page->lru); 6919 for (i = 0; i < nr_pages; i++) 6920 list_add_tail(&page[i].lru, &list[0]); 6921 } 6922 } 6923 } 6924 6925 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) 6926 { 6927 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 6928 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | 6929 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO; 6930 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 6931 6932 /* 6933 * We are given the range to allocate; node, mobility and placement 6934 * hints are irrelevant at this point. We'll simply ignore them. 6935 */ 6936 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | 6937 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE); 6938 6939 /* 6940 * We only support most reclaim flags (but not NOFAIL/NORETRY), and 6941 * selected action flags. 6942 */ 6943 if (gfp_mask & ~(reclaim_mask | action_mask)) 6944 return -EINVAL; 6945 6946 /* 6947 * Flags to control page compaction/migration/reclaim, to free up our 6948 * page range. Migratable pages are movable, __GFP_MOVABLE is implied 6949 * for them. 6950 * 6951 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that 6952 * to not degrade callers. 6953 */ 6954 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | 6955 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; 6956 return 0; 6957 } 6958 6959 /** 6960 * alloc_contig_range() -- tries to allocate given range of pages 6961 * @start: start PFN to allocate 6962 * @end: one-past-the-last PFN to allocate 6963 * @alloc_flags: allocation information 6964 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some 6965 * action and reclaim modifiers are supported. Reclaim modifiers 6966 * control allocation behavior during compaction/migration/reclaim. 6967 * 6968 * The PFN range does not have to be pageblock aligned. The PFN range must 6969 * belong to a single zone. 6970 * 6971 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6972 * pageblocks in the range. Once isolated, the pageblocks should not 6973 * be modified by others. 6974 * 6975 * Return: zero on success or negative error code. On success all 6976 * pages which PFN is in [start, end) are allocated for the caller and 6977 * need to be freed with free_contig_range(). 6978 */ 6979 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 6980 acr_flags_t alloc_flags, gfp_t gfp_mask) 6981 { 6982 const unsigned int order = ilog2(end - start); 6983 unsigned long outer_start, outer_end; 6984 int ret = 0; 6985 6986 struct compact_control cc = { 6987 .nr_migratepages = 0, 6988 .order = -1, 6989 .zone = page_zone(pfn_to_page(start)), 6990 .mode = MIGRATE_SYNC, 6991 .ignore_skip_hint = true, 6992 .no_set_skip_hint = true, 6993 .alloc_contig = true, 6994 }; 6995 INIT_LIST_HEAD(&cc.migratepages); 6996 enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ? 6997 PB_ISOLATE_MODE_CMA_ALLOC : 6998 PB_ISOLATE_MODE_OTHER; 6999 7000 /* 7001 * In contrast to the buddy, we allow for orders here that exceed 7002 * MAX_PAGE_ORDER, so we must manually make sure that we are not 7003 * exceeding the maximum folio order. 7004 */ 7005 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER)) 7006 return -EINVAL; 7007 7008 gfp_mask = current_gfp_context(gfp_mask); 7009 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) 7010 return -EINVAL; 7011 7012 /* 7013 * What we do here is we mark all pageblocks in range as 7014 * MIGRATE_ISOLATE. Because pageblock and max order pages may 7015 * have different sizes, and due to the way page allocator 7016 * work, start_isolate_page_range() has special handlings for this. 7017 * 7018 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 7019 * migrate the pages from an unaligned range (ie. pages that 7020 * we are interested in). This will put all the pages in 7021 * range back to page allocator as MIGRATE_ISOLATE. 7022 * 7023 * When this is done, we take the pages in range from page 7024 * allocator removing them from the buddy system. This way 7025 * page allocator will never consider using them. 7026 * 7027 * This lets us mark the pageblocks back as 7028 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 7029 * aligned range but not in the unaligned, original range are 7030 * put back to page allocator so that buddy can use them. 7031 */ 7032 7033 ret = start_isolate_page_range(start, end, mode); 7034 if (ret) 7035 goto done; 7036 7037 drain_all_pages(cc.zone); 7038 7039 /* 7040 * In case of -EBUSY, we'd like to know which page causes problem. 7041 * So, just fall through. test_pages_isolated() has a tracepoint 7042 * which will report the busy page. 7043 * 7044 * It is possible that busy pages could become available before 7045 * the call to test_pages_isolated, and the range will actually be 7046 * allocated. So, if we fall through be sure to clear ret so that 7047 * -EBUSY is not accidentally used or returned to caller. 7048 */ 7049 ret = __alloc_contig_migrate_range(&cc, start, end); 7050 if (ret && ret != -EBUSY) 7051 goto done; 7052 7053 /* 7054 * When in-use hugetlb pages are migrated, they may simply be released 7055 * back into the free hugepage pool instead of being returned to the 7056 * buddy system. After the migration of in-use huge pages is completed, 7057 * we will invoke replace_free_hugepage_folios() to ensure that these 7058 * hugepages are properly released to the buddy system. 7059 */ 7060 ret = replace_free_hugepage_folios(start, end); 7061 if (ret) 7062 goto done; 7063 7064 /* 7065 * Pages from [start, end) are within a pageblock_nr_pages 7066 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 7067 * more, all pages in [start, end) are free in page allocator. 7068 * What we are going to do is to allocate all pages from 7069 * [start, end) (that is remove them from page allocator). 7070 * 7071 * The only problem is that pages at the beginning and at the 7072 * end of interesting range may be not aligned with pages that 7073 * page allocator holds, ie. they can be part of higher order 7074 * pages. Because of this, we reserve the bigger range and 7075 * once this is done free the pages we are not interested in. 7076 * 7077 * We don't have to hold zone->lock here because the pages are 7078 * isolated thus they won't get removed from buddy. 7079 */ 7080 outer_start = find_large_buddy(start); 7081 7082 /* Make sure the range is really isolated. */ 7083 if (test_pages_isolated(outer_start, end, mode)) { 7084 ret = -EBUSY; 7085 goto done; 7086 } 7087 7088 /* Grab isolated pages from freelists. */ 7089 outer_end = isolate_freepages_range(&cc, outer_start, end); 7090 if (!outer_end) { 7091 ret = -EBUSY; 7092 goto done; 7093 } 7094 7095 if (!(gfp_mask & __GFP_COMP)) { 7096 split_free_pages(cc.freepages, gfp_mask); 7097 7098 /* Free head and tail (if any) */ 7099 if (start != outer_start) 7100 free_contig_range(outer_start, start - outer_start); 7101 if (end != outer_end) 7102 free_contig_range(end, outer_end - end); 7103 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 7104 struct page *head = pfn_to_page(start); 7105 7106 check_new_pages(head, order); 7107 prep_new_page(head, order, gfp_mask, 0); 7108 set_page_refcounted(head); 7109 } else { 7110 ret = -EINVAL; 7111 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 7112 start, end, outer_start, outer_end); 7113 } 7114 done: 7115 undo_isolate_page_range(start, end); 7116 return ret; 7117 } 7118 EXPORT_SYMBOL(alloc_contig_range_noprof); 7119 7120 static int __alloc_contig_pages(unsigned long start_pfn, 7121 unsigned long nr_pages, gfp_t gfp_mask) 7122 { 7123 unsigned long end_pfn = start_pfn + nr_pages; 7124 7125 return alloc_contig_range_noprof(start_pfn, end_pfn, ACR_FLAGS_NONE, 7126 gfp_mask); 7127 } 7128 7129 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 7130 unsigned long nr_pages) 7131 { 7132 unsigned long i, end_pfn = start_pfn + nr_pages; 7133 struct page *page; 7134 7135 for (i = start_pfn; i < end_pfn; i++) { 7136 page = pfn_to_online_page(i); 7137 if (!page) 7138 return false; 7139 7140 if (page_zone(page) != z) 7141 return false; 7142 7143 if (PageReserved(page)) 7144 return false; 7145 7146 if (PageHuge(page)) 7147 return false; 7148 } 7149 return true; 7150 } 7151 7152 static bool zone_spans_last_pfn(const struct zone *zone, 7153 unsigned long start_pfn, unsigned long nr_pages) 7154 { 7155 unsigned long last_pfn = start_pfn + nr_pages - 1; 7156 7157 return zone_spans_pfn(zone, last_pfn); 7158 } 7159 7160 /** 7161 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 7162 * @nr_pages: Number of contiguous pages to allocate 7163 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some 7164 * action and reclaim modifiers are supported. Reclaim modifiers 7165 * control allocation behavior during compaction/migration/reclaim. 7166 * @nid: Target node 7167 * @nodemask: Mask for other possible nodes 7168 * 7169 * This routine is a wrapper around alloc_contig_range(). It scans over zones 7170 * on an applicable zonelist to find a contiguous pfn range which can then be 7171 * tried for allocation with alloc_contig_range(). This routine is intended 7172 * for allocation requests which can not be fulfilled with the buddy allocator. 7173 * 7174 * The allocated memory is always aligned to a page boundary. If nr_pages is a 7175 * power of two, then allocated range is also guaranteed to be aligned to same 7176 * nr_pages (e.g. 1GB request would be aligned to 1GB). 7177 * 7178 * Allocated pages can be freed with free_contig_range() or by manually calling 7179 * __free_page() on each allocated page. 7180 * 7181 * Return: pointer to contiguous pages on success, or NULL if not successful. 7182 */ 7183 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 7184 int nid, nodemask_t *nodemask) 7185 { 7186 unsigned long ret, pfn, flags; 7187 struct zonelist *zonelist; 7188 struct zone *zone; 7189 struct zoneref *z; 7190 7191 zonelist = node_zonelist(nid, gfp_mask); 7192 for_each_zone_zonelist_nodemask(zone, z, zonelist, 7193 gfp_zone(gfp_mask), nodemask) { 7194 spin_lock_irqsave(&zone->lock, flags); 7195 7196 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 7197 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 7198 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 7199 /* 7200 * We release the zone lock here because 7201 * alloc_contig_range() will also lock the zone 7202 * at some point. If there's an allocation 7203 * spinning on this lock, it may win the race 7204 * and cause alloc_contig_range() to fail... 7205 */ 7206 spin_unlock_irqrestore(&zone->lock, flags); 7207 ret = __alloc_contig_pages(pfn, nr_pages, 7208 gfp_mask); 7209 if (!ret) 7210 return pfn_to_page(pfn); 7211 spin_lock_irqsave(&zone->lock, flags); 7212 } 7213 pfn += nr_pages; 7214 } 7215 spin_unlock_irqrestore(&zone->lock, flags); 7216 } 7217 return NULL; 7218 } 7219 #endif /* CONFIG_CONTIG_ALLOC */ 7220 7221 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 7222 { 7223 unsigned long count = 0; 7224 struct folio *folio = pfn_folio(pfn); 7225 7226 if (folio_test_large(folio)) { 7227 int expected = folio_nr_pages(folio); 7228 7229 if (nr_pages == expected) 7230 folio_put(folio); 7231 else 7232 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n", 7233 pfn, nr_pages, expected); 7234 return; 7235 } 7236 7237 for (; nr_pages--; pfn++) { 7238 struct page *page = pfn_to_page(pfn); 7239 7240 count += page_count(page) != 1; 7241 __free_page(page); 7242 } 7243 WARN(count != 0, "%lu pages are still in use!\n", count); 7244 } 7245 EXPORT_SYMBOL(free_contig_range); 7246 7247 /* 7248 * Effectively disable pcplists for the zone by setting the high limit to 0 7249 * and draining all cpus. A concurrent page freeing on another CPU that's about 7250 * to put the page on pcplist will either finish before the drain and the page 7251 * will be drained, or observe the new high limit and skip the pcplist. 7252 * 7253 * Must be paired with a call to zone_pcp_enable(). 7254 */ 7255 void zone_pcp_disable(struct zone *zone) 7256 { 7257 mutex_lock(&pcp_batch_high_lock); 7258 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 7259 __drain_all_pages(zone, true); 7260 } 7261 7262 void zone_pcp_enable(struct zone *zone) 7263 { 7264 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 7265 zone->pageset_high_max, zone->pageset_batch); 7266 mutex_unlock(&pcp_batch_high_lock); 7267 } 7268 7269 void zone_pcp_reset(struct zone *zone) 7270 { 7271 int cpu; 7272 struct per_cpu_zonestat *pzstats; 7273 7274 if (zone->per_cpu_pageset != &boot_pageset) { 7275 for_each_online_cpu(cpu) { 7276 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7277 drain_zonestat(zone, pzstats); 7278 } 7279 free_percpu(zone->per_cpu_pageset); 7280 zone->per_cpu_pageset = &boot_pageset; 7281 if (zone->per_cpu_zonestats != &boot_zonestats) { 7282 free_percpu(zone->per_cpu_zonestats); 7283 zone->per_cpu_zonestats = &boot_zonestats; 7284 } 7285 } 7286 } 7287 7288 #ifdef CONFIG_MEMORY_HOTREMOVE 7289 /* 7290 * All pages in the range must be in a single zone, must not contain holes, 7291 * must span full sections, and must be isolated before calling this function. 7292 * 7293 * Returns the number of managed (non-PageOffline()) pages in the range: the 7294 * number of pages for which memory offlining code must adjust managed page 7295 * counters using adjust_managed_page_count(). 7296 */ 7297 unsigned long __offline_isolated_pages(unsigned long start_pfn, 7298 unsigned long end_pfn) 7299 { 7300 unsigned long already_offline = 0, flags; 7301 unsigned long pfn = start_pfn; 7302 struct page *page; 7303 struct zone *zone; 7304 unsigned int order; 7305 7306 offline_mem_sections(pfn, end_pfn); 7307 zone = page_zone(pfn_to_page(pfn)); 7308 spin_lock_irqsave(&zone->lock, flags); 7309 while (pfn < end_pfn) { 7310 page = pfn_to_page(pfn); 7311 /* 7312 * The HWPoisoned page may be not in buddy system, and 7313 * page_count() is not 0. 7314 */ 7315 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 7316 pfn++; 7317 continue; 7318 } 7319 /* 7320 * At this point all remaining PageOffline() pages have a 7321 * reference count of 0 and can simply be skipped. 7322 */ 7323 if (PageOffline(page)) { 7324 BUG_ON(page_count(page)); 7325 BUG_ON(PageBuddy(page)); 7326 already_offline++; 7327 pfn++; 7328 continue; 7329 } 7330 7331 BUG_ON(page_count(page)); 7332 BUG_ON(!PageBuddy(page)); 7333 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 7334 order = buddy_order(page); 7335 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 7336 pfn += (1 << order); 7337 } 7338 spin_unlock_irqrestore(&zone->lock, flags); 7339 7340 return end_pfn - start_pfn - already_offline; 7341 } 7342 #endif 7343 7344 /* 7345 * This function returns a stable result only if called under zone lock. 7346 */ 7347 bool is_free_buddy_page(const struct page *page) 7348 { 7349 unsigned long pfn = page_to_pfn(page); 7350 unsigned int order; 7351 7352 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7353 const struct page *head = page - (pfn & ((1 << order) - 1)); 7354 7355 if (PageBuddy(head) && 7356 buddy_order_unsafe(head) >= order) 7357 break; 7358 } 7359 7360 return order <= MAX_PAGE_ORDER; 7361 } 7362 EXPORT_SYMBOL(is_free_buddy_page); 7363 7364 #ifdef CONFIG_MEMORY_FAILURE 7365 static inline void add_to_free_list(struct page *page, struct zone *zone, 7366 unsigned int order, int migratetype, 7367 bool tail) 7368 { 7369 __add_to_free_list(page, zone, order, migratetype, tail); 7370 account_freepages(zone, 1 << order, migratetype); 7371 } 7372 7373 /* 7374 * Break down a higher-order page in sub-pages, and keep our target out of 7375 * buddy allocator. 7376 */ 7377 static void break_down_buddy_pages(struct zone *zone, struct page *page, 7378 struct page *target, int low, int high, 7379 int migratetype) 7380 { 7381 unsigned long size = 1 << high; 7382 struct page *current_buddy; 7383 7384 while (high > low) { 7385 high--; 7386 size >>= 1; 7387 7388 if (target >= &page[size]) { 7389 current_buddy = page; 7390 page = page + size; 7391 } else { 7392 current_buddy = page + size; 7393 } 7394 7395 if (set_page_guard(zone, current_buddy, high)) 7396 continue; 7397 7398 add_to_free_list(current_buddy, zone, high, migratetype, false); 7399 set_buddy_order(current_buddy, high); 7400 } 7401 } 7402 7403 /* 7404 * Take a page that will be marked as poisoned off the buddy allocator. 7405 */ 7406 bool take_page_off_buddy(struct page *page) 7407 { 7408 struct zone *zone = page_zone(page); 7409 unsigned long pfn = page_to_pfn(page); 7410 unsigned long flags; 7411 unsigned int order; 7412 bool ret = false; 7413 7414 spin_lock_irqsave(&zone->lock, flags); 7415 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7416 struct page *page_head = page - (pfn & ((1 << order) - 1)); 7417 int page_order = buddy_order(page_head); 7418 7419 if (PageBuddy(page_head) && page_order >= order) { 7420 unsigned long pfn_head = page_to_pfn(page_head); 7421 int migratetype = get_pfnblock_migratetype(page_head, 7422 pfn_head); 7423 7424 del_page_from_free_list(page_head, zone, page_order, 7425 migratetype); 7426 break_down_buddy_pages(zone, page_head, page, 0, 7427 page_order, migratetype); 7428 SetPageHWPoisonTakenOff(page); 7429 ret = true; 7430 break; 7431 } 7432 if (page_count(page_head) > 0) 7433 break; 7434 } 7435 spin_unlock_irqrestore(&zone->lock, flags); 7436 return ret; 7437 } 7438 7439 /* 7440 * Cancel takeoff done by take_page_off_buddy(). 7441 */ 7442 bool put_page_back_buddy(struct page *page) 7443 { 7444 struct zone *zone = page_zone(page); 7445 unsigned long flags; 7446 bool ret = false; 7447 7448 spin_lock_irqsave(&zone->lock, flags); 7449 if (put_page_testzero(page)) { 7450 unsigned long pfn = page_to_pfn(page); 7451 int migratetype = get_pfnblock_migratetype(page, pfn); 7452 7453 ClearPageHWPoisonTakenOff(page); 7454 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 7455 if (TestClearPageHWPoison(page)) { 7456 ret = true; 7457 } 7458 } 7459 spin_unlock_irqrestore(&zone->lock, flags); 7460 7461 return ret; 7462 } 7463 #endif 7464 7465 bool has_managed_zone(enum zone_type zone) 7466 { 7467 struct pglist_data *pgdat; 7468 7469 for_each_online_pgdat(pgdat) { 7470 if (managed_zone(&pgdat->node_zones[zone])) 7471 return true; 7472 } 7473 return false; 7474 } 7475 7476 #ifdef CONFIG_UNACCEPTED_MEMORY 7477 7478 static bool lazy_accept = true; 7479 7480 static int __init accept_memory_parse(char *p) 7481 { 7482 if (!strcmp(p, "lazy")) { 7483 lazy_accept = true; 7484 return 0; 7485 } else if (!strcmp(p, "eager")) { 7486 lazy_accept = false; 7487 return 0; 7488 } else { 7489 return -EINVAL; 7490 } 7491 } 7492 early_param("accept_memory", accept_memory_parse); 7493 7494 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7495 { 7496 phys_addr_t start = page_to_phys(page); 7497 7498 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 7499 } 7500 7501 static void __accept_page(struct zone *zone, unsigned long *flags, 7502 struct page *page) 7503 { 7504 list_del(&page->lru); 7505 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7506 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7507 __ClearPageUnaccepted(page); 7508 spin_unlock_irqrestore(&zone->lock, *flags); 7509 7510 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 7511 7512 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 7513 } 7514 7515 void accept_page(struct page *page) 7516 { 7517 struct zone *zone = page_zone(page); 7518 unsigned long flags; 7519 7520 spin_lock_irqsave(&zone->lock, flags); 7521 if (!PageUnaccepted(page)) { 7522 spin_unlock_irqrestore(&zone->lock, flags); 7523 return; 7524 } 7525 7526 /* Unlocks zone->lock */ 7527 __accept_page(zone, &flags, page); 7528 } 7529 7530 static bool try_to_accept_memory_one(struct zone *zone) 7531 { 7532 unsigned long flags; 7533 struct page *page; 7534 7535 spin_lock_irqsave(&zone->lock, flags); 7536 page = list_first_entry_or_null(&zone->unaccepted_pages, 7537 struct page, lru); 7538 if (!page) { 7539 spin_unlock_irqrestore(&zone->lock, flags); 7540 return false; 7541 } 7542 7543 /* Unlocks zone->lock */ 7544 __accept_page(zone, &flags, page); 7545 7546 return true; 7547 } 7548 7549 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7550 int alloc_flags) 7551 { 7552 long to_accept, wmark; 7553 bool ret = false; 7554 7555 if (list_empty(&zone->unaccepted_pages)) 7556 return false; 7557 7558 /* Bailout, since try_to_accept_memory_one() needs to take a lock */ 7559 if (alloc_flags & ALLOC_TRYLOCK) 7560 return false; 7561 7562 wmark = promo_wmark_pages(zone); 7563 7564 /* 7565 * Watermarks have not been initialized yet. 7566 * 7567 * Accepting one MAX_ORDER page to ensure progress. 7568 */ 7569 if (!wmark) 7570 return try_to_accept_memory_one(zone); 7571 7572 /* How much to accept to get to promo watermark? */ 7573 to_accept = wmark - 7574 (zone_page_state(zone, NR_FREE_PAGES) - 7575 __zone_watermark_unusable_free(zone, order, 0) - 7576 zone_page_state(zone, NR_UNACCEPTED)); 7577 7578 while (to_accept > 0) { 7579 if (!try_to_accept_memory_one(zone)) 7580 break; 7581 ret = true; 7582 to_accept -= MAX_ORDER_NR_PAGES; 7583 } 7584 7585 return ret; 7586 } 7587 7588 static bool __free_unaccepted(struct page *page) 7589 { 7590 struct zone *zone = page_zone(page); 7591 unsigned long flags; 7592 7593 if (!lazy_accept) 7594 return false; 7595 7596 spin_lock_irqsave(&zone->lock, flags); 7597 list_add_tail(&page->lru, &zone->unaccepted_pages); 7598 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7599 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7600 __SetPageUnaccepted(page); 7601 spin_unlock_irqrestore(&zone->lock, flags); 7602 7603 return true; 7604 } 7605 7606 #else 7607 7608 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7609 { 7610 return false; 7611 } 7612 7613 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7614 int alloc_flags) 7615 { 7616 return false; 7617 } 7618 7619 static bool __free_unaccepted(struct page *page) 7620 { 7621 BUILD_BUG(); 7622 return false; 7623 } 7624 7625 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7626 7627 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) 7628 { 7629 /* 7630 * Do not specify __GFP_DIRECT_RECLAIM, since direct claim is not allowed. 7631 * Do not specify __GFP_KSWAPD_RECLAIM either, since wake up of kswapd 7632 * is not safe in arbitrary context. 7633 * 7634 * These two are the conditions for gfpflags_allow_spinning() being true. 7635 * 7636 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason 7637 * to warn. Also warn would trigger printk() which is unsafe from 7638 * various contexts. We cannot use printk_deferred_enter() to mitigate, 7639 * since the running context is unknown. 7640 * 7641 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below 7642 * is safe in any context. Also zeroing the page is mandatory for 7643 * BPF use cases. 7644 * 7645 * Though __GFP_NOMEMALLOC is not checked in the code path below, 7646 * specify it here to highlight that alloc_pages_nolock() 7647 * doesn't want to deplete reserves. 7648 */ 7649 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP 7650 | gfp_flags; 7651 unsigned int alloc_flags = ALLOC_TRYLOCK; 7652 struct alloc_context ac = { }; 7653 struct page *page; 7654 7655 VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT); 7656 /* 7657 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is 7658 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current 7659 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will 7660 * mark the task as the owner of another rt_spin_lock which will 7661 * confuse PI logic, so return immediately if called form hard IRQ or 7662 * NMI. 7663 * 7664 * Note, irqs_disabled() case is ok. This function can be called 7665 * from raw_spin_lock_irqsave region. 7666 */ 7667 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) 7668 return NULL; 7669 if (!pcp_allowed_order(order)) 7670 return NULL; 7671 7672 /* Bailout, since _deferred_grow_zone() needs to take a lock */ 7673 if (deferred_pages_enabled()) 7674 return NULL; 7675 7676 if (nid == NUMA_NO_NODE) 7677 nid = numa_node_id(); 7678 7679 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac, 7680 &alloc_gfp, &alloc_flags); 7681 7682 /* 7683 * Best effort allocation from percpu free list. 7684 * If it's empty attempt to spin_trylock zone->lock. 7685 */ 7686 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 7687 7688 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */ 7689 7690 if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) && 7691 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) { 7692 __free_frozen_pages(page, order, FPI_TRYLOCK); 7693 page = NULL; 7694 } 7695 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 7696 kmsan_alloc_page(page, order, alloc_gfp); 7697 return page; 7698 } 7699 /** 7700 * alloc_pages_nolock - opportunistic reentrant allocation from any context 7701 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed. 7702 * @nid: node to allocate from 7703 * @order: allocation order size 7704 * 7705 * Allocates pages of a given order from the given node. This is safe to 7706 * call from any context (from atomic, NMI, and also reentrant 7707 * allocator -> tracepoint -> alloc_pages_nolock_noprof). 7708 * Allocation is best effort and to be expected to fail easily so nobody should 7709 * rely on the success. Failures are not reported via warn_alloc(). 7710 * See always fail conditions below. 7711 * 7712 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN. 7713 * It means ENOMEM. There is no reason to call it again and expect !NULL. 7714 */ 7715 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) 7716 { 7717 struct page *page; 7718 7719 page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order); 7720 if (page) 7721 set_page_refcounted(page); 7722 return page; 7723 } 7724 EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof); 7725