1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17 #include <linux/stddef.h> 18 #include <linux/mm.h> 19 #include <linux/highmem.h> 20 #include <linux/interrupt.h> 21 #include <linux/jiffies.h> 22 #include <linux/compiler.h> 23 #include <linux/kernel.h> 24 #include <linux/kasan.h> 25 #include <linux/kmsan.h> 26 #include <linux/module.h> 27 #include <linux/suspend.h> 28 #include <linux/ratelimit.h> 29 #include <linux/oom.h> 30 #include <linux/topology.h> 31 #include <linux/sysctl.h> 32 #include <linux/cpu.h> 33 #include <linux/cpuset.h> 34 #include <linux/pagevec.h> 35 #include <linux/memory_hotplug.h> 36 #include <linux/nodemask.h> 37 #include <linux/vmstat.h> 38 #include <linux/fault-inject.h> 39 #include <linux/compaction.h> 40 #include <trace/events/kmem.h> 41 #include <trace/events/oom.h> 42 #include <linux/prefetch.h> 43 #include <linux/mm_inline.h> 44 #include <linux/mmu_notifier.h> 45 #include <linux/migrate.h> 46 #include <linux/sched/mm.h> 47 #include <linux/page_owner.h> 48 #include <linux/page_table_check.h> 49 #include <linux/memcontrol.h> 50 #include <linux/ftrace.h> 51 #include <linux/lockdep.h> 52 #include <linux/psi.h> 53 #include <linux/khugepaged.h> 54 #include <linux/delayacct.h> 55 #include <linux/cacheinfo.h> 56 #include <linux/pgalloc_tag.h> 57 #include <asm/div64.h> 58 #include "internal.h" 59 #include "shuffle.h" 60 #include "page_reporting.h" 61 62 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 63 typedef int __bitwise fpi_t; 64 65 /* No special request */ 66 #define FPI_NONE ((__force fpi_t)0) 67 68 /* 69 * Skip free page reporting notification for the (possibly merged) page. 70 * This does not hinder free page reporting from grabbing the page, 71 * reporting it and marking it "reported" - it only skips notifying 72 * the free page reporting infrastructure about a newly freed page. For 73 * example, used when temporarily pulling a page from a freelist and 74 * putting it back unmodified. 75 */ 76 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 77 78 /* 79 * Place the (possibly merged) page to the tail of the freelist. Will ignore 80 * page shuffling (relevant code - e.g., memory onlining - is expected to 81 * shuffle the whole zone). 82 * 83 * Note: No code should rely on this flag for correctness - it's purely 84 * to allow for optimizations when handing back either fresh pages 85 * (memory onlining) or untouched pages (page isolation, free page 86 * reporting). 87 */ 88 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 89 90 /* Free the page without taking locks. Rely on trylock only. */ 91 #define FPI_TRYLOCK ((__force fpi_t)BIT(2)) 92 93 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 94 static DEFINE_MUTEX(pcp_batch_high_lock); 95 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 96 97 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 98 /* 99 * On SMP, spin_trylock is sufficient protection. 100 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 101 * Pass flags to a no-op inline function to typecheck and silence the unused 102 * variable warning. 103 */ 104 static inline void __pcp_trylock_noop(unsigned long *flags) { } 105 #define pcp_trylock_prepare(flags) __pcp_trylock_noop(&(flags)) 106 #define pcp_trylock_finish(flags) __pcp_trylock_noop(&(flags)) 107 #else 108 109 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ 110 #define pcp_trylock_prepare(flags) local_irq_save(flags) 111 #define pcp_trylock_finish(flags) local_irq_restore(flags) 112 #endif 113 114 /* 115 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid 116 * a migration causing the wrong PCP to be locked and remote memory being 117 * potentially allocated, pin the task to the CPU for the lookup+lock. 118 * preempt_disable is used on !RT because it is faster than migrate_disable. 119 * migrate_disable is used on RT because otherwise RT spinlock usage is 120 * interfered with and a high priority task cannot preempt the allocator. 121 */ 122 #ifndef CONFIG_PREEMPT_RT 123 #define pcpu_task_pin() preempt_disable() 124 #define pcpu_task_unpin() preempt_enable() 125 #else 126 #define pcpu_task_pin() migrate_disable() 127 #define pcpu_task_unpin() migrate_enable() 128 #endif 129 130 /* 131 * Generic helper to lookup and a per-cpu variable with an embedded spinlock. 132 * Return value should be used with equivalent unlock helper. 133 */ 134 #define pcpu_spin_trylock(type, member, ptr) \ 135 ({ \ 136 type *_ret; \ 137 pcpu_task_pin(); \ 138 _ret = this_cpu_ptr(ptr); \ 139 if (!spin_trylock(&_ret->member)) { \ 140 pcpu_task_unpin(); \ 141 _ret = NULL; \ 142 } \ 143 _ret; \ 144 }) 145 146 #define pcpu_spin_unlock(member, ptr) \ 147 ({ \ 148 spin_unlock(&ptr->member); \ 149 pcpu_task_unpin(); \ 150 }) 151 152 /* struct per_cpu_pages specific helpers. */ 153 #define pcp_spin_trylock(ptr, UP_flags) \ 154 ({ \ 155 struct per_cpu_pages *__ret; \ 156 pcp_trylock_prepare(UP_flags); \ 157 __ret = pcpu_spin_trylock(struct per_cpu_pages, lock, ptr); \ 158 if (!__ret) \ 159 pcp_trylock_finish(UP_flags); \ 160 __ret; \ 161 }) 162 163 #define pcp_spin_unlock(ptr, UP_flags) \ 164 ({ \ 165 pcpu_spin_unlock(lock, ptr); \ 166 pcp_trylock_finish(UP_flags); \ 167 }) 168 169 /* 170 * With the UP spinlock implementation, when we spin_lock(&pcp->lock) (for i.e. 171 * a potentially remote cpu drain) and get interrupted by an operation that 172 * attempts pcp_spin_trylock(), we can't rely on the trylock failure due to UP 173 * spinlock assumptions making the trylock a no-op. So we have to turn that 174 * spin_lock() to a spin_lock_irqsave(). This works because on UP there are no 175 * remote cpu's so we can only be locking the only existing local one. 176 */ 177 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 178 static inline void __flags_noop(unsigned long *flags) { } 179 #define pcp_spin_lock_maybe_irqsave(ptr, flags) \ 180 ({ \ 181 __flags_noop(&(flags)); \ 182 spin_lock(&(ptr)->lock); \ 183 }) 184 #define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \ 185 ({ \ 186 spin_unlock(&(ptr)->lock); \ 187 __flags_noop(&(flags)); \ 188 }) 189 #else 190 #define pcp_spin_lock_maybe_irqsave(ptr, flags) \ 191 spin_lock_irqsave(&(ptr)->lock, flags) 192 #define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \ 193 spin_unlock_irqrestore(&(ptr)->lock, flags) 194 #endif 195 196 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 197 DEFINE_PER_CPU(int, numa_node); 198 EXPORT_PER_CPU_SYMBOL(numa_node); 199 #endif 200 201 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 202 203 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 204 /* 205 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 206 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 207 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 208 * defined in <linux/topology.h>. 209 */ 210 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 211 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 212 #endif 213 214 static DEFINE_MUTEX(pcpu_drain_mutex); 215 216 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 217 volatile unsigned long latent_entropy __latent_entropy; 218 EXPORT_SYMBOL(latent_entropy); 219 #endif 220 221 /* 222 * Array of node states. 223 */ 224 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 225 [N_POSSIBLE] = NODE_MASK_ALL, 226 [N_ONLINE] = { { [0] = 1UL } }, 227 #ifndef CONFIG_NUMA 228 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 229 #ifdef CONFIG_HIGHMEM 230 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 231 #endif 232 [N_MEMORY] = { { [0] = 1UL } }, 233 [N_CPU] = { { [0] = 1UL } }, 234 #endif /* NUMA */ 235 }; 236 EXPORT_SYMBOL(node_states); 237 238 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 239 240 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 241 unsigned int pageblock_order __read_mostly; 242 #endif 243 244 static void __free_pages_ok(struct page *page, unsigned int order, 245 fpi_t fpi_flags); 246 247 /* 248 * results with 256, 32 in the lowmem_reserve sysctl: 249 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 250 * 1G machine -> (16M dma, 784M normal, 224M high) 251 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 252 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 253 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 254 * 255 * TBD: should special case ZONE_DMA32 machines here - in those we normally 256 * don't need any ZONE_NORMAL reservation 257 */ 258 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 259 #ifdef CONFIG_ZONE_DMA 260 [ZONE_DMA] = 256, 261 #endif 262 #ifdef CONFIG_ZONE_DMA32 263 [ZONE_DMA32] = 256, 264 #endif 265 [ZONE_NORMAL] = 32, 266 #ifdef CONFIG_HIGHMEM 267 [ZONE_HIGHMEM] = 0, 268 #endif 269 [ZONE_MOVABLE] = 0, 270 }; 271 272 char * const zone_names[MAX_NR_ZONES] = { 273 #ifdef CONFIG_ZONE_DMA 274 "DMA", 275 #endif 276 #ifdef CONFIG_ZONE_DMA32 277 "DMA32", 278 #endif 279 "Normal", 280 #ifdef CONFIG_HIGHMEM 281 "HighMem", 282 #endif 283 "Movable", 284 #ifdef CONFIG_ZONE_DEVICE 285 "Device", 286 #endif 287 }; 288 289 const char * const migratetype_names[MIGRATE_TYPES] = { 290 "Unmovable", 291 "Movable", 292 "Reclaimable", 293 "HighAtomic", 294 #ifdef CONFIG_CMA 295 "CMA", 296 #endif 297 #ifdef CONFIG_MEMORY_ISOLATION 298 "Isolate", 299 #endif 300 }; 301 302 int min_free_kbytes = 1024; 303 int user_min_free_kbytes = -1; 304 static int watermark_boost_factor __read_mostly = 15000; 305 static int watermark_scale_factor = 10; 306 int defrag_mode; 307 308 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 309 int movable_zone; 310 EXPORT_SYMBOL(movable_zone); 311 312 #if MAX_NUMNODES > 1 313 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 314 unsigned int nr_online_nodes __read_mostly = 1; 315 EXPORT_SYMBOL(nr_node_ids); 316 EXPORT_SYMBOL(nr_online_nodes); 317 #endif 318 319 static bool page_contains_unaccepted(struct page *page, unsigned int order); 320 static bool cond_accept_memory(struct zone *zone, unsigned int order, 321 int alloc_flags); 322 static bool __free_unaccepted(struct page *page); 323 324 int page_group_by_mobility_disabled __read_mostly; 325 326 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 327 /* 328 * During boot we initialize deferred pages on-demand, as needed, but once 329 * page_alloc_init_late() has finished, the deferred pages are all initialized, 330 * and we can permanently disable that path. 331 */ 332 DEFINE_STATIC_KEY_TRUE(deferred_pages); 333 334 static inline bool deferred_pages_enabled(void) 335 { 336 return static_branch_unlikely(&deferred_pages); 337 } 338 339 /* 340 * deferred_grow_zone() is __init, but it is called from 341 * get_page_from_freelist() during early boot until deferred_pages permanently 342 * disables this call. This is why we have refdata wrapper to avoid warning, 343 * and to ensure that the function body gets unloaded. 344 */ 345 static bool __ref 346 _deferred_grow_zone(struct zone *zone, unsigned int order) 347 { 348 return deferred_grow_zone(zone, order); 349 } 350 #else 351 static inline bool deferred_pages_enabled(void) 352 { 353 return false; 354 } 355 356 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) 357 { 358 return false; 359 } 360 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 361 362 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 363 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 364 unsigned long pfn) 365 { 366 #ifdef CONFIG_SPARSEMEM 367 return section_to_usemap(__pfn_to_section(pfn)); 368 #else 369 return page_zone(page)->pageblock_flags; 370 #endif /* CONFIG_SPARSEMEM */ 371 } 372 373 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 374 { 375 #ifdef CONFIG_SPARSEMEM 376 pfn &= (PAGES_PER_SECTION-1); 377 #else 378 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 379 #endif /* CONFIG_SPARSEMEM */ 380 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 381 } 382 383 static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit) 384 { 385 return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS; 386 } 387 388 static __always_inline void 389 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn, 390 unsigned long **bitmap_word, unsigned long *bitidx) 391 { 392 unsigned long *bitmap; 393 unsigned long word_bitidx; 394 395 #ifdef CONFIG_MEMORY_ISOLATION 396 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8); 397 #else 398 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 399 #endif 400 BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK); 401 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 402 403 bitmap = get_pageblock_bitmap(page, pfn); 404 *bitidx = pfn_to_bitidx(page, pfn); 405 word_bitidx = *bitidx / BITS_PER_LONG; 406 *bitidx &= (BITS_PER_LONG - 1); 407 *bitmap_word = &bitmap[word_bitidx]; 408 } 409 410 411 /** 412 * __get_pfnblock_flags_mask - Return the requested group of flags for 413 * a pageblock_nr_pages block of pages 414 * @page: The page within the block of interest 415 * @pfn: The target page frame number 416 * @mask: mask of bits that the caller is interested in 417 * 418 * Return: pageblock_bits flags 419 */ 420 static unsigned long __get_pfnblock_flags_mask(const struct page *page, 421 unsigned long pfn, 422 unsigned long mask) 423 { 424 unsigned long *bitmap_word; 425 unsigned long bitidx; 426 unsigned long word; 427 428 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 429 /* 430 * This races, without locks, with set_pfnblock_migratetype(). Ensure 431 * a consistent read of the memory array, so that results, even though 432 * racy, are not corrupted. 433 */ 434 word = READ_ONCE(*bitmap_word); 435 return (word >> bitidx) & mask; 436 } 437 438 /** 439 * get_pfnblock_bit - Check if a standalone bit of a pageblock is set 440 * @page: The page within the block of interest 441 * @pfn: The target page frame number 442 * @pb_bit: pageblock bit to check 443 * 444 * Return: true if the bit is set, otherwise false 445 */ 446 bool get_pfnblock_bit(const struct page *page, unsigned long pfn, 447 enum pageblock_bits pb_bit) 448 { 449 unsigned long *bitmap_word; 450 unsigned long bitidx; 451 452 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 453 return false; 454 455 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 456 457 return test_bit(bitidx + pb_bit, bitmap_word); 458 } 459 460 /** 461 * get_pfnblock_migratetype - Return the migratetype of a pageblock 462 * @page: The page within the block of interest 463 * @pfn: The target page frame number 464 * 465 * Return: The migratetype of the pageblock 466 * 467 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn 468 * to save a call to page_to_pfn(). 469 */ 470 __always_inline enum migratetype 471 get_pfnblock_migratetype(const struct page *page, unsigned long pfn) 472 { 473 unsigned long mask = MIGRATETYPE_AND_ISO_MASK; 474 unsigned long flags; 475 476 flags = __get_pfnblock_flags_mask(page, pfn, mask); 477 478 #ifdef CONFIG_MEMORY_ISOLATION 479 if (flags & BIT(PB_migrate_isolate)) 480 return MIGRATE_ISOLATE; 481 #endif 482 return flags & MIGRATETYPE_MASK; 483 } 484 485 /** 486 * __set_pfnblock_flags_mask - Set the requested group of flags for 487 * a pageblock_nr_pages block of pages 488 * @page: The page within the block of interest 489 * @pfn: The target page frame number 490 * @flags: The flags to set 491 * @mask: mask of bits that the caller is interested in 492 */ 493 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn, 494 unsigned long flags, unsigned long mask) 495 { 496 unsigned long *bitmap_word; 497 unsigned long bitidx; 498 unsigned long word; 499 500 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 501 502 mask <<= bitidx; 503 flags <<= bitidx; 504 505 word = READ_ONCE(*bitmap_word); 506 do { 507 } while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags)); 508 } 509 510 /** 511 * set_pfnblock_bit - Set a standalone bit of a pageblock 512 * @page: The page within the block of interest 513 * @pfn: The target page frame number 514 * @pb_bit: pageblock bit to set 515 */ 516 void set_pfnblock_bit(const struct page *page, unsigned long pfn, 517 enum pageblock_bits pb_bit) 518 { 519 unsigned long *bitmap_word; 520 unsigned long bitidx; 521 522 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 523 return; 524 525 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 526 527 set_bit(bitidx + pb_bit, bitmap_word); 528 } 529 530 /** 531 * clear_pfnblock_bit - Clear a standalone bit of a pageblock 532 * @page: The page within the block of interest 533 * @pfn: The target page frame number 534 * @pb_bit: pageblock bit to clear 535 */ 536 void clear_pfnblock_bit(const struct page *page, unsigned long pfn, 537 enum pageblock_bits pb_bit) 538 { 539 unsigned long *bitmap_word; 540 unsigned long bitidx; 541 542 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit))) 543 return; 544 545 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); 546 547 clear_bit(bitidx + pb_bit, bitmap_word); 548 } 549 550 /** 551 * set_pageblock_migratetype - Set the migratetype of a pageblock 552 * @page: The page within the block of interest 553 * @migratetype: migratetype to set 554 */ 555 static void set_pageblock_migratetype(struct page *page, 556 enum migratetype migratetype) 557 { 558 if (unlikely(page_group_by_mobility_disabled && 559 migratetype < MIGRATE_PCPTYPES)) 560 migratetype = MIGRATE_UNMOVABLE; 561 562 #ifdef CONFIG_MEMORY_ISOLATION 563 if (migratetype == MIGRATE_ISOLATE) { 564 VM_WARN_ONCE(1, 565 "Use set_pageblock_isolate() for pageblock isolation"); 566 return; 567 } 568 VM_WARN_ONCE(get_pageblock_isolate(page), 569 "Use clear_pageblock_isolate() to unisolate pageblock"); 570 /* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */ 571 #endif 572 __set_pfnblock_flags_mask(page, page_to_pfn(page), 573 (unsigned long)migratetype, 574 MIGRATETYPE_AND_ISO_MASK); 575 } 576 577 void __meminit init_pageblock_migratetype(struct page *page, 578 enum migratetype migratetype, 579 bool isolate) 580 { 581 unsigned long flags; 582 583 if (unlikely(page_group_by_mobility_disabled && 584 migratetype < MIGRATE_PCPTYPES)) 585 migratetype = MIGRATE_UNMOVABLE; 586 587 flags = migratetype; 588 589 #ifdef CONFIG_MEMORY_ISOLATION 590 if (migratetype == MIGRATE_ISOLATE) { 591 VM_WARN_ONCE( 592 1, 593 "Set isolate=true to isolate pageblock with a migratetype"); 594 return; 595 } 596 if (isolate) 597 flags |= BIT(PB_migrate_isolate); 598 #endif 599 __set_pfnblock_flags_mask(page, page_to_pfn(page), flags, 600 MIGRATETYPE_AND_ISO_MASK); 601 } 602 603 #ifdef CONFIG_DEBUG_VM 604 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 605 { 606 int ret; 607 unsigned seq; 608 unsigned long pfn = page_to_pfn(page); 609 unsigned long sp, start_pfn; 610 611 do { 612 seq = zone_span_seqbegin(zone); 613 start_pfn = zone->zone_start_pfn; 614 sp = zone->spanned_pages; 615 ret = !zone_spans_pfn(zone, pfn); 616 } while (zone_span_seqretry(zone, seq)); 617 618 if (ret) 619 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 620 pfn, zone_to_nid(zone), zone->name, 621 start_pfn, start_pfn + sp); 622 623 return ret; 624 } 625 626 /* 627 * Temporary debugging check for pages not lying within a given zone. 628 */ 629 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) 630 { 631 if (page_outside_zone_boundaries(zone, page)) 632 return true; 633 if (zone != page_zone(page)) 634 return true; 635 636 return false; 637 } 638 #else 639 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) 640 { 641 return false; 642 } 643 #endif 644 645 static void bad_page(struct page *page, const char *reason) 646 { 647 static unsigned long resume; 648 static unsigned long nr_shown; 649 static unsigned long nr_unshown; 650 651 /* 652 * Allow a burst of 60 reports, then keep quiet for that minute; 653 * or allow a steady drip of one report per second. 654 */ 655 if (nr_shown == 60) { 656 if (time_before(jiffies, resume)) { 657 nr_unshown++; 658 goto out; 659 } 660 if (nr_unshown) { 661 pr_alert( 662 "BUG: Bad page state: %lu messages suppressed\n", 663 nr_unshown); 664 nr_unshown = 0; 665 } 666 nr_shown = 0; 667 } 668 if (nr_shown++ == 0) 669 resume = jiffies + 60 * HZ; 670 671 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 672 current->comm, page_to_pfn(page)); 673 dump_page(page, reason); 674 675 print_modules(); 676 dump_stack(); 677 out: 678 /* Leave bad fields for debug, except PageBuddy could make trouble */ 679 if (PageBuddy(page)) 680 __ClearPageBuddy(page); 681 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 682 } 683 684 static inline unsigned int order_to_pindex(int migratetype, int order) 685 { 686 687 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 688 bool movable; 689 if (order > PAGE_ALLOC_COSTLY_ORDER) { 690 VM_BUG_ON(order != HPAGE_PMD_ORDER); 691 692 movable = migratetype == MIGRATE_MOVABLE; 693 694 return NR_LOWORDER_PCP_LISTS + movable; 695 } 696 #else 697 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 698 #endif 699 700 return (MIGRATE_PCPTYPES * order) + migratetype; 701 } 702 703 static inline int pindex_to_order(unsigned int pindex) 704 { 705 int order = pindex / MIGRATE_PCPTYPES; 706 707 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 708 if (pindex >= NR_LOWORDER_PCP_LISTS) 709 order = HPAGE_PMD_ORDER; 710 #else 711 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 712 #endif 713 714 return order; 715 } 716 717 static inline bool pcp_allowed_order(unsigned int order) 718 { 719 if (order <= PAGE_ALLOC_COSTLY_ORDER) 720 return true; 721 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 722 if (order == HPAGE_PMD_ORDER) 723 return true; 724 #endif 725 return false; 726 } 727 728 /* 729 * Higher-order pages are called "compound pages". They are structured thusly: 730 * 731 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 732 * 733 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 734 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 735 * 736 * The first tail page's ->compound_order holds the order of allocation. 737 * This usage means that zero-order pages may not be compound. 738 */ 739 740 void prep_compound_page(struct page *page, unsigned int order) 741 { 742 int i; 743 int nr_pages = 1 << order; 744 745 __SetPageHead(page); 746 for (i = 1; i < nr_pages; i++) 747 prep_compound_tail(page, i); 748 749 prep_compound_head(page, order); 750 } 751 752 static inline void set_buddy_order(struct page *page, unsigned int order) 753 { 754 set_page_private(page, order); 755 __SetPageBuddy(page); 756 } 757 758 #ifdef CONFIG_COMPACTION 759 static inline struct capture_control *task_capc(struct zone *zone) 760 { 761 struct capture_control *capc = current->capture_control; 762 763 return unlikely(capc) && 764 !(current->flags & PF_KTHREAD) && 765 !capc->page && 766 capc->cc->zone == zone ? capc : NULL; 767 } 768 769 static inline bool 770 compaction_capture(struct capture_control *capc, struct page *page, 771 int order, int migratetype) 772 { 773 if (!capc || order != capc->cc->order) 774 return false; 775 776 /* Do not accidentally pollute CMA or isolated regions*/ 777 if (is_migrate_cma(migratetype) || 778 is_migrate_isolate(migratetype)) 779 return false; 780 781 /* 782 * Do not let lower order allocations pollute a movable pageblock 783 * unless compaction is also requesting movable pages. 784 * This might let an unmovable request use a reclaimable pageblock 785 * and vice-versa but no more than normal fallback logic which can 786 * have trouble finding a high-order free page. 787 */ 788 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && 789 capc->cc->migratetype != MIGRATE_MOVABLE) 790 return false; 791 792 if (migratetype != capc->cc->migratetype) 793 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, 794 capc->cc->migratetype, migratetype); 795 796 capc->page = page; 797 return true; 798 } 799 800 #else 801 static inline struct capture_control *task_capc(struct zone *zone) 802 { 803 return NULL; 804 } 805 806 static inline bool 807 compaction_capture(struct capture_control *capc, struct page *page, 808 int order, int migratetype) 809 { 810 return false; 811 } 812 #endif /* CONFIG_COMPACTION */ 813 814 static inline void account_freepages(struct zone *zone, int nr_pages, 815 int migratetype) 816 { 817 lockdep_assert_held(&zone->lock); 818 819 if (is_migrate_isolate(migratetype)) 820 return; 821 822 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); 823 824 if (is_migrate_cma(migratetype)) 825 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); 826 else if (migratetype == MIGRATE_HIGHATOMIC) 827 WRITE_ONCE(zone->nr_free_highatomic, 828 zone->nr_free_highatomic + nr_pages); 829 } 830 831 /* Used for pages not on another list */ 832 static inline void __add_to_free_list(struct page *page, struct zone *zone, 833 unsigned int order, int migratetype, 834 bool tail) 835 { 836 struct free_area *area = &zone->free_area[order]; 837 int nr_pages = 1 << order; 838 839 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 840 "page type is %d, passed migratetype is %d (nr=%d)\n", 841 get_pageblock_migratetype(page), migratetype, nr_pages); 842 843 if (tail) 844 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 845 else 846 list_add(&page->buddy_list, &area->free_list[migratetype]); 847 area->nr_free++; 848 849 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 850 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 851 } 852 853 /* 854 * Used for pages which are on another list. Move the pages to the tail 855 * of the list - so the moved pages won't immediately be considered for 856 * allocation again (e.g., optimization for memory onlining). 857 */ 858 static inline void move_to_free_list(struct page *page, struct zone *zone, 859 unsigned int order, int old_mt, int new_mt) 860 { 861 struct free_area *area = &zone->free_area[order]; 862 int nr_pages = 1 << order; 863 864 /* Free page moving can fail, so it happens before the type update */ 865 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, 866 "page type is %d, passed migratetype is %d (nr=%d)\n", 867 get_pageblock_migratetype(page), old_mt, nr_pages); 868 869 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); 870 871 account_freepages(zone, -nr_pages, old_mt); 872 account_freepages(zone, nr_pages, new_mt); 873 874 if (order >= pageblock_order && 875 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { 876 if (!is_migrate_isolate(old_mt)) 877 nr_pages = -nr_pages; 878 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); 879 } 880 } 881 882 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, 883 unsigned int order, int migratetype) 884 { 885 int nr_pages = 1 << order; 886 887 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, 888 "page type is %d, passed migratetype is %d (nr=%d)\n", 889 get_pageblock_migratetype(page), migratetype, nr_pages); 890 891 /* clear reported state and update reported page count */ 892 if (page_reported(page)) 893 __ClearPageReported(page); 894 895 list_del(&page->buddy_list); 896 __ClearPageBuddy(page); 897 set_page_private(page, 0); 898 zone->free_area[order].nr_free--; 899 900 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) 901 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); 902 } 903 904 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 905 unsigned int order, int migratetype) 906 { 907 __del_page_from_free_list(page, zone, order, migratetype); 908 account_freepages(zone, -(1 << order), migratetype); 909 } 910 911 static inline struct page *get_page_from_free_area(struct free_area *area, 912 int migratetype) 913 { 914 return list_first_entry_or_null(&area->free_list[migratetype], 915 struct page, buddy_list); 916 } 917 918 /* 919 * If this is less than the 2nd largest possible page, check if the buddy 920 * of the next-higher order is free. If it is, it's possible 921 * that pages are being freed that will coalesce soon. In case, 922 * that is happening, add the free page to the tail of the list 923 * so it's less likely to be used soon and more likely to be merged 924 * as a 2-level higher order page 925 */ 926 static inline bool 927 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 928 struct page *page, unsigned int order) 929 { 930 unsigned long higher_page_pfn; 931 struct page *higher_page; 932 933 if (order >= MAX_PAGE_ORDER - 1) 934 return false; 935 936 higher_page_pfn = buddy_pfn & pfn; 937 higher_page = page + (higher_page_pfn - pfn); 938 939 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 940 NULL) != NULL; 941 } 942 943 static void change_pageblock_range(struct page *pageblock_page, 944 int start_order, int migratetype) 945 { 946 int nr_pageblocks = 1 << (start_order - pageblock_order); 947 948 while (nr_pageblocks--) { 949 set_pageblock_migratetype(pageblock_page, migratetype); 950 pageblock_page += pageblock_nr_pages; 951 } 952 } 953 954 /* 955 * Freeing function for a buddy system allocator. 956 * 957 * The concept of a buddy system is to maintain direct-mapped table 958 * (containing bit values) for memory blocks of various "orders". 959 * The bottom level table contains the map for the smallest allocatable 960 * units of memory (here, pages), and each level above it describes 961 * pairs of units from the levels below, hence, "buddies". 962 * At a high level, all that happens here is marking the table entry 963 * at the bottom level available, and propagating the changes upward 964 * as necessary, plus some accounting needed to play nicely with other 965 * parts of the VM system. 966 * At each level, we keep a list of pages, which are heads of continuous 967 * free pages of length of (1 << order) and marked with PageBuddy. 968 * Page's order is recorded in page_private(page) field. 969 * So when we are allocating or freeing one, we can derive the state of the 970 * other. That is, if we allocate a small block, and both were 971 * free, the remainder of the region must be split into blocks. 972 * If a block is freed, and its buddy is also free, then this 973 * triggers coalescing into a block of larger size. 974 * 975 * -- nyc 976 */ 977 978 static inline void __free_one_page(struct page *page, 979 unsigned long pfn, 980 struct zone *zone, unsigned int order, 981 int migratetype, fpi_t fpi_flags) 982 { 983 struct capture_control *capc = task_capc(zone); 984 unsigned long buddy_pfn = 0; 985 unsigned long combined_pfn; 986 struct page *buddy; 987 bool to_tail; 988 989 VM_BUG_ON(!zone_is_initialized(zone)); 990 VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page); 991 992 VM_BUG_ON(migratetype == -1); 993 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 994 VM_BUG_ON_PAGE(bad_range(zone, page), page); 995 996 account_freepages(zone, 1 << order, migratetype); 997 998 while (order < MAX_PAGE_ORDER) { 999 int buddy_mt = migratetype; 1000 1001 if (compaction_capture(capc, page, order, migratetype)) { 1002 account_freepages(zone, -(1 << order), migratetype); 1003 return; 1004 } 1005 1006 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 1007 if (!buddy) 1008 goto done_merging; 1009 1010 if (unlikely(order >= pageblock_order)) { 1011 /* 1012 * We want to prevent merge between freepages on pageblock 1013 * without fallbacks and normal pageblock. Without this, 1014 * pageblock isolation could cause incorrect freepage or CMA 1015 * accounting or HIGHATOMIC accounting. 1016 */ 1017 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 1018 1019 if (migratetype != buddy_mt && 1020 (!migratetype_is_mergeable(migratetype) || 1021 !migratetype_is_mergeable(buddy_mt))) 1022 goto done_merging; 1023 } 1024 1025 /* 1026 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 1027 * merge with it and move up one order. 1028 */ 1029 if (page_is_guard(buddy)) 1030 clear_page_guard(zone, buddy, order); 1031 else 1032 __del_page_from_free_list(buddy, zone, order, buddy_mt); 1033 1034 if (unlikely(buddy_mt != migratetype)) { 1035 /* 1036 * Match buddy type. This ensures that an 1037 * expand() down the line puts the sub-blocks 1038 * on the right freelists. 1039 */ 1040 change_pageblock_range(buddy, order, migratetype); 1041 } 1042 1043 combined_pfn = buddy_pfn & pfn; 1044 page = page + (combined_pfn - pfn); 1045 pfn = combined_pfn; 1046 order++; 1047 } 1048 1049 done_merging: 1050 set_buddy_order(page, order); 1051 1052 if (fpi_flags & FPI_TO_TAIL) 1053 to_tail = true; 1054 else if (is_shuffle_order(order)) 1055 to_tail = shuffle_pick_tail(); 1056 else 1057 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 1058 1059 __add_to_free_list(page, zone, order, migratetype, to_tail); 1060 1061 /* Notify page reporting subsystem of freed page */ 1062 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 1063 page_reporting_notify_free(order); 1064 } 1065 1066 /* 1067 * A bad page could be due to a number of fields. Instead of multiple branches, 1068 * try and check multiple fields with one check. The caller must do a detailed 1069 * check if necessary. 1070 */ 1071 static inline bool page_expected_state(struct page *page, 1072 unsigned long check_flags) 1073 { 1074 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1075 return false; 1076 1077 if (unlikely((unsigned long)page->mapping | 1078 page_ref_count(page) | 1079 #ifdef CONFIG_MEMCG 1080 page->memcg_data | 1081 #endif 1082 page_pool_page_is_pp(page) | 1083 (page->flags.f & check_flags))) 1084 return false; 1085 1086 return true; 1087 } 1088 1089 static const char *page_bad_reason(struct page *page, unsigned long flags) 1090 { 1091 const char *bad_reason = NULL; 1092 1093 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1094 bad_reason = "nonzero mapcount"; 1095 if (unlikely(page->mapping != NULL)) 1096 bad_reason = "non-NULL mapping"; 1097 if (unlikely(page_ref_count(page) != 0)) 1098 bad_reason = "nonzero _refcount"; 1099 if (unlikely(page->flags.f & flags)) { 1100 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 1101 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 1102 else 1103 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 1104 } 1105 #ifdef CONFIG_MEMCG 1106 if (unlikely(page->memcg_data)) 1107 bad_reason = "page still charged to cgroup"; 1108 #endif 1109 if (unlikely(page_pool_page_is_pp(page))) 1110 bad_reason = "page_pool leak"; 1111 return bad_reason; 1112 } 1113 1114 static inline bool free_page_is_bad(struct page *page) 1115 { 1116 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 1117 return false; 1118 1119 /* Something has gone sideways, find it */ 1120 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 1121 return true; 1122 } 1123 1124 static inline bool is_check_pages_enabled(void) 1125 { 1126 return static_branch_unlikely(&check_pages_enabled); 1127 } 1128 1129 static int free_tail_page_prepare(struct page *head_page, struct page *page) 1130 { 1131 struct folio *folio = (struct folio *)head_page; 1132 int ret = 1; 1133 1134 /* 1135 * We rely page->lru.next never has bit 0 set, unless the page 1136 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 1137 */ 1138 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 1139 1140 if (!is_check_pages_enabled()) { 1141 ret = 0; 1142 goto out; 1143 } 1144 switch (page - head_page) { 1145 case 1: 1146 /* the first tail page: these may be in place of ->mapping */ 1147 if (unlikely(folio_large_mapcount(folio))) { 1148 bad_page(page, "nonzero large_mapcount"); 1149 goto out; 1150 } 1151 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && 1152 unlikely(atomic_read(&folio->_nr_pages_mapped))) { 1153 bad_page(page, "nonzero nr_pages_mapped"); 1154 goto out; 1155 } 1156 if (IS_ENABLED(CONFIG_MM_ID)) { 1157 if (unlikely(folio->_mm_id_mapcount[0] != -1)) { 1158 bad_page(page, "nonzero mm mapcount 0"); 1159 goto out; 1160 } 1161 if (unlikely(folio->_mm_id_mapcount[1] != -1)) { 1162 bad_page(page, "nonzero mm mapcount 1"); 1163 goto out; 1164 } 1165 } 1166 if (IS_ENABLED(CONFIG_64BIT)) { 1167 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1168 bad_page(page, "nonzero entire_mapcount"); 1169 goto out; 1170 } 1171 if (unlikely(atomic_read(&folio->_pincount))) { 1172 bad_page(page, "nonzero pincount"); 1173 goto out; 1174 } 1175 } 1176 break; 1177 case 2: 1178 /* the second tail page: deferred_list overlaps ->mapping */ 1179 if (unlikely(!list_empty(&folio->_deferred_list))) { 1180 bad_page(page, "on deferred list"); 1181 goto out; 1182 } 1183 if (!IS_ENABLED(CONFIG_64BIT)) { 1184 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { 1185 bad_page(page, "nonzero entire_mapcount"); 1186 goto out; 1187 } 1188 if (unlikely(atomic_read(&folio->_pincount))) { 1189 bad_page(page, "nonzero pincount"); 1190 goto out; 1191 } 1192 } 1193 break; 1194 case 3: 1195 /* the third tail page: hugetlb specifics overlap ->mappings */ 1196 if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) 1197 break; 1198 fallthrough; 1199 default: 1200 if (page->mapping != TAIL_MAPPING) { 1201 bad_page(page, "corrupted mapping in tail page"); 1202 goto out; 1203 } 1204 break; 1205 } 1206 if (unlikely(!PageTail(page))) { 1207 bad_page(page, "PageTail not set"); 1208 goto out; 1209 } 1210 if (unlikely(compound_head(page) != head_page)) { 1211 bad_page(page, "compound_head not consistent"); 1212 goto out; 1213 } 1214 ret = 0; 1215 out: 1216 page->mapping = NULL; 1217 clear_compound_head(page); 1218 return ret; 1219 } 1220 1221 /* 1222 * Skip KASAN memory poisoning when either: 1223 * 1224 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1225 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1226 * using page tags instead (see below). 1227 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1228 * that error detection is disabled for accesses via the page address. 1229 * 1230 * Pages will have match-all tags in the following circumstances: 1231 * 1232 * 1. Pages are being initialized for the first time, including during deferred 1233 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1234 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1235 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1236 * 3. The allocation was excluded from being checked due to sampling, 1237 * see the call to kasan_unpoison_pages. 1238 * 1239 * Poisoning pages during deferred memory init will greatly lengthen the 1240 * process and cause problem in large memory systems as the deferred pages 1241 * initialization is done with interrupt disabled. 1242 * 1243 * Assuming that there will be no reference to those newly initialized 1244 * pages before they are ever allocated, this should have no effect on 1245 * KASAN memory tracking as the poison will be properly inserted at page 1246 * allocation time. The only corner case is when pages are allocated by 1247 * on-demand allocation and then freed again before the deferred pages 1248 * initialization is done, but this is not likely to happen. 1249 */ 1250 static inline bool should_skip_kasan_poison(struct page *page) 1251 { 1252 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1253 return deferred_pages_enabled(); 1254 1255 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1256 } 1257 1258 static void kernel_init_pages(struct page *page, int numpages) 1259 { 1260 int i; 1261 1262 /* s390's use of memset() could override KASAN redzones. */ 1263 kasan_disable_current(); 1264 for (i = 0; i < numpages; i++) 1265 clear_highpage_kasan_tagged(page + i); 1266 kasan_enable_current(); 1267 } 1268 1269 #ifdef CONFIG_MEM_ALLOC_PROFILING 1270 1271 /* Should be called only if mem_alloc_profiling_enabled() */ 1272 void __clear_page_tag_ref(struct page *page) 1273 { 1274 union pgtag_ref_handle handle; 1275 union codetag_ref ref; 1276 1277 if (get_page_tag_ref(page, &ref, &handle)) { 1278 set_codetag_empty(&ref); 1279 update_page_tag_ref(handle, &ref); 1280 put_page_tag_ref(handle); 1281 } 1282 } 1283 1284 /* Should be called only if mem_alloc_profiling_enabled() */ 1285 static noinline 1286 void __pgalloc_tag_add(struct page *page, struct task_struct *task, 1287 unsigned int nr) 1288 { 1289 union pgtag_ref_handle handle; 1290 union codetag_ref ref; 1291 1292 if (get_page_tag_ref(page, &ref, &handle)) { 1293 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); 1294 update_page_tag_ref(handle, &ref); 1295 put_page_tag_ref(handle); 1296 } 1297 } 1298 1299 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1300 unsigned int nr) 1301 { 1302 if (mem_alloc_profiling_enabled()) 1303 __pgalloc_tag_add(page, task, nr); 1304 } 1305 1306 /* Should be called only if mem_alloc_profiling_enabled() */ 1307 static noinline 1308 void __pgalloc_tag_sub(struct page *page, unsigned int nr) 1309 { 1310 union pgtag_ref_handle handle; 1311 union codetag_ref ref; 1312 1313 if (get_page_tag_ref(page, &ref, &handle)) { 1314 alloc_tag_sub(&ref, PAGE_SIZE * nr); 1315 update_page_tag_ref(handle, &ref); 1316 put_page_tag_ref(handle); 1317 } 1318 } 1319 1320 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) 1321 { 1322 if (mem_alloc_profiling_enabled()) 1323 __pgalloc_tag_sub(page, nr); 1324 } 1325 1326 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */ 1327 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) 1328 { 1329 if (tag) 1330 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1331 } 1332 1333 #else /* CONFIG_MEM_ALLOC_PROFILING */ 1334 1335 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1336 unsigned int nr) {} 1337 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1338 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} 1339 1340 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1341 1342 __always_inline bool __free_pages_prepare(struct page *page, 1343 unsigned int order, fpi_t fpi_flags) 1344 { 1345 int bad = 0; 1346 bool skip_kasan_poison = should_skip_kasan_poison(page); 1347 bool init = want_init_on_free(); 1348 bool compound = PageCompound(page); 1349 struct folio *folio = page_folio(page); 1350 1351 VM_BUG_ON_PAGE(PageTail(page), page); 1352 1353 trace_mm_page_free(page, order); 1354 kmsan_free_page(page, order); 1355 1356 if (memcg_kmem_online() && PageMemcgKmem(page)) 1357 __memcg_kmem_uncharge_page(page, order); 1358 1359 /* 1360 * In rare cases, when truncation or holepunching raced with 1361 * munlock after VM_LOCKED was cleared, Mlocked may still be 1362 * found set here. This does not indicate a problem, unless 1363 * "unevictable_pgs_cleared" appears worryingly large. 1364 */ 1365 if (unlikely(folio_test_mlocked(folio))) { 1366 long nr_pages = folio_nr_pages(folio); 1367 1368 __folio_clear_mlocked(folio); 1369 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1370 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1371 } 1372 1373 if (unlikely(PageHWPoison(page)) && !order) { 1374 /* Do not let hwpoison pages hit pcplists/buddy */ 1375 reset_page_owner(page, order); 1376 page_table_check_free(page, order); 1377 pgalloc_tag_sub(page, 1 << order); 1378 1379 /* 1380 * The page is isolated and accounted for. 1381 * Mark the codetag as empty to avoid accounting error 1382 * when the page is freed by unpoison_memory(). 1383 */ 1384 clear_page_tag_ref(page); 1385 return false; 1386 } 1387 1388 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1389 1390 /* 1391 * Check tail pages before head page information is cleared to 1392 * avoid checking PageCompound for order-0 pages. 1393 */ 1394 if (unlikely(order)) { 1395 int i; 1396 1397 if (compound) { 1398 page[1].flags.f &= ~PAGE_FLAGS_SECOND; 1399 #ifdef NR_PAGES_IN_LARGE_FOLIO 1400 folio->_nr_pages = 0; 1401 #endif 1402 } 1403 for (i = 1; i < (1 << order); i++) { 1404 if (compound) 1405 bad += free_tail_page_prepare(page, page + i); 1406 if (is_check_pages_enabled()) { 1407 if (free_page_is_bad(page + i)) { 1408 bad++; 1409 continue; 1410 } 1411 } 1412 (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1413 } 1414 } 1415 if (folio_test_anon(folio)) { 1416 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 1417 folio->mapping = NULL; 1418 } 1419 if (unlikely(page_has_type(page))) 1420 /* Reset the page_type (which overlays _mapcount) */ 1421 page->page_type = UINT_MAX; 1422 1423 if (is_check_pages_enabled()) { 1424 if (free_page_is_bad(page)) 1425 bad++; 1426 if (bad) 1427 return false; 1428 } 1429 1430 page_cpupid_reset_last(page); 1431 page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1432 reset_page_owner(page, order); 1433 page_table_check_free(page, order); 1434 pgalloc_tag_sub(page, 1 << order); 1435 1436 if (!PageHighMem(page) && !(fpi_flags & FPI_TRYLOCK)) { 1437 debug_check_no_locks_freed(page_address(page), 1438 PAGE_SIZE << order); 1439 debug_check_no_obj_freed(page_address(page), 1440 PAGE_SIZE << order); 1441 } 1442 1443 kernel_poison_pages(page, 1 << order); 1444 1445 /* 1446 * As memory initialization might be integrated into KASAN, 1447 * KASAN poisoning and memory initialization code must be 1448 * kept together to avoid discrepancies in behavior. 1449 * 1450 * With hardware tag-based KASAN, memory tags must be set before the 1451 * page becomes unavailable via debug_pagealloc or arch_free_page. 1452 */ 1453 if (!skip_kasan_poison) { 1454 kasan_poison_pages(page, order, init); 1455 1456 /* Memory is already initialized if KASAN did it internally. */ 1457 if (kasan_has_integrated_init()) 1458 init = false; 1459 } 1460 if (init) 1461 kernel_init_pages(page, 1 << order); 1462 1463 /* 1464 * arch_free_page() can make the page's contents inaccessible. s390 1465 * does this. So nothing which can access the page's contents should 1466 * happen after this. 1467 */ 1468 arch_free_page(page, order); 1469 1470 debug_pagealloc_unmap_pages(page, 1 << order); 1471 1472 return true; 1473 } 1474 1475 bool free_pages_prepare(struct page *page, unsigned int order) 1476 { 1477 return __free_pages_prepare(page, order, FPI_NONE); 1478 } 1479 1480 /* 1481 * Frees a number of pages from the PCP lists 1482 * Assumes all pages on list are in same zone. 1483 * count is the number of pages to free. 1484 */ 1485 static void free_pcppages_bulk(struct zone *zone, int count, 1486 struct per_cpu_pages *pcp, 1487 int pindex) 1488 { 1489 unsigned long flags; 1490 unsigned int order; 1491 struct page *page; 1492 1493 /* 1494 * Ensure proper count is passed which otherwise would stuck in the 1495 * below while (list_empty(list)) loop. 1496 */ 1497 count = min(pcp->count, count); 1498 1499 /* Ensure requested pindex is drained first. */ 1500 pindex = pindex - 1; 1501 1502 spin_lock_irqsave(&zone->lock, flags); 1503 1504 while (count > 0) { 1505 struct list_head *list; 1506 int nr_pages; 1507 1508 /* Remove pages from lists in a round-robin fashion. */ 1509 do { 1510 if (++pindex > NR_PCP_LISTS - 1) 1511 pindex = 0; 1512 list = &pcp->lists[pindex]; 1513 } while (list_empty(list)); 1514 1515 order = pindex_to_order(pindex); 1516 nr_pages = 1 << order; 1517 do { 1518 unsigned long pfn; 1519 int mt; 1520 1521 page = list_last_entry(list, struct page, pcp_list); 1522 pfn = page_to_pfn(page); 1523 mt = get_pfnblock_migratetype(page, pfn); 1524 1525 /* must delete to avoid corrupting pcp list */ 1526 list_del(&page->pcp_list); 1527 count -= nr_pages; 1528 pcp->count -= nr_pages; 1529 1530 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); 1531 trace_mm_page_pcpu_drain(page, order, mt); 1532 } while (count > 0 && !list_empty(list)); 1533 } 1534 1535 spin_unlock_irqrestore(&zone->lock, flags); 1536 } 1537 1538 /* Split a multi-block free page into its individual pageblocks. */ 1539 static void split_large_buddy(struct zone *zone, struct page *page, 1540 unsigned long pfn, int order, fpi_t fpi) 1541 { 1542 unsigned long end = pfn + (1 << order); 1543 1544 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); 1545 /* Caller removed page from freelist, buddy info cleared! */ 1546 VM_WARN_ON_ONCE(PageBuddy(page)); 1547 1548 if (order > pageblock_order) 1549 order = pageblock_order; 1550 1551 do { 1552 int mt = get_pfnblock_migratetype(page, pfn); 1553 1554 __free_one_page(page, pfn, zone, order, mt, fpi); 1555 pfn += 1 << order; 1556 if (pfn == end) 1557 break; 1558 page = pfn_to_page(pfn); 1559 } while (1); 1560 } 1561 1562 static void add_page_to_zone_llist(struct zone *zone, struct page *page, 1563 unsigned int order) 1564 { 1565 /* Remember the order */ 1566 page->private = order; 1567 /* Add the page to the free list */ 1568 llist_add(&page->pcp_llist, &zone->trylock_free_pages); 1569 } 1570 1571 static void free_one_page(struct zone *zone, struct page *page, 1572 unsigned long pfn, unsigned int order, 1573 fpi_t fpi_flags) 1574 { 1575 struct llist_head *llhead; 1576 unsigned long flags; 1577 1578 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 1579 if (!spin_trylock_irqsave(&zone->lock, flags)) { 1580 add_page_to_zone_llist(zone, page, order); 1581 return; 1582 } 1583 } else { 1584 spin_lock_irqsave(&zone->lock, flags); 1585 } 1586 1587 /* The lock succeeded. Process deferred pages. */ 1588 llhead = &zone->trylock_free_pages; 1589 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) { 1590 struct llist_node *llnode; 1591 struct page *p, *tmp; 1592 1593 llnode = llist_del_all(llhead); 1594 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) { 1595 unsigned int p_order = p->private; 1596 1597 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags); 1598 __count_vm_events(PGFREE, 1 << p_order); 1599 } 1600 } 1601 split_large_buddy(zone, page, pfn, order, fpi_flags); 1602 spin_unlock_irqrestore(&zone->lock, flags); 1603 1604 __count_vm_events(PGFREE, 1 << order); 1605 } 1606 1607 static void __free_pages_ok(struct page *page, unsigned int order, 1608 fpi_t fpi_flags) 1609 { 1610 unsigned long pfn = page_to_pfn(page); 1611 struct zone *zone = page_zone(page); 1612 1613 if (__free_pages_prepare(page, order, fpi_flags)) 1614 free_one_page(zone, page, pfn, order, fpi_flags); 1615 } 1616 1617 void __meminit __free_pages_core(struct page *page, unsigned int order, 1618 enum meminit_context context) 1619 { 1620 unsigned int nr_pages = 1 << order; 1621 struct page *p = page; 1622 unsigned int loop; 1623 1624 /* 1625 * When initializing the memmap, __init_single_page() sets the refcount 1626 * of all pages to 1 ("allocated"/"not free"). We have to set the 1627 * refcount of all involved pages to 0. 1628 * 1629 * Note that hotplugged memory pages are initialized to PageOffline(). 1630 * Pages freed from memblock might be marked as reserved. 1631 */ 1632 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && 1633 unlikely(context == MEMINIT_HOTPLUG)) { 1634 for (loop = 0; loop < nr_pages; loop++, p++) { 1635 VM_WARN_ON_ONCE(PageReserved(p)); 1636 __ClearPageOffline(p); 1637 set_page_count(p, 0); 1638 } 1639 1640 adjust_managed_page_count(page, nr_pages); 1641 } else { 1642 for (loop = 0; loop < nr_pages; loop++, p++) { 1643 __ClearPageReserved(p); 1644 set_page_count(p, 0); 1645 } 1646 1647 /* memblock adjusts totalram_pages() manually. */ 1648 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1649 } 1650 1651 if (page_contains_unaccepted(page, order)) { 1652 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) 1653 return; 1654 1655 accept_memory(page_to_phys(page), PAGE_SIZE << order); 1656 } 1657 1658 /* 1659 * Bypass PCP and place fresh pages right to the tail, primarily 1660 * relevant for memory onlining. 1661 */ 1662 __free_pages_ok(page, order, FPI_TO_TAIL); 1663 } 1664 1665 /* 1666 * Check that the whole (or subset of) a pageblock given by the interval of 1667 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1668 * with the migration of free compaction scanner. 1669 * 1670 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1671 * 1672 * It's possible on some configurations to have a setup like node0 node1 node0 1673 * i.e. it's possible that all pages within a zones range of pages do not 1674 * belong to a single zone. We assume that a border between node0 and node1 1675 * can occur within a single pageblock, but not a node0 node1 node0 1676 * interleaving within a single pageblock. It is therefore sufficient to check 1677 * the first and last page of a pageblock and avoid checking each individual 1678 * page in a pageblock. 1679 * 1680 * Note: the function may return non-NULL struct page even for a page block 1681 * which contains a memory hole (i.e. there is no physical memory for a subset 1682 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which 1683 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1684 * even though the start pfn is online and valid. This should be safe most of 1685 * the time because struct pages are still initialized via init_unavailable_range() 1686 * and pfn walkers shouldn't touch any physical memory range for which they do 1687 * not recognize any specific metadata in struct pages. 1688 */ 1689 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1690 unsigned long end_pfn, struct zone *zone) 1691 { 1692 struct page *start_page; 1693 struct page *end_page; 1694 1695 /* end_pfn is one past the range we are checking */ 1696 end_pfn--; 1697 1698 if (!pfn_valid(end_pfn)) 1699 return NULL; 1700 1701 start_page = pfn_to_online_page(start_pfn); 1702 if (!start_page) 1703 return NULL; 1704 1705 if (page_zone(start_page) != zone) 1706 return NULL; 1707 1708 end_page = pfn_to_page(end_pfn); 1709 1710 /* This gives a shorter code than deriving page_zone(end_page) */ 1711 if (page_zone_id(start_page) != page_zone_id(end_page)) 1712 return NULL; 1713 1714 return start_page; 1715 } 1716 1717 /* 1718 * The order of subdivision here is critical for the IO subsystem. 1719 * Please do not alter this order without good reasons and regression 1720 * testing. Specifically, as large blocks of memory are subdivided, 1721 * the order in which smaller blocks are delivered depends on the order 1722 * they're subdivided in this function. This is the primary factor 1723 * influencing the order in which pages are delivered to the IO 1724 * subsystem according to empirical testing, and this is also justified 1725 * by considering the behavior of a buddy system containing a single 1726 * large block of memory acted on by a series of small allocations. 1727 * This behavior is a critical factor in sglist merging's success. 1728 * 1729 * -- nyc 1730 */ 1731 static inline unsigned int expand(struct zone *zone, struct page *page, int low, 1732 int high, int migratetype) 1733 { 1734 unsigned int size = 1 << high; 1735 unsigned int nr_added = 0; 1736 1737 while (high > low) { 1738 high--; 1739 size >>= 1; 1740 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1741 1742 /* 1743 * Mark as guard pages (or page), that will allow to 1744 * merge back to allocator when buddy will be freed. 1745 * Corresponding page table entries will not be touched, 1746 * pages will stay not present in virtual address space 1747 */ 1748 if (set_page_guard(zone, &page[size], high)) 1749 continue; 1750 1751 __add_to_free_list(&page[size], zone, high, migratetype, false); 1752 set_buddy_order(&page[size], high); 1753 nr_added += size; 1754 } 1755 1756 return nr_added; 1757 } 1758 1759 static __always_inline void page_del_and_expand(struct zone *zone, 1760 struct page *page, int low, 1761 int high, int migratetype) 1762 { 1763 int nr_pages = 1 << high; 1764 1765 __del_page_from_free_list(page, zone, high, migratetype); 1766 nr_pages -= expand(zone, page, low, high, migratetype); 1767 account_freepages(zone, -nr_pages, migratetype); 1768 } 1769 1770 static void check_new_page_bad(struct page *page) 1771 { 1772 if (unlikely(PageHWPoison(page))) { 1773 /* Don't complain about hwpoisoned pages */ 1774 if (PageBuddy(page)) 1775 __ClearPageBuddy(page); 1776 return; 1777 } 1778 1779 bad_page(page, 1780 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1781 } 1782 1783 /* 1784 * This page is about to be returned from the page allocator 1785 */ 1786 static bool check_new_page(struct page *page) 1787 { 1788 if (likely(page_expected_state(page, 1789 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1790 return false; 1791 1792 check_new_page_bad(page); 1793 return true; 1794 } 1795 1796 static inline bool check_new_pages(struct page *page, unsigned int order) 1797 { 1798 if (is_check_pages_enabled()) { 1799 for (int i = 0; i < (1 << order); i++) { 1800 struct page *p = page + i; 1801 1802 if (check_new_page(p)) 1803 return true; 1804 } 1805 } 1806 1807 return false; 1808 } 1809 1810 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1811 { 1812 /* Don't skip if a software KASAN mode is enabled. */ 1813 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1814 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1815 return false; 1816 1817 /* Skip, if hardware tag-based KASAN is not enabled. */ 1818 if (!kasan_hw_tags_enabled()) 1819 return true; 1820 1821 /* 1822 * With hardware tag-based KASAN enabled, skip if this has been 1823 * requested via __GFP_SKIP_KASAN. 1824 */ 1825 return flags & __GFP_SKIP_KASAN; 1826 } 1827 1828 static inline bool should_skip_init(gfp_t flags) 1829 { 1830 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1831 if (!kasan_hw_tags_enabled()) 1832 return false; 1833 1834 /* For hardware tag-based KASAN, skip if requested. */ 1835 return (flags & __GFP_SKIP_ZERO); 1836 } 1837 1838 inline void post_alloc_hook(struct page *page, unsigned int order, 1839 gfp_t gfp_flags) 1840 { 1841 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1842 !should_skip_init(gfp_flags); 1843 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1844 int i; 1845 1846 set_page_private(page, 0); 1847 1848 arch_alloc_page(page, order); 1849 debug_pagealloc_map_pages(page, 1 << order); 1850 1851 /* 1852 * Page unpoisoning must happen before memory initialization. 1853 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1854 * allocations and the page unpoisoning code will complain. 1855 */ 1856 kernel_unpoison_pages(page, 1 << order); 1857 1858 /* 1859 * As memory initialization might be integrated into KASAN, 1860 * KASAN unpoisoning and memory initialization code must be 1861 * kept together to avoid discrepancies in behavior. 1862 */ 1863 1864 /* 1865 * If memory tags should be zeroed 1866 * (which happens only when memory should be initialized as well). 1867 */ 1868 if (zero_tags) 1869 init = !tag_clear_highpages(page, 1 << order); 1870 1871 if (!should_skip_kasan_unpoison(gfp_flags) && 1872 kasan_unpoison_pages(page, order, init)) { 1873 /* Take note that memory was initialized by KASAN. */ 1874 if (kasan_has_integrated_init()) 1875 init = false; 1876 } else { 1877 /* 1878 * If memory tags have not been set by KASAN, reset the page 1879 * tags to ensure page_address() dereferencing does not fault. 1880 */ 1881 for (i = 0; i != 1 << order; ++i) 1882 page_kasan_tag_reset(page + i); 1883 } 1884 /* If memory is still not initialized, initialize it now. */ 1885 if (init) 1886 kernel_init_pages(page, 1 << order); 1887 1888 set_page_owner(page, order, gfp_flags); 1889 page_table_check_alloc(page, order); 1890 pgalloc_tag_add(page, current, 1 << order); 1891 } 1892 1893 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1894 unsigned int alloc_flags) 1895 { 1896 post_alloc_hook(page, order, gfp_flags); 1897 1898 if (order && (gfp_flags & __GFP_COMP)) 1899 prep_compound_page(page, order); 1900 1901 /* 1902 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1903 * allocate the page. The expectation is that the caller is taking 1904 * steps that will free more memory. The caller should avoid the page 1905 * being used for !PFMEMALLOC purposes. 1906 */ 1907 if (alloc_flags & ALLOC_NO_WATERMARKS) 1908 set_page_pfmemalloc(page); 1909 else 1910 clear_page_pfmemalloc(page); 1911 } 1912 1913 /* 1914 * Go through the free lists for the given migratetype and remove 1915 * the smallest available page from the freelists 1916 */ 1917 static __always_inline 1918 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1919 int migratetype) 1920 { 1921 unsigned int current_order; 1922 struct free_area *area; 1923 struct page *page; 1924 1925 /* Find a page of the appropriate size in the preferred list */ 1926 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1927 area = &(zone->free_area[current_order]); 1928 page = get_page_from_free_area(area, migratetype); 1929 if (!page) 1930 continue; 1931 1932 page_del_and_expand(zone, page, order, current_order, 1933 migratetype); 1934 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1935 pcp_allowed_order(order) && 1936 migratetype < MIGRATE_PCPTYPES); 1937 return page; 1938 } 1939 1940 return NULL; 1941 } 1942 1943 1944 /* 1945 * This array describes the order lists are fallen back to when 1946 * the free lists for the desirable migrate type are depleted 1947 * 1948 * The other migratetypes do not have fallbacks. 1949 */ 1950 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = { 1951 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1952 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1953 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1954 }; 1955 1956 #ifdef CONFIG_CMA 1957 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1958 unsigned int order) 1959 { 1960 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1961 } 1962 #else 1963 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1964 unsigned int order) { return NULL; } 1965 #endif 1966 1967 /* 1968 * Move all free pages of a block to new type's freelist. Caller needs to 1969 * change the block type. 1970 */ 1971 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, 1972 int old_mt, int new_mt) 1973 { 1974 struct page *page; 1975 unsigned long pfn, end_pfn; 1976 unsigned int order; 1977 int pages_moved = 0; 1978 1979 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1)); 1980 end_pfn = pageblock_end_pfn(start_pfn); 1981 1982 for (pfn = start_pfn; pfn < end_pfn;) { 1983 page = pfn_to_page(pfn); 1984 if (!PageBuddy(page)) { 1985 pfn++; 1986 continue; 1987 } 1988 1989 /* Make sure we are not inadvertently changing nodes */ 1990 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1991 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1992 1993 order = buddy_order(page); 1994 1995 move_to_free_list(page, zone, order, old_mt, new_mt); 1996 1997 pfn += 1 << order; 1998 pages_moved += 1 << order; 1999 } 2000 2001 return pages_moved; 2002 } 2003 2004 static bool prep_move_freepages_block(struct zone *zone, struct page *page, 2005 unsigned long *start_pfn, 2006 int *num_free, int *num_movable) 2007 { 2008 unsigned long pfn, start, end; 2009 2010 pfn = page_to_pfn(page); 2011 start = pageblock_start_pfn(pfn); 2012 end = pageblock_end_pfn(pfn); 2013 2014 /* 2015 * The caller only has the lock for @zone, don't touch ranges 2016 * that straddle into other zones. While we could move part of 2017 * the range that's inside the zone, this call is usually 2018 * accompanied by other operations such as migratetype updates 2019 * which also should be locked. 2020 */ 2021 if (!zone_spans_pfn(zone, start)) 2022 return false; 2023 if (!zone_spans_pfn(zone, end - 1)) 2024 return false; 2025 2026 *start_pfn = start; 2027 2028 if (num_free) { 2029 *num_free = 0; 2030 *num_movable = 0; 2031 for (pfn = start; pfn < end;) { 2032 page = pfn_to_page(pfn); 2033 if (PageBuddy(page)) { 2034 int nr = 1 << buddy_order(page); 2035 2036 *num_free += nr; 2037 pfn += nr; 2038 continue; 2039 } 2040 /* 2041 * We assume that pages that could be isolated for 2042 * migration are movable. But we don't actually try 2043 * isolating, as that would be expensive. 2044 */ 2045 if (PageLRU(page) || page_has_movable_ops(page)) 2046 (*num_movable)++; 2047 pfn++; 2048 } 2049 } 2050 2051 return true; 2052 } 2053 2054 static int move_freepages_block(struct zone *zone, struct page *page, 2055 int old_mt, int new_mt) 2056 { 2057 unsigned long start_pfn; 2058 int res; 2059 2060 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2061 return -1; 2062 2063 res = __move_freepages_block(zone, start_pfn, old_mt, new_mt); 2064 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); 2065 2066 return res; 2067 2068 } 2069 2070 #ifdef CONFIG_MEMORY_ISOLATION 2071 /* Look for a buddy that straddles start_pfn */ 2072 static unsigned long find_large_buddy(unsigned long start_pfn) 2073 { 2074 /* 2075 * If start_pfn is not an order-0 PageBuddy, next PageBuddy containing 2076 * start_pfn has minimal order of __ffs(start_pfn) + 1. Start checking 2077 * the order with __ffs(start_pfn). If start_pfn is order-0 PageBuddy, 2078 * the starting order does not matter. 2079 */ 2080 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER; 2081 struct page *page; 2082 unsigned long pfn = start_pfn; 2083 2084 while (!PageBuddy(page = pfn_to_page(pfn))) { 2085 /* Nothing found */ 2086 if (++order > MAX_PAGE_ORDER) 2087 return start_pfn; 2088 pfn &= ~0UL << order; 2089 } 2090 2091 /* 2092 * Found a preceding buddy, but does it straddle? 2093 */ 2094 if (pfn + (1 << buddy_order(page)) > start_pfn) 2095 return pfn; 2096 2097 /* Nothing found */ 2098 return start_pfn; 2099 } 2100 2101 static inline void toggle_pageblock_isolate(struct page *page, bool isolate) 2102 { 2103 if (isolate) 2104 set_pageblock_isolate(page); 2105 else 2106 clear_pageblock_isolate(page); 2107 } 2108 2109 /** 2110 * __move_freepages_block_isolate - move free pages in block for page isolation 2111 * @zone: the zone 2112 * @page: the pageblock page 2113 * @isolate: to isolate the given pageblock or unisolate it 2114 * 2115 * This is similar to move_freepages_block(), but handles the special 2116 * case encountered in page isolation, where the block of interest 2117 * might be part of a larger buddy spanning multiple pageblocks. 2118 * 2119 * Unlike the regular page allocator path, which moves pages while 2120 * stealing buddies off the freelist, page isolation is interested in 2121 * arbitrary pfn ranges that may have overlapping buddies on both ends. 2122 * 2123 * This function handles that. Straddling buddies are split into 2124 * individual pageblocks. Only the block of interest is moved. 2125 * 2126 * Returns %true if pages could be moved, %false otherwise. 2127 */ 2128 static bool __move_freepages_block_isolate(struct zone *zone, 2129 struct page *page, bool isolate) 2130 { 2131 unsigned long start_pfn, buddy_pfn; 2132 int from_mt; 2133 int to_mt; 2134 struct page *buddy; 2135 2136 if (isolate == get_pageblock_isolate(page)) { 2137 VM_WARN_ONCE(1, "%s a pageblock that is already in that state", 2138 isolate ? "Isolate" : "Unisolate"); 2139 return false; 2140 } 2141 2142 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) 2143 return false; 2144 2145 /* No splits needed if buddies can't span multiple blocks */ 2146 if (pageblock_order == MAX_PAGE_ORDER) 2147 goto move; 2148 2149 buddy_pfn = find_large_buddy(start_pfn); 2150 buddy = pfn_to_page(buddy_pfn); 2151 /* We're a part of a larger buddy */ 2152 if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) { 2153 int order = buddy_order(buddy); 2154 2155 del_page_from_free_list(buddy, zone, order, 2156 get_pfnblock_migratetype(buddy, buddy_pfn)); 2157 toggle_pageblock_isolate(page, isolate); 2158 split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE); 2159 return true; 2160 } 2161 2162 move: 2163 /* Use MIGRATETYPE_MASK to get non-isolate migratetype */ 2164 if (isolate) { 2165 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2166 MIGRATETYPE_MASK); 2167 to_mt = MIGRATE_ISOLATE; 2168 } else { 2169 from_mt = MIGRATE_ISOLATE; 2170 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), 2171 MIGRATETYPE_MASK); 2172 } 2173 2174 __move_freepages_block(zone, start_pfn, from_mt, to_mt); 2175 toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate); 2176 2177 return true; 2178 } 2179 2180 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page) 2181 { 2182 return __move_freepages_block_isolate(zone, page, true); 2183 } 2184 2185 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page) 2186 { 2187 return __move_freepages_block_isolate(zone, page, false); 2188 } 2189 2190 #endif /* CONFIG_MEMORY_ISOLATION */ 2191 2192 static inline bool boost_watermark(struct zone *zone) 2193 { 2194 unsigned long max_boost; 2195 2196 if (!watermark_boost_factor) 2197 return false; 2198 /* 2199 * Don't bother in zones that are unlikely to produce results. 2200 * On small machines, including kdump capture kernels running 2201 * in a small area, boosting the watermark can cause an out of 2202 * memory situation immediately. 2203 */ 2204 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2205 return false; 2206 2207 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2208 watermark_boost_factor, 10000); 2209 2210 /* 2211 * high watermark may be uninitialised if fragmentation occurs 2212 * very early in boot so do not boost. We do not fall 2213 * through and boost by pageblock_nr_pages as failing 2214 * allocations that early means that reclaim is not going 2215 * to help and it may even be impossible to reclaim the 2216 * boosted watermark resulting in a hang. 2217 */ 2218 if (!max_boost) 2219 return false; 2220 2221 max_boost = max(pageblock_nr_pages, max_boost); 2222 2223 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2224 max_boost); 2225 2226 return true; 2227 } 2228 2229 /* 2230 * When we are falling back to another migratetype during allocation, should we 2231 * try to claim an entire block to satisfy further allocations, instead of 2232 * polluting multiple pageblocks? 2233 */ 2234 static bool should_try_claim_block(unsigned int order, int start_mt) 2235 { 2236 /* 2237 * Leaving this order check is intended, although there is 2238 * relaxed order check in next check. The reason is that 2239 * we can actually claim the whole pageblock if this condition met, 2240 * but, below check doesn't guarantee it and that is just heuristic 2241 * so could be changed anytime. 2242 */ 2243 if (order >= pageblock_order) 2244 return true; 2245 2246 /* 2247 * Above a certain threshold, always try to claim, as it's likely there 2248 * will be more free pages in the pageblock. 2249 */ 2250 if (order >= pageblock_order / 2) 2251 return true; 2252 2253 /* 2254 * Unmovable/reclaimable allocations would cause permanent 2255 * fragmentations if they fell back to allocating from a movable block 2256 * (polluting it), so we try to claim the whole block regardless of the 2257 * allocation size. Later movable allocations can always steal from this 2258 * block, which is less problematic. 2259 */ 2260 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE) 2261 return true; 2262 2263 if (page_group_by_mobility_disabled) 2264 return true; 2265 2266 /* 2267 * Movable pages won't cause permanent fragmentation, so when you alloc 2268 * small pages, we just need to temporarily steal unmovable or 2269 * reclaimable pages that are closest to the request size. After a 2270 * while, memory compaction may occur to form large contiguous pages, 2271 * and the next movable allocation may not need to steal. 2272 */ 2273 return false; 2274 } 2275 2276 /* 2277 * Check whether there is a suitable fallback freepage with requested order. 2278 * If claimable is true, this function returns fallback_mt only if 2279 * we would do this whole-block claiming. This would help to reduce 2280 * fragmentation due to mixed migratetype pages in one pageblock. 2281 */ 2282 int find_suitable_fallback(struct free_area *area, unsigned int order, 2283 int migratetype, bool claimable) 2284 { 2285 int i; 2286 2287 if (claimable && !should_try_claim_block(order, migratetype)) 2288 return -2; 2289 2290 if (area->nr_free == 0) 2291 return -1; 2292 2293 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 2294 int fallback_mt = fallbacks[migratetype][i]; 2295 2296 if (!free_area_empty(area, fallback_mt)) 2297 return fallback_mt; 2298 } 2299 2300 return -1; 2301 } 2302 2303 /* 2304 * This function implements actual block claiming behaviour. If order is large 2305 * enough, we can claim the whole pageblock for the requested migratetype. If 2306 * not, we check the pageblock for constituent pages; if at least half of the 2307 * pages are free or compatible, we can still claim the whole block, so pages 2308 * freed in the future will be put on the correct free list. 2309 */ 2310 static struct page * 2311 try_to_claim_block(struct zone *zone, struct page *page, 2312 int current_order, int order, int start_type, 2313 int block_type, unsigned int alloc_flags) 2314 { 2315 int free_pages, movable_pages, alike_pages; 2316 unsigned long start_pfn; 2317 2318 /* Take ownership for orders >= pageblock_order */ 2319 if (current_order >= pageblock_order) { 2320 unsigned int nr_added; 2321 2322 del_page_from_free_list(page, zone, current_order, block_type); 2323 change_pageblock_range(page, current_order, start_type); 2324 nr_added = expand(zone, page, order, current_order, start_type); 2325 account_freepages(zone, nr_added, start_type); 2326 return page; 2327 } 2328 2329 /* 2330 * Boost watermarks to increase reclaim pressure to reduce the 2331 * likelihood of future fallbacks. Wake kswapd now as the node 2332 * may be balanced overall and kswapd will not wake naturally. 2333 */ 2334 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2335 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2336 2337 /* moving whole block can fail due to zone boundary conditions */ 2338 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 2339 &movable_pages)) 2340 return NULL; 2341 2342 /* 2343 * Determine how many pages are compatible with our allocation. 2344 * For movable allocation, it's the number of movable pages which 2345 * we just obtained. For other types it's a bit more tricky. 2346 */ 2347 if (start_type == MIGRATE_MOVABLE) { 2348 alike_pages = movable_pages; 2349 } else { 2350 /* 2351 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2352 * to MOVABLE pageblock, consider all non-movable pages as 2353 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2354 * vice versa, be conservative since we can't distinguish the 2355 * exact migratetype of non-movable pages. 2356 */ 2357 if (block_type == MIGRATE_MOVABLE) 2358 alike_pages = pageblock_nr_pages 2359 - (free_pages + movable_pages); 2360 else 2361 alike_pages = 0; 2362 } 2363 /* 2364 * If a sufficient number of pages in the block are either free or of 2365 * compatible migratability as our allocation, claim the whole block. 2366 */ 2367 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2368 page_group_by_mobility_disabled) { 2369 __move_freepages_block(zone, start_pfn, block_type, start_type); 2370 set_pageblock_migratetype(pfn_to_page(start_pfn), start_type); 2371 return __rmqueue_smallest(zone, order, start_type); 2372 } 2373 2374 return NULL; 2375 } 2376 2377 /* 2378 * Try to allocate from some fallback migratetype by claiming the entire block, 2379 * i.e. converting it to the allocation's start migratetype. 2380 * 2381 * The use of signed ints for order and current_order is a deliberate 2382 * deviation from the rest of this file, to make the for loop 2383 * condition simpler. 2384 */ 2385 static __always_inline struct page * 2386 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, 2387 unsigned int alloc_flags) 2388 { 2389 struct free_area *area; 2390 int current_order; 2391 int min_order = order; 2392 struct page *page; 2393 int fallback_mt; 2394 2395 /* 2396 * Do not steal pages from freelists belonging to other pageblocks 2397 * i.e. orders < pageblock_order. If there are no local zones free, 2398 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2399 */ 2400 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2401 min_order = pageblock_order; 2402 2403 /* 2404 * Find the largest available free page in the other list. This roughly 2405 * approximates finding the pageblock with the most free pages, which 2406 * would be too costly to do exactly. 2407 */ 2408 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2409 --current_order) { 2410 area = &(zone->free_area[current_order]); 2411 fallback_mt = find_suitable_fallback(area, current_order, 2412 start_migratetype, true); 2413 2414 /* No block in that order */ 2415 if (fallback_mt == -1) 2416 continue; 2417 2418 /* Advanced into orders too low to claim, abort */ 2419 if (fallback_mt == -2) 2420 break; 2421 2422 page = get_page_from_free_area(area, fallback_mt); 2423 page = try_to_claim_block(zone, page, current_order, order, 2424 start_migratetype, fallback_mt, 2425 alloc_flags); 2426 if (page) { 2427 trace_mm_page_alloc_extfrag(page, order, current_order, 2428 start_migratetype, fallback_mt); 2429 return page; 2430 } 2431 } 2432 2433 return NULL; 2434 } 2435 2436 /* 2437 * Try to steal a single page from some fallback migratetype. Leave the rest of 2438 * the block as its current migratetype, potentially causing fragmentation. 2439 */ 2440 static __always_inline struct page * 2441 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) 2442 { 2443 struct free_area *area; 2444 int current_order; 2445 struct page *page; 2446 int fallback_mt; 2447 2448 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2449 area = &(zone->free_area[current_order]); 2450 fallback_mt = find_suitable_fallback(area, current_order, 2451 start_migratetype, false); 2452 if (fallback_mt == -1) 2453 continue; 2454 2455 page = get_page_from_free_area(area, fallback_mt); 2456 page_del_and_expand(zone, page, order, current_order, fallback_mt); 2457 trace_mm_page_alloc_extfrag(page, order, current_order, 2458 start_migratetype, fallback_mt); 2459 return page; 2460 } 2461 2462 return NULL; 2463 } 2464 2465 enum rmqueue_mode { 2466 RMQUEUE_NORMAL, 2467 RMQUEUE_CMA, 2468 RMQUEUE_CLAIM, 2469 RMQUEUE_STEAL, 2470 }; 2471 2472 /* 2473 * Do the hard work of removing an element from the buddy allocator. 2474 * Call me with the zone->lock already held. 2475 */ 2476 static __always_inline struct page * 2477 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2478 unsigned int alloc_flags, enum rmqueue_mode *mode) 2479 { 2480 struct page *page; 2481 2482 if (IS_ENABLED(CONFIG_CMA)) { 2483 /* 2484 * Balance movable allocations between regular and CMA areas by 2485 * allocating from CMA when over half of the zone's free memory 2486 * is in the CMA area. 2487 */ 2488 if (alloc_flags & ALLOC_CMA && 2489 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2490 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2491 page = __rmqueue_cma_fallback(zone, order); 2492 if (page) 2493 return page; 2494 } 2495 } 2496 2497 /* 2498 * First try the freelists of the requested migratetype, then try 2499 * fallbacks modes with increasing levels of fragmentation risk. 2500 * 2501 * The fallback logic is expensive and rmqueue_bulk() calls in 2502 * a loop with the zone->lock held, meaning the freelists are 2503 * not subject to any outside changes. Remember in *mode where 2504 * we found pay dirt, to save us the search on the next call. 2505 */ 2506 switch (*mode) { 2507 case RMQUEUE_NORMAL: 2508 page = __rmqueue_smallest(zone, order, migratetype); 2509 if (page) 2510 return page; 2511 fallthrough; 2512 case RMQUEUE_CMA: 2513 if (alloc_flags & ALLOC_CMA) { 2514 page = __rmqueue_cma_fallback(zone, order); 2515 if (page) { 2516 *mode = RMQUEUE_CMA; 2517 return page; 2518 } 2519 } 2520 fallthrough; 2521 case RMQUEUE_CLAIM: 2522 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); 2523 if (page) { 2524 /* Replenished preferred freelist, back to normal mode. */ 2525 *mode = RMQUEUE_NORMAL; 2526 return page; 2527 } 2528 fallthrough; 2529 case RMQUEUE_STEAL: 2530 if (!(alloc_flags & ALLOC_NOFRAGMENT)) { 2531 page = __rmqueue_steal(zone, order, migratetype); 2532 if (page) { 2533 *mode = RMQUEUE_STEAL; 2534 return page; 2535 } 2536 } 2537 } 2538 return NULL; 2539 } 2540 2541 /* 2542 * Obtain a specified number of elements from the buddy allocator, all under 2543 * a single hold of the lock, for efficiency. Add them to the supplied list. 2544 * Returns the number of new pages which were placed at *list. 2545 */ 2546 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2547 unsigned long count, struct list_head *list, 2548 int migratetype, unsigned int alloc_flags) 2549 { 2550 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 2551 unsigned long flags; 2552 int i; 2553 2554 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 2555 if (!spin_trylock_irqsave(&zone->lock, flags)) 2556 return 0; 2557 } else { 2558 spin_lock_irqsave(&zone->lock, flags); 2559 } 2560 for (i = 0; i < count; ++i) { 2561 struct page *page = __rmqueue(zone, order, migratetype, 2562 alloc_flags, &rmqm); 2563 if (unlikely(page == NULL)) 2564 break; 2565 2566 /* 2567 * Split buddy pages returned by expand() are received here in 2568 * physical page order. The page is added to the tail of 2569 * caller's list. From the callers perspective, the linked list 2570 * is ordered by page number under some conditions. This is 2571 * useful for IO devices that can forward direction from the 2572 * head, thus also in the physical page order. This is useful 2573 * for IO devices that can merge IO requests if the physical 2574 * pages are ordered properly. 2575 */ 2576 list_add_tail(&page->pcp_list, list); 2577 } 2578 spin_unlock_irqrestore(&zone->lock, flags); 2579 2580 return i; 2581 } 2582 2583 /* 2584 * Called from the vmstat counter updater to decay the PCP high. 2585 * Return whether there are addition works to do. 2586 */ 2587 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) 2588 { 2589 int high_min, to_drain, to_drain_batched, batch; 2590 unsigned long UP_flags; 2591 bool todo = false; 2592 2593 high_min = READ_ONCE(pcp->high_min); 2594 batch = READ_ONCE(pcp->batch); 2595 /* 2596 * Decrease pcp->high periodically to try to free possible 2597 * idle PCP pages. And, avoid to free too many pages to 2598 * control latency. This caps pcp->high decrement too. 2599 */ 2600 if (pcp->high > high_min) { 2601 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2602 pcp->high - (pcp->high >> 3), high_min); 2603 if (pcp->high > high_min) 2604 todo = true; 2605 } 2606 2607 to_drain = pcp->count - pcp->high; 2608 while (to_drain > 0) { 2609 to_drain_batched = min(to_drain, batch); 2610 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 2611 free_pcppages_bulk(zone, to_drain_batched, pcp, 0); 2612 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 2613 todo = true; 2614 2615 to_drain -= to_drain_batched; 2616 } 2617 2618 return todo; 2619 } 2620 2621 #ifdef CONFIG_NUMA 2622 /* 2623 * Called from the vmstat counter updater to drain pagesets of this 2624 * currently executing processor on remote nodes after they have 2625 * expired. 2626 */ 2627 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2628 { 2629 unsigned long UP_flags; 2630 int to_drain, batch; 2631 2632 batch = READ_ONCE(pcp->batch); 2633 to_drain = min(pcp->count, batch); 2634 if (to_drain > 0) { 2635 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 2636 free_pcppages_bulk(zone, to_drain, pcp, 0); 2637 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 2638 } 2639 } 2640 #endif 2641 2642 /* 2643 * Drain pcplists of the indicated processor and zone. 2644 */ 2645 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2646 { 2647 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2648 unsigned long UP_flags; 2649 int count; 2650 2651 do { 2652 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 2653 count = pcp->count; 2654 if (count) { 2655 int to_drain = min(count, 2656 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2657 2658 free_pcppages_bulk(zone, to_drain, pcp, 0); 2659 count -= to_drain; 2660 } 2661 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 2662 } while (count); 2663 } 2664 2665 /* 2666 * Drain pcplists of all zones on the indicated processor. 2667 */ 2668 static void drain_pages(unsigned int cpu) 2669 { 2670 struct zone *zone; 2671 2672 for_each_populated_zone(zone) { 2673 drain_pages_zone(cpu, zone); 2674 } 2675 } 2676 2677 /* 2678 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2679 */ 2680 void drain_local_pages(struct zone *zone) 2681 { 2682 int cpu = smp_processor_id(); 2683 2684 if (zone) 2685 drain_pages_zone(cpu, zone); 2686 else 2687 drain_pages(cpu); 2688 } 2689 2690 /* 2691 * The implementation of drain_all_pages(), exposing an extra parameter to 2692 * drain on all cpus. 2693 * 2694 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2695 * not empty. The check for non-emptiness can however race with a free to 2696 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2697 * that need the guarantee that every CPU has drained can disable the 2698 * optimizing racy check. 2699 */ 2700 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2701 { 2702 int cpu; 2703 2704 /* 2705 * Allocate in the BSS so we won't require allocation in 2706 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2707 */ 2708 static cpumask_t cpus_with_pcps; 2709 2710 /* 2711 * Do not drain if one is already in progress unless it's specific to 2712 * a zone. Such callers are primarily CMA and memory hotplug and need 2713 * the drain to be complete when the call returns. 2714 */ 2715 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2716 if (!zone) 2717 return; 2718 mutex_lock(&pcpu_drain_mutex); 2719 } 2720 2721 /* 2722 * We don't care about racing with CPU hotplug event 2723 * as offline notification will cause the notified 2724 * cpu to drain that CPU pcps and on_each_cpu_mask 2725 * disables preemption as part of its processing 2726 */ 2727 for_each_online_cpu(cpu) { 2728 struct per_cpu_pages *pcp; 2729 struct zone *z; 2730 bool has_pcps = false; 2731 2732 if (force_all_cpus) { 2733 /* 2734 * The pcp.count check is racy, some callers need a 2735 * guarantee that no cpu is missed. 2736 */ 2737 has_pcps = true; 2738 } else if (zone) { 2739 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2740 if (pcp->count) 2741 has_pcps = true; 2742 } else { 2743 for_each_populated_zone(z) { 2744 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2745 if (pcp->count) { 2746 has_pcps = true; 2747 break; 2748 } 2749 } 2750 } 2751 2752 if (has_pcps) 2753 cpumask_set_cpu(cpu, &cpus_with_pcps); 2754 else 2755 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2756 } 2757 2758 for_each_cpu(cpu, &cpus_with_pcps) { 2759 if (zone) 2760 drain_pages_zone(cpu, zone); 2761 else 2762 drain_pages(cpu); 2763 } 2764 2765 mutex_unlock(&pcpu_drain_mutex); 2766 } 2767 2768 /* 2769 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2770 * 2771 * When zone parameter is non-NULL, spill just the single zone's pages. 2772 */ 2773 void drain_all_pages(struct zone *zone) 2774 { 2775 __drain_all_pages(zone, false); 2776 } 2777 2778 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2779 { 2780 int min_nr_free, max_nr_free; 2781 2782 /* Free as much as possible if batch freeing high-order pages. */ 2783 if (unlikely(free_high)) 2784 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2785 2786 /* Check for PCP disabled or boot pageset */ 2787 if (unlikely(high < batch)) 2788 return 1; 2789 2790 /* Leave at least pcp->batch pages on the list */ 2791 min_nr_free = batch; 2792 max_nr_free = high - batch; 2793 2794 /* 2795 * Increase the batch number to the number of the consecutive 2796 * freed pages to reduce zone lock contention. 2797 */ 2798 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2799 2800 return batch; 2801 } 2802 2803 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2804 int batch, bool free_high) 2805 { 2806 int high, high_min, high_max; 2807 2808 high_min = READ_ONCE(pcp->high_min); 2809 high_max = READ_ONCE(pcp->high_max); 2810 high = pcp->high = clamp(pcp->high, high_min, high_max); 2811 2812 if (unlikely(!high)) 2813 return 0; 2814 2815 if (unlikely(free_high)) { 2816 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2817 high_min); 2818 return 0; 2819 } 2820 2821 /* 2822 * If reclaim is active, limit the number of pages that can be 2823 * stored on pcp lists 2824 */ 2825 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2826 int free_count = max_t(int, pcp->free_count, batch); 2827 2828 pcp->high = max(high - free_count, high_min); 2829 return min(batch << 2, pcp->high); 2830 } 2831 2832 if (high_min == high_max) 2833 return high; 2834 2835 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2836 int free_count = max_t(int, pcp->free_count, batch); 2837 2838 pcp->high = max(high - free_count, high_min); 2839 high = max(pcp->count, high_min); 2840 } else if (pcp->count >= high) { 2841 int need_high = pcp->free_count + batch; 2842 2843 /* pcp->high should be large enough to hold batch freed pages */ 2844 if (pcp->high < need_high) 2845 pcp->high = clamp(need_high, high_min, high_max); 2846 } 2847 2848 return high; 2849 } 2850 2851 /* 2852 * Tune pcp alloc factor and adjust count & free_count. Free pages to bring the 2853 * pcp's watermarks below high. 2854 * 2855 * May return a freed pcp, if during page freeing the pcp spinlock cannot be 2856 * reacquired. Return true if pcp is locked, false otherwise. 2857 */ 2858 static bool free_frozen_page_commit(struct zone *zone, 2859 struct per_cpu_pages *pcp, struct page *page, int migratetype, 2860 unsigned int order, fpi_t fpi_flags, unsigned long *UP_flags) 2861 { 2862 int high, batch; 2863 int to_free, to_free_batched; 2864 int pindex; 2865 int cpu = smp_processor_id(); 2866 int ret = true; 2867 bool free_high = false; 2868 2869 /* 2870 * On freeing, reduce the number of pages that are batch allocated. 2871 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2872 * allocations. 2873 */ 2874 pcp->alloc_factor >>= 1; 2875 __count_vm_events(PGFREE, 1 << order); 2876 pindex = order_to_pindex(migratetype, order); 2877 list_add(&page->pcp_list, &pcp->lists[pindex]); 2878 pcp->count += 1 << order; 2879 2880 batch = READ_ONCE(pcp->batch); 2881 /* 2882 * As high-order pages other than THP's stored on PCP can contribute 2883 * to fragmentation, limit the number stored when PCP is heavily 2884 * freeing without allocation. The remainder after bulk freeing 2885 * stops will be drained from vmstat refresh context. 2886 */ 2887 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2888 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) && 2889 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2890 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2891 pcp->count >= batch)); 2892 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2893 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2894 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2895 } 2896 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2897 pcp->free_count += (1 << order); 2898 2899 if (unlikely(fpi_flags & FPI_TRYLOCK)) { 2900 /* 2901 * Do not attempt to take a zone lock. Let pcp->count get 2902 * over high mark temporarily. 2903 */ 2904 return true; 2905 } 2906 2907 high = nr_pcp_high(pcp, zone, batch, free_high); 2908 if (pcp->count < high) 2909 return true; 2910 2911 to_free = nr_pcp_free(pcp, batch, high, free_high); 2912 while (to_free > 0 && pcp->count > 0) { 2913 to_free_batched = min(to_free, batch); 2914 free_pcppages_bulk(zone, to_free_batched, pcp, pindex); 2915 to_free -= to_free_batched; 2916 2917 if (to_free == 0 || pcp->count == 0) 2918 break; 2919 2920 pcp_spin_unlock(pcp, *UP_flags); 2921 2922 pcp = pcp_spin_trylock(zone->per_cpu_pageset, *UP_flags); 2923 if (!pcp) { 2924 ret = false; 2925 break; 2926 } 2927 2928 /* 2929 * Check if this thread has been migrated to a different CPU. 2930 * If that is the case, give up and indicate that the pcp is 2931 * returned in an unlocked state. 2932 */ 2933 if (smp_processor_id() != cpu) { 2934 pcp_spin_unlock(pcp, *UP_flags); 2935 ret = false; 2936 break; 2937 } 2938 } 2939 2940 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2941 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2942 ZONE_MOVABLE, 0)) { 2943 struct pglist_data *pgdat = zone->zone_pgdat; 2944 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2945 2946 /* 2947 * Assume that memory pressure on this node is gone and may be 2948 * in a reclaimable state. If a memory fallback node exists, 2949 * direct reclaim may not have been triggered, causing a 2950 * 'hopeless node' to stay in that state for a while. Let 2951 * kswapd work again by resetting kswapd_failures. 2952 */ 2953 if (kswapd_test_hopeless(pgdat) && 2954 next_memory_node(pgdat->node_id) < MAX_NUMNODES) 2955 kswapd_clear_hopeless(pgdat, KSWAPD_CLEAR_HOPELESS_PCP); 2956 } 2957 return ret; 2958 } 2959 2960 /* 2961 * Free a pcp page 2962 */ 2963 static void __free_frozen_pages(struct page *page, unsigned int order, 2964 fpi_t fpi_flags) 2965 { 2966 unsigned long UP_flags; 2967 struct per_cpu_pages *pcp; 2968 struct zone *zone; 2969 unsigned long pfn = page_to_pfn(page); 2970 int migratetype; 2971 2972 if (!pcp_allowed_order(order)) { 2973 __free_pages_ok(page, order, fpi_flags); 2974 return; 2975 } 2976 2977 if (!__free_pages_prepare(page, order, fpi_flags)) 2978 return; 2979 2980 /* 2981 * We only track unmovable, reclaimable and movable on pcp lists. 2982 * Place ISOLATE pages on the isolated list because they are being 2983 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2984 * get those areas back if necessary. Otherwise, we may have to free 2985 * excessively into the page allocator 2986 */ 2987 zone = page_zone(page); 2988 migratetype = get_pfnblock_migratetype(page, pfn); 2989 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2990 if (unlikely(is_migrate_isolate(migratetype))) { 2991 free_one_page(zone, page, pfn, order, fpi_flags); 2992 return; 2993 } 2994 migratetype = MIGRATE_MOVABLE; 2995 } 2996 2997 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT) 2998 && (in_nmi() || in_hardirq()))) { 2999 add_page_to_zone_llist(zone, page, order); 3000 return; 3001 } 3002 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 3003 if (pcp) { 3004 if (!free_frozen_page_commit(zone, pcp, page, migratetype, 3005 order, fpi_flags, &UP_flags)) 3006 return; 3007 pcp_spin_unlock(pcp, UP_flags); 3008 } else { 3009 free_one_page(zone, page, pfn, order, fpi_flags); 3010 } 3011 } 3012 3013 void free_frozen_pages(struct page *page, unsigned int order) 3014 { 3015 __free_frozen_pages(page, order, FPI_NONE); 3016 } 3017 3018 void free_frozen_pages_nolock(struct page *page, unsigned int order) 3019 { 3020 __free_frozen_pages(page, order, FPI_TRYLOCK); 3021 } 3022 3023 /* 3024 * Free a batch of folios 3025 */ 3026 void free_unref_folios(struct folio_batch *folios) 3027 { 3028 unsigned long UP_flags; 3029 struct per_cpu_pages *pcp = NULL; 3030 struct zone *locked_zone = NULL; 3031 int i, j; 3032 3033 /* Prepare folios for freeing */ 3034 for (i = 0, j = 0; i < folios->nr; i++) { 3035 struct folio *folio = folios->folios[i]; 3036 unsigned long pfn = folio_pfn(folio); 3037 unsigned int order = folio_order(folio); 3038 3039 if (!__free_pages_prepare(&folio->page, order, FPI_NONE)) 3040 continue; 3041 /* 3042 * Free orders not handled on the PCP directly to the 3043 * allocator. 3044 */ 3045 if (!pcp_allowed_order(order)) { 3046 free_one_page(folio_zone(folio), &folio->page, 3047 pfn, order, FPI_NONE); 3048 continue; 3049 } 3050 folio->private = (void *)(unsigned long)order; 3051 if (j != i) 3052 folios->folios[j] = folio; 3053 j++; 3054 } 3055 folios->nr = j; 3056 3057 for (i = 0; i < folios->nr; i++) { 3058 struct folio *folio = folios->folios[i]; 3059 struct zone *zone = folio_zone(folio); 3060 unsigned long pfn = folio_pfn(folio); 3061 unsigned int order = (unsigned long)folio->private; 3062 int migratetype; 3063 3064 folio->private = NULL; 3065 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 3066 3067 /* Different zone requires a different pcp lock */ 3068 if (zone != locked_zone || 3069 is_migrate_isolate(migratetype)) { 3070 if (pcp) { 3071 pcp_spin_unlock(pcp, UP_flags); 3072 locked_zone = NULL; 3073 pcp = NULL; 3074 } 3075 3076 /* 3077 * Free isolated pages directly to the 3078 * allocator, see comment in free_frozen_pages. 3079 */ 3080 if (is_migrate_isolate(migratetype)) { 3081 free_one_page(zone, &folio->page, pfn, 3082 order, FPI_NONE); 3083 continue; 3084 } 3085 3086 /* 3087 * trylock is necessary as folios may be getting freed 3088 * from IRQ or SoftIRQ context after an IO completion. 3089 */ 3090 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 3091 if (unlikely(!pcp)) { 3092 free_one_page(zone, &folio->page, pfn, 3093 order, FPI_NONE); 3094 continue; 3095 } 3096 locked_zone = zone; 3097 } 3098 3099 /* 3100 * Non-isolated types over MIGRATE_PCPTYPES get added 3101 * to the MIGRATE_MOVABLE pcp list. 3102 */ 3103 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3104 migratetype = MIGRATE_MOVABLE; 3105 3106 trace_mm_page_free_batched(&folio->page); 3107 if (!free_frozen_page_commit(zone, pcp, &folio->page, 3108 migratetype, order, FPI_NONE, &UP_flags)) { 3109 pcp = NULL; 3110 locked_zone = NULL; 3111 } 3112 } 3113 3114 if (pcp) 3115 pcp_spin_unlock(pcp, UP_flags); 3116 folio_batch_reinit(folios); 3117 } 3118 3119 static void __split_page(struct page *page, unsigned int order) 3120 { 3121 VM_WARN_ON_PAGE(PageCompound(page), page); 3122 3123 split_page_owner(page, order, 0); 3124 pgalloc_tag_split(page_folio(page), order, 0); 3125 split_page_memcg(page, order); 3126 } 3127 3128 /* 3129 * split_page takes a non-compound higher-order page, and splits it into 3130 * n (1<<order) sub-pages: page[0..n] 3131 * Each sub-page must be freed individually. 3132 * 3133 * Note: this is probably too low level an operation for use in drivers. 3134 * Please consult with lkml before using this in your driver. 3135 */ 3136 void split_page(struct page *page, unsigned int order) 3137 { 3138 int i; 3139 3140 VM_WARN_ON_PAGE(!page_count(page), page); 3141 3142 for (i = 1; i < (1 << order); i++) 3143 set_page_refcounted(page + i); 3144 3145 __split_page(page, order); 3146 } 3147 EXPORT_SYMBOL_GPL(split_page); 3148 3149 int __isolate_free_page(struct page *page, unsigned int order) 3150 { 3151 struct zone *zone = page_zone(page); 3152 int mt = get_pageblock_migratetype(page); 3153 3154 if (!is_migrate_isolate(mt)) { 3155 unsigned long watermark; 3156 /* 3157 * Obey watermarks as if the page was being allocated. We can 3158 * emulate a high-order watermark check with a raised order-0 3159 * watermark, because we already know our high-order page 3160 * exists. 3161 */ 3162 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3163 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3164 return 0; 3165 } 3166 3167 del_page_from_free_list(page, zone, order, mt); 3168 3169 /* 3170 * Set the pageblock if the isolated page is at least half of a 3171 * pageblock 3172 */ 3173 if (order >= pageblock_order - 1) { 3174 struct page *endpage = page + (1 << order) - 1; 3175 for (; page < endpage; page += pageblock_nr_pages) { 3176 int mt = get_pageblock_migratetype(page); 3177 /* 3178 * Only change normal pageblocks (i.e., they can merge 3179 * with others) 3180 */ 3181 if (migratetype_is_mergeable(mt)) 3182 move_freepages_block(zone, page, mt, 3183 MIGRATE_MOVABLE); 3184 } 3185 } 3186 3187 return 1UL << order; 3188 } 3189 3190 /** 3191 * __putback_isolated_page - Return a now-isolated page back where we got it 3192 * @page: Page that was isolated 3193 * @order: Order of the isolated page 3194 * @mt: The page's pageblock's migratetype 3195 * 3196 * This function is meant to return a page pulled from the free lists via 3197 * __isolate_free_page back to the free lists they were pulled from. 3198 */ 3199 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3200 { 3201 struct zone *zone = page_zone(page); 3202 3203 /* zone lock should be held when this function is called */ 3204 lockdep_assert_held(&zone->lock); 3205 3206 /* Return isolated page to tail of freelist. */ 3207 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3208 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3209 } 3210 3211 /* 3212 * Update NUMA hit/miss statistics 3213 */ 3214 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3215 long nr_account) 3216 { 3217 #ifdef CONFIG_NUMA 3218 enum numa_stat_item local_stat = NUMA_LOCAL; 3219 3220 /* skip numa counters update if numa stats is disabled */ 3221 if (!static_branch_likely(&vm_numa_stat_key)) 3222 return; 3223 3224 if (zone_to_nid(z) != numa_node_id()) 3225 local_stat = NUMA_OTHER; 3226 3227 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3228 __count_numa_events(z, NUMA_HIT, nr_account); 3229 else { 3230 __count_numa_events(z, NUMA_MISS, nr_account); 3231 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3232 } 3233 __count_numa_events(z, local_stat, nr_account); 3234 #endif 3235 } 3236 3237 static __always_inline 3238 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 3239 unsigned int order, unsigned int alloc_flags, 3240 int migratetype) 3241 { 3242 struct page *page; 3243 unsigned long flags; 3244 3245 do { 3246 page = NULL; 3247 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { 3248 if (!spin_trylock_irqsave(&zone->lock, flags)) 3249 return NULL; 3250 } else { 3251 spin_lock_irqsave(&zone->lock, flags); 3252 } 3253 if (alloc_flags & ALLOC_HIGHATOMIC) 3254 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3255 if (!page) { 3256 enum rmqueue_mode rmqm = RMQUEUE_NORMAL; 3257 3258 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); 3259 3260 /* 3261 * If the allocation fails, allow OOM handling and 3262 * order-0 (atomic) allocs access to HIGHATOMIC 3263 * reserves as failing now is worse than failing a 3264 * high-order atomic allocation in the future. 3265 */ 3266 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 3267 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3268 3269 if (!page) { 3270 spin_unlock_irqrestore(&zone->lock, flags); 3271 return NULL; 3272 } 3273 } 3274 spin_unlock_irqrestore(&zone->lock, flags); 3275 } while (check_new_pages(page, order)); 3276 3277 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3278 zone_statistics(preferred_zone, zone, 1); 3279 3280 return page; 3281 } 3282 3283 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) 3284 { 3285 int high, base_batch, batch, max_nr_alloc; 3286 int high_max, high_min; 3287 3288 base_batch = READ_ONCE(pcp->batch); 3289 high_min = READ_ONCE(pcp->high_min); 3290 high_max = READ_ONCE(pcp->high_max); 3291 high = pcp->high = clamp(pcp->high, high_min, high_max); 3292 3293 /* Check for PCP disabled or boot pageset */ 3294 if (unlikely(high < base_batch)) 3295 return 1; 3296 3297 if (order) 3298 batch = base_batch; 3299 else 3300 batch = (base_batch << pcp->alloc_factor); 3301 3302 /* 3303 * If we had larger pcp->high, we could avoid to allocate from 3304 * zone. 3305 */ 3306 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3307 high = pcp->high = min(high + batch, high_max); 3308 3309 if (!order) { 3310 max_nr_alloc = max(high - pcp->count - base_batch, base_batch); 3311 /* 3312 * Double the number of pages allocated each time there is 3313 * subsequent allocation of order-0 pages without any freeing. 3314 */ 3315 if (batch <= max_nr_alloc && 3316 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX) 3317 pcp->alloc_factor++; 3318 batch = min(batch, max_nr_alloc); 3319 } 3320 3321 /* 3322 * Scale batch relative to order if batch implies free pages 3323 * can be stored on the PCP. Batch can be 1 for small zones or 3324 * for boot pagesets which should never store free pages as 3325 * the pages may belong to arbitrary zones. 3326 */ 3327 if (batch > 1) 3328 batch = max(batch >> order, 2); 3329 3330 return batch; 3331 } 3332 3333 /* Remove page from the per-cpu list, caller must protect the list */ 3334 static inline 3335 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3336 int migratetype, 3337 unsigned int alloc_flags, 3338 struct per_cpu_pages *pcp, 3339 struct list_head *list) 3340 { 3341 struct page *page; 3342 3343 do { 3344 if (list_empty(list)) { 3345 int batch = nr_pcp_alloc(pcp, zone, order); 3346 int alloced; 3347 3348 alloced = rmqueue_bulk(zone, order, 3349 batch, list, 3350 migratetype, alloc_flags); 3351 3352 pcp->count += alloced << order; 3353 if (unlikely(list_empty(list))) 3354 return NULL; 3355 } 3356 3357 page = list_first_entry(list, struct page, pcp_list); 3358 list_del(&page->pcp_list); 3359 pcp->count -= 1 << order; 3360 } while (check_new_pages(page, order)); 3361 3362 return page; 3363 } 3364 3365 /* Lock and remove page from the per-cpu list */ 3366 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3367 struct zone *zone, unsigned int order, 3368 int migratetype, unsigned int alloc_flags) 3369 { 3370 struct per_cpu_pages *pcp; 3371 struct list_head *list; 3372 struct page *page; 3373 unsigned long UP_flags; 3374 3375 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 3376 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 3377 if (!pcp) 3378 return NULL; 3379 3380 /* 3381 * On allocation, reduce the number of pages that are batch freed. 3382 * See nr_pcp_free() where free_factor is increased for subsequent 3383 * frees. 3384 */ 3385 pcp->free_count >>= 1; 3386 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3387 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3388 pcp_spin_unlock(pcp, UP_flags); 3389 if (page) { 3390 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3391 zone_statistics(preferred_zone, zone, 1); 3392 } 3393 return page; 3394 } 3395 3396 /* 3397 * Allocate a page from the given zone. 3398 * Use pcplists for THP or "cheap" high-order allocations. 3399 */ 3400 3401 /* 3402 * Do not instrument rmqueue() with KMSAN. This function may call 3403 * __msan_poison_alloca() through a call to set_pfnblock_migratetype(). 3404 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3405 * may call rmqueue() again, which will result in a deadlock. 3406 */ 3407 __no_sanitize_memory 3408 static inline 3409 struct page *rmqueue(struct zone *preferred_zone, 3410 struct zone *zone, unsigned int order, 3411 gfp_t gfp_flags, unsigned int alloc_flags, 3412 int migratetype) 3413 { 3414 struct page *page; 3415 3416 if (likely(pcp_allowed_order(order))) { 3417 page = rmqueue_pcplist(preferred_zone, zone, order, 3418 migratetype, alloc_flags); 3419 if (likely(page)) 3420 goto out; 3421 } 3422 3423 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3424 migratetype); 3425 3426 out: 3427 /* Separate test+clear to avoid unnecessary atomics */ 3428 if ((alloc_flags & ALLOC_KSWAPD) && 3429 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3430 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3431 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3432 } 3433 3434 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3435 return page; 3436 } 3437 3438 /* 3439 * Reserve the pageblock(s) surrounding an allocation request for 3440 * exclusive use of high-order atomic allocations if there are no 3441 * empty page blocks that contain a page with a suitable order 3442 */ 3443 static void reserve_highatomic_pageblock(struct page *page, int order, 3444 struct zone *zone) 3445 { 3446 int mt; 3447 unsigned long max_managed, flags; 3448 3449 /* 3450 * The number reserved as: minimum is 1 pageblock, maximum is 3451 * roughly 1% of a zone. But if 1% of a zone falls below a 3452 * pageblock size, then don't reserve any pageblocks. 3453 * Check is race-prone but harmless. 3454 */ 3455 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) 3456 return; 3457 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); 3458 if (zone->nr_reserved_highatomic >= max_managed) 3459 return; 3460 3461 spin_lock_irqsave(&zone->lock, flags); 3462 3463 /* Recheck the nr_reserved_highatomic limit under the lock */ 3464 if (zone->nr_reserved_highatomic >= max_managed) 3465 goto out_unlock; 3466 3467 /* Yoink! */ 3468 mt = get_pageblock_migratetype(page); 3469 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 3470 if (!migratetype_is_mergeable(mt)) 3471 goto out_unlock; 3472 3473 if (order < pageblock_order) { 3474 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) 3475 goto out_unlock; 3476 zone->nr_reserved_highatomic += pageblock_nr_pages; 3477 } else { 3478 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); 3479 zone->nr_reserved_highatomic += 1 << order; 3480 } 3481 3482 out_unlock: 3483 spin_unlock_irqrestore(&zone->lock, flags); 3484 } 3485 3486 /* 3487 * Used when an allocation is about to fail under memory pressure. This 3488 * potentially hurts the reliability of high-order allocations when under 3489 * intense memory pressure but failed atomic allocations should be easier 3490 * to recover from than an OOM. 3491 * 3492 * If @force is true, try to unreserve pageblocks even though highatomic 3493 * pageblock is exhausted. 3494 */ 3495 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 3496 bool force) 3497 { 3498 struct zonelist *zonelist = ac->zonelist; 3499 unsigned long flags; 3500 struct zoneref *z; 3501 struct zone *zone; 3502 struct page *page; 3503 int order; 3504 int ret; 3505 3506 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 3507 ac->nodemask) { 3508 /* 3509 * Preserve at least one pageblock unless memory pressure 3510 * is really high. 3511 */ 3512 if (!force && zone->nr_reserved_highatomic <= 3513 pageblock_nr_pages) 3514 continue; 3515 3516 spin_lock_irqsave(&zone->lock, flags); 3517 for (order = 0; order < NR_PAGE_ORDERS; order++) { 3518 struct free_area *area = &(zone->free_area[order]); 3519 unsigned long size; 3520 3521 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 3522 if (!page) 3523 continue; 3524 3525 size = max(pageblock_nr_pages, 1UL << order); 3526 /* 3527 * It should never happen but changes to 3528 * locking could inadvertently allow a per-cpu 3529 * drain to add pages to MIGRATE_HIGHATOMIC 3530 * while unreserving so be safe and watch for 3531 * underflows. 3532 */ 3533 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) 3534 size = zone->nr_reserved_highatomic; 3535 zone->nr_reserved_highatomic -= size; 3536 3537 /* 3538 * Convert to ac->migratetype and avoid the normal 3539 * pageblock stealing heuristics. Minimally, the caller 3540 * is doing the work and needs the pages. More 3541 * importantly, if the block was always converted to 3542 * MIGRATE_UNMOVABLE or another type then the number 3543 * of pageblocks that cannot be completely freed 3544 * may increase. 3545 */ 3546 if (order < pageblock_order) 3547 ret = move_freepages_block(zone, page, 3548 MIGRATE_HIGHATOMIC, 3549 ac->migratetype); 3550 else { 3551 move_to_free_list(page, zone, order, 3552 MIGRATE_HIGHATOMIC, 3553 ac->migratetype); 3554 change_pageblock_range(page, order, 3555 ac->migratetype); 3556 ret = 1; 3557 } 3558 /* 3559 * Reserving the block(s) already succeeded, 3560 * so this should not fail on zone boundaries. 3561 */ 3562 WARN_ON_ONCE(ret == -1); 3563 if (ret > 0) { 3564 spin_unlock_irqrestore(&zone->lock, flags); 3565 return ret; 3566 } 3567 } 3568 spin_unlock_irqrestore(&zone->lock, flags); 3569 } 3570 3571 return false; 3572 } 3573 3574 static inline long __zone_watermark_unusable_free(struct zone *z, 3575 unsigned int order, unsigned int alloc_flags) 3576 { 3577 long unusable_free = (1 << order) - 1; 3578 3579 /* 3580 * If the caller does not have rights to reserves below the min 3581 * watermark then subtract the free pages reserved for highatomic. 3582 */ 3583 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3584 unusable_free += READ_ONCE(z->nr_free_highatomic); 3585 3586 #ifdef CONFIG_CMA 3587 /* If allocation can't use CMA areas don't use free CMA pages */ 3588 if (!(alloc_flags & ALLOC_CMA)) 3589 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3590 #endif 3591 3592 return unusable_free; 3593 } 3594 3595 /* 3596 * Return true if free base pages are above 'mark'. For high-order checks it 3597 * will return true of the order-0 watermark is reached and there is at least 3598 * one free page of a suitable size. Checking now avoids taking the zone lock 3599 * to check in the allocation paths if no pages are free. 3600 */ 3601 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3602 int highest_zoneidx, unsigned int alloc_flags, 3603 long free_pages) 3604 { 3605 long min = mark; 3606 int o; 3607 3608 /* free_pages may go negative - that's OK */ 3609 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3610 3611 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3612 /* 3613 * __GFP_HIGH allows access to 50% of the min reserve as well 3614 * as OOM. 3615 */ 3616 if (alloc_flags & ALLOC_MIN_RESERVE) { 3617 min -= min / 2; 3618 3619 /* 3620 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3621 * access more reserves than just __GFP_HIGH. Other 3622 * non-blocking allocations requests such as GFP_NOWAIT 3623 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3624 * access to the min reserve. 3625 */ 3626 if (alloc_flags & ALLOC_NON_BLOCK) 3627 min -= min / 4; 3628 } 3629 3630 /* 3631 * OOM victims can try even harder than the normal reserve 3632 * users on the grounds that it's definitely going to be in 3633 * the exit path shortly and free memory. Any allocation it 3634 * makes during the free path will be small and short-lived. 3635 */ 3636 if (alloc_flags & ALLOC_OOM) 3637 min -= min / 2; 3638 } 3639 3640 /* 3641 * Check watermarks for an order-0 allocation request. If these 3642 * are not met, then a high-order request also cannot go ahead 3643 * even if a suitable page happened to be free. 3644 */ 3645 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3646 return false; 3647 3648 /* If this is an order-0 request then the watermark is fine */ 3649 if (!order) 3650 return true; 3651 3652 /* For a high-order request, check at least one suitable page is free */ 3653 for (o = order; o < NR_PAGE_ORDERS; o++) { 3654 struct free_area *area = &z->free_area[o]; 3655 int mt; 3656 3657 if (!area->nr_free) 3658 continue; 3659 3660 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3661 if (!free_area_empty(area, mt)) 3662 return true; 3663 } 3664 3665 #ifdef CONFIG_CMA 3666 if ((alloc_flags & ALLOC_CMA) && 3667 !free_area_empty(area, MIGRATE_CMA)) { 3668 return true; 3669 } 3670 #endif 3671 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3672 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3673 return true; 3674 } 3675 } 3676 return false; 3677 } 3678 3679 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3680 int highest_zoneidx, unsigned int alloc_flags) 3681 { 3682 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3683 zone_page_state(z, NR_FREE_PAGES)); 3684 } 3685 3686 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3687 unsigned long mark, int highest_zoneidx, 3688 unsigned int alloc_flags, gfp_t gfp_mask) 3689 { 3690 long free_pages; 3691 3692 free_pages = zone_page_state(z, NR_FREE_PAGES); 3693 3694 /* 3695 * Fast check for order-0 only. If this fails then the reserves 3696 * need to be calculated. 3697 */ 3698 if (!order) { 3699 long usable_free; 3700 long reserved; 3701 3702 usable_free = free_pages; 3703 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3704 3705 /* reserved may over estimate high-atomic reserves. */ 3706 usable_free -= min(usable_free, reserved); 3707 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3708 return true; 3709 } 3710 3711 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3712 free_pages)) 3713 return true; 3714 3715 /* 3716 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3717 * when checking the min watermark. The min watermark is the 3718 * point where boosting is ignored so that kswapd is woken up 3719 * when below the low watermark. 3720 */ 3721 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3722 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3723 mark = z->_watermark[WMARK_MIN]; 3724 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3725 alloc_flags, free_pages); 3726 } 3727 3728 return false; 3729 } 3730 3731 #ifdef CONFIG_NUMA 3732 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3733 3734 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3735 { 3736 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3737 node_reclaim_distance; 3738 } 3739 #else /* CONFIG_NUMA */ 3740 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3741 { 3742 return true; 3743 } 3744 #endif /* CONFIG_NUMA */ 3745 3746 /* 3747 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3748 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3749 * premature use of a lower zone may cause lowmem pressure problems that 3750 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3751 * probably too small. It only makes sense to spread allocations to avoid 3752 * fragmentation between the Normal and DMA32 zones. 3753 */ 3754 static inline unsigned int 3755 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3756 { 3757 unsigned int alloc_flags; 3758 3759 /* 3760 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3761 * to save a branch. 3762 */ 3763 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3764 3765 if (defrag_mode) { 3766 alloc_flags |= ALLOC_NOFRAGMENT; 3767 return alloc_flags; 3768 } 3769 3770 #ifdef CONFIG_ZONE_DMA32 3771 if (!zone) 3772 return alloc_flags; 3773 3774 if (zone_idx(zone) != ZONE_NORMAL) 3775 return alloc_flags; 3776 3777 /* 3778 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3779 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3780 * on UMA that if Normal is populated then so is DMA32. 3781 */ 3782 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3783 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3784 return alloc_flags; 3785 3786 alloc_flags |= ALLOC_NOFRAGMENT; 3787 #endif /* CONFIG_ZONE_DMA32 */ 3788 return alloc_flags; 3789 } 3790 3791 /* Must be called after current_gfp_context() which can change gfp_mask */ 3792 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3793 unsigned int alloc_flags) 3794 { 3795 #ifdef CONFIG_CMA 3796 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3797 alloc_flags |= ALLOC_CMA; 3798 #endif 3799 return alloc_flags; 3800 } 3801 3802 /* 3803 * get_page_from_freelist goes through the zonelist trying to allocate 3804 * a page. 3805 */ 3806 static struct page * 3807 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3808 const struct alloc_context *ac) 3809 { 3810 struct zoneref *z; 3811 struct zone *zone; 3812 struct pglist_data *last_pgdat = NULL; 3813 bool last_pgdat_dirty_ok = false; 3814 bool no_fallback; 3815 bool skip_kswapd_nodes = nr_online_nodes > 1; 3816 bool skipped_kswapd_nodes = false; 3817 3818 retry: 3819 /* 3820 * Scan zonelist, looking for a zone with enough free. 3821 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c. 3822 */ 3823 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3824 z = ac->preferred_zoneref; 3825 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3826 ac->nodemask) { 3827 struct page *page; 3828 unsigned long mark; 3829 3830 if (cpusets_enabled() && 3831 (alloc_flags & ALLOC_CPUSET) && 3832 !__cpuset_zone_allowed(zone, gfp_mask)) 3833 continue; 3834 /* 3835 * When allocating a page cache page for writing, we 3836 * want to get it from a node that is within its dirty 3837 * limit, such that no single node holds more than its 3838 * proportional share of globally allowed dirty pages. 3839 * The dirty limits take into account the node's 3840 * lowmem reserves and high watermark so that kswapd 3841 * should be able to balance it without having to 3842 * write pages from its LRU list. 3843 * 3844 * XXX: For now, allow allocations to potentially 3845 * exceed the per-node dirty limit in the slowpath 3846 * (spread_dirty_pages unset) before going into reclaim, 3847 * which is important when on a NUMA setup the allowed 3848 * nodes are together not big enough to reach the 3849 * global limit. The proper fix for these situations 3850 * will require awareness of nodes in the 3851 * dirty-throttling and the flusher threads. 3852 */ 3853 if (ac->spread_dirty_pages) { 3854 if (last_pgdat != zone->zone_pgdat) { 3855 last_pgdat = zone->zone_pgdat; 3856 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3857 } 3858 3859 if (!last_pgdat_dirty_ok) 3860 continue; 3861 } 3862 3863 if (no_fallback && !defrag_mode && nr_online_nodes > 1 && 3864 zone != zonelist_zone(ac->preferred_zoneref)) { 3865 int local_nid; 3866 3867 /* 3868 * If moving to a remote node, retry but allow 3869 * fragmenting fallbacks. Locality is more important 3870 * than fragmentation avoidance. 3871 */ 3872 local_nid = zonelist_node_idx(ac->preferred_zoneref); 3873 if (zone_to_nid(zone) != local_nid) { 3874 alloc_flags &= ~ALLOC_NOFRAGMENT; 3875 goto retry; 3876 } 3877 } 3878 3879 /* 3880 * If kswapd is already active on a node, keep looking 3881 * for other nodes that might be idle. This can happen 3882 * if another process has NUMA bindings and is causing 3883 * kswapd wakeups on only some nodes. Avoid accidental 3884 * "node_reclaim_mode"-like behavior in this case. 3885 */ 3886 if (skip_kswapd_nodes && 3887 !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) { 3888 skipped_kswapd_nodes = true; 3889 continue; 3890 } 3891 3892 cond_accept_memory(zone, order, alloc_flags); 3893 3894 /* 3895 * Detect whether the number of free pages is below high 3896 * watermark. If so, we will decrease pcp->high and free 3897 * PCP pages in free path to reduce the possibility of 3898 * premature page reclaiming. Detection is done here to 3899 * avoid to do that in hotter free path. 3900 */ 3901 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3902 goto check_alloc_wmark; 3903 3904 mark = high_wmark_pages(zone); 3905 if (zone_watermark_fast(zone, order, mark, 3906 ac->highest_zoneidx, alloc_flags, 3907 gfp_mask)) 3908 goto try_this_zone; 3909 else 3910 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3911 3912 check_alloc_wmark: 3913 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3914 if (!zone_watermark_fast(zone, order, mark, 3915 ac->highest_zoneidx, alloc_flags, 3916 gfp_mask)) { 3917 int ret; 3918 3919 if (cond_accept_memory(zone, order, alloc_flags)) 3920 goto try_this_zone; 3921 3922 /* 3923 * Watermark failed for this zone, but see if we can 3924 * grow this zone if it contains deferred pages. 3925 */ 3926 if (deferred_pages_enabled()) { 3927 if (_deferred_grow_zone(zone, order)) 3928 goto try_this_zone; 3929 } 3930 /* Checked here to keep the fast path fast */ 3931 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3932 if (alloc_flags & ALLOC_NO_WATERMARKS) 3933 goto try_this_zone; 3934 3935 if (!node_reclaim_enabled() || 3936 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3937 continue; 3938 3939 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3940 switch (ret) { 3941 case NODE_RECLAIM_NOSCAN: 3942 /* did not scan */ 3943 continue; 3944 case NODE_RECLAIM_FULL: 3945 /* scanned but unreclaimable */ 3946 continue; 3947 default: 3948 /* did we reclaim enough */ 3949 if (zone_watermark_ok(zone, order, mark, 3950 ac->highest_zoneidx, alloc_flags)) 3951 goto try_this_zone; 3952 3953 continue; 3954 } 3955 } 3956 3957 try_this_zone: 3958 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3959 gfp_mask, alloc_flags, ac->migratetype); 3960 if (page) { 3961 prep_new_page(page, order, gfp_mask, alloc_flags); 3962 3963 /* 3964 * If this is a high-order atomic allocation then check 3965 * if the pageblock should be reserved for the future 3966 */ 3967 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3968 reserve_highatomic_pageblock(page, order, zone); 3969 3970 return page; 3971 } else { 3972 if (cond_accept_memory(zone, order, alloc_flags)) 3973 goto try_this_zone; 3974 3975 /* Try again if zone has deferred pages */ 3976 if (deferred_pages_enabled()) { 3977 if (_deferred_grow_zone(zone, order)) 3978 goto try_this_zone; 3979 } 3980 } 3981 } 3982 3983 /* 3984 * If we skipped over nodes with active kswapds and found no 3985 * idle nodes, retry and place anywhere the watermarks permit. 3986 */ 3987 if (skip_kswapd_nodes && skipped_kswapd_nodes) { 3988 skip_kswapd_nodes = false; 3989 goto retry; 3990 } 3991 3992 /* 3993 * It's possible on a UMA machine to get through all zones that are 3994 * fragmented. If avoiding fragmentation, reset and try again. 3995 */ 3996 if (no_fallback && !defrag_mode) { 3997 alloc_flags &= ~ALLOC_NOFRAGMENT; 3998 goto retry; 3999 } 4000 4001 return NULL; 4002 } 4003 4004 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 4005 { 4006 unsigned int filter = SHOW_MEM_FILTER_NODES; 4007 4008 /* 4009 * This documents exceptions given to allocations in certain 4010 * contexts that are allowed to allocate outside current's set 4011 * of allowed nodes. 4012 */ 4013 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4014 if (tsk_is_oom_victim(current) || 4015 (current->flags & (PF_MEMALLOC | PF_EXITING))) 4016 filter &= ~SHOW_MEM_FILTER_NODES; 4017 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 4018 filter &= ~SHOW_MEM_FILTER_NODES; 4019 4020 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 4021 mem_cgroup_show_protected_memory(NULL); 4022 } 4023 4024 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 4025 { 4026 struct va_format vaf; 4027 va_list args; 4028 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 4029 4030 if ((gfp_mask & __GFP_NOWARN) || 4031 !__ratelimit(&nopage_rs) || 4032 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 4033 return; 4034 4035 va_start(args, fmt); 4036 vaf.fmt = fmt; 4037 vaf.va = &args; 4038 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4039 current->comm, &vaf, gfp_mask, &gfp_mask, 4040 nodemask_pr_args(nodemask)); 4041 va_end(args); 4042 4043 cpuset_print_current_mems_allowed(); 4044 pr_cont("\n"); 4045 dump_stack(); 4046 warn_alloc_show_mem(gfp_mask, nodemask); 4047 } 4048 4049 static inline struct page * 4050 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4051 unsigned int alloc_flags, 4052 const struct alloc_context *ac) 4053 { 4054 struct page *page; 4055 4056 page = get_page_from_freelist(gfp_mask, order, 4057 alloc_flags|ALLOC_CPUSET, ac); 4058 /* 4059 * fallback to ignore cpuset restriction if our nodes 4060 * are depleted 4061 */ 4062 if (!page) 4063 page = get_page_from_freelist(gfp_mask, order, 4064 alloc_flags, ac); 4065 return page; 4066 } 4067 4068 static inline struct page * 4069 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4070 const struct alloc_context *ac, unsigned long *did_some_progress) 4071 { 4072 struct oom_control oc = { 4073 .zonelist = ac->zonelist, 4074 .nodemask = ac->nodemask, 4075 .memcg = NULL, 4076 .gfp_mask = gfp_mask, 4077 .order = order, 4078 }; 4079 struct page *page; 4080 4081 *did_some_progress = 0; 4082 4083 /* 4084 * Acquire the oom lock. If that fails, somebody else is 4085 * making progress for us. 4086 */ 4087 if (!mutex_trylock(&oom_lock)) { 4088 *did_some_progress = 1; 4089 schedule_timeout_uninterruptible(1); 4090 return NULL; 4091 } 4092 4093 /* 4094 * Go through the zonelist yet one more time, keep very high watermark 4095 * here, this is only to catch a parallel oom killing, we must fail if 4096 * we're still under heavy pressure. But make sure that this reclaim 4097 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4098 * allocation which will never fail due to oom_lock already held. 4099 */ 4100 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4101 ~__GFP_DIRECT_RECLAIM, order, 4102 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4103 if (page) 4104 goto out; 4105 4106 /* Coredumps can quickly deplete all memory reserves */ 4107 if (current->flags & PF_DUMPCORE) 4108 goto out; 4109 /* The OOM killer will not help higher order allocs */ 4110 if (order > PAGE_ALLOC_COSTLY_ORDER) 4111 goto out; 4112 /* 4113 * We have already exhausted all our reclaim opportunities without any 4114 * success so it is time to admit defeat. We will skip the OOM killer 4115 * because it is very likely that the caller has a more reasonable 4116 * fallback than shooting a random task. 4117 * 4118 * The OOM killer may not free memory on a specific node. 4119 */ 4120 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4121 goto out; 4122 /* The OOM killer does not needlessly kill tasks for lowmem */ 4123 if (ac->highest_zoneidx < ZONE_NORMAL) 4124 goto out; 4125 if (pm_suspended_storage()) 4126 goto out; 4127 /* 4128 * XXX: GFP_NOFS allocations should rather fail than rely on 4129 * other request to make a forward progress. 4130 * We are in an unfortunate situation where out_of_memory cannot 4131 * do much for this context but let's try it to at least get 4132 * access to memory reserved if the current task is killed (see 4133 * out_of_memory). Once filesystems are ready to handle allocation 4134 * failures more gracefully we should just bail out here. 4135 */ 4136 4137 /* Exhausted what can be done so it's blame time */ 4138 if (out_of_memory(&oc) || 4139 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 4140 *did_some_progress = 1; 4141 4142 /* 4143 * Help non-failing allocations by giving them access to memory 4144 * reserves 4145 */ 4146 if (gfp_mask & __GFP_NOFAIL) 4147 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4148 ALLOC_NO_WATERMARKS, ac); 4149 } 4150 out: 4151 mutex_unlock(&oom_lock); 4152 return page; 4153 } 4154 4155 /* 4156 * Maximum number of compaction retries with a progress before OOM 4157 * killer is consider as the only way to move forward. 4158 */ 4159 #define MAX_COMPACT_RETRIES 16 4160 4161 #ifdef CONFIG_COMPACTION 4162 /* Try memory compaction for high-order allocations before reclaim */ 4163 static struct page * 4164 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4165 unsigned int alloc_flags, const struct alloc_context *ac, 4166 enum compact_priority prio, enum compact_result *compact_result) 4167 { 4168 struct page *page = NULL; 4169 unsigned long pflags; 4170 unsigned int noreclaim_flag; 4171 4172 if (!order) 4173 return NULL; 4174 4175 psi_memstall_enter(&pflags); 4176 delayacct_compact_start(); 4177 noreclaim_flag = memalloc_noreclaim_save(); 4178 4179 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4180 prio, &page); 4181 4182 memalloc_noreclaim_restore(noreclaim_flag); 4183 psi_memstall_leave(&pflags); 4184 delayacct_compact_end(); 4185 4186 if (*compact_result == COMPACT_SKIPPED) 4187 return NULL; 4188 /* 4189 * At least in one zone compaction wasn't deferred or skipped, so let's 4190 * count a compaction stall 4191 */ 4192 count_vm_event(COMPACTSTALL); 4193 4194 /* Prep a captured page if available */ 4195 if (page) 4196 prep_new_page(page, order, gfp_mask, alloc_flags); 4197 4198 /* Try get a page from the freelist if available */ 4199 if (!page) 4200 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4201 4202 if (page) { 4203 struct zone *zone = page_zone(page); 4204 4205 zone->compact_blockskip_flush = false; 4206 compaction_defer_reset(zone, order, true); 4207 count_vm_event(COMPACTSUCCESS); 4208 return page; 4209 } 4210 4211 /* 4212 * It's bad if compaction run occurs and fails. The most likely reason 4213 * is that pages exist, but not enough to satisfy watermarks. 4214 */ 4215 count_vm_event(COMPACTFAIL); 4216 4217 cond_resched(); 4218 4219 return NULL; 4220 } 4221 4222 static inline bool 4223 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4224 enum compact_result compact_result, 4225 enum compact_priority *compact_priority, 4226 int *compaction_retries) 4227 { 4228 int max_retries = MAX_COMPACT_RETRIES; 4229 int min_priority; 4230 bool ret = false; 4231 int retries = *compaction_retries; 4232 enum compact_priority priority = *compact_priority; 4233 4234 if (!order) 4235 return false; 4236 4237 if (fatal_signal_pending(current)) 4238 return false; 4239 4240 /* 4241 * Compaction was skipped due to a lack of free order-0 4242 * migration targets. Continue if reclaim can help. 4243 */ 4244 if (compact_result == COMPACT_SKIPPED) { 4245 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4246 goto out; 4247 } 4248 4249 /* 4250 * Compaction managed to coalesce some page blocks, but the 4251 * allocation failed presumably due to a race. Retry some. 4252 */ 4253 if (compact_result == COMPACT_SUCCESS) { 4254 /* 4255 * !costly requests are much more important than 4256 * __GFP_RETRY_MAYFAIL costly ones because they are de 4257 * facto nofail and invoke OOM killer to move on while 4258 * costly can fail and users are ready to cope with 4259 * that. 1/4 retries is rather arbitrary but we would 4260 * need much more detailed feedback from compaction to 4261 * make a better decision. 4262 */ 4263 if (order > PAGE_ALLOC_COSTLY_ORDER) 4264 max_retries /= 4; 4265 4266 if (++(*compaction_retries) <= max_retries) { 4267 ret = true; 4268 goto out; 4269 } 4270 } 4271 4272 /* 4273 * Compaction failed. Retry with increasing priority. 4274 */ 4275 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 4276 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4277 4278 if (*compact_priority > min_priority) { 4279 (*compact_priority)--; 4280 *compaction_retries = 0; 4281 ret = true; 4282 } 4283 out: 4284 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4285 return ret; 4286 } 4287 #else 4288 static inline struct page * 4289 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4290 unsigned int alloc_flags, const struct alloc_context *ac, 4291 enum compact_priority prio, enum compact_result *compact_result) 4292 { 4293 *compact_result = COMPACT_SKIPPED; 4294 return NULL; 4295 } 4296 4297 static inline bool 4298 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4299 enum compact_result compact_result, 4300 enum compact_priority *compact_priority, 4301 int *compaction_retries) 4302 { 4303 struct zone *zone; 4304 struct zoneref *z; 4305 4306 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4307 return false; 4308 4309 /* 4310 * There are setups with compaction disabled which would prefer to loop 4311 * inside the allocator rather than hit the oom killer prematurely. 4312 * Let's give them a good hope and keep retrying while the order-0 4313 * watermarks are OK. 4314 */ 4315 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4316 ac->highest_zoneidx, ac->nodemask) { 4317 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4318 ac->highest_zoneidx, alloc_flags)) 4319 return true; 4320 } 4321 return false; 4322 } 4323 #endif /* CONFIG_COMPACTION */ 4324 4325 #ifdef CONFIG_LOCKDEP 4326 static struct lockdep_map __fs_reclaim_map = 4327 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4328 4329 static bool __need_reclaim(gfp_t gfp_mask) 4330 { 4331 /* no reclaim without waiting on it */ 4332 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4333 return false; 4334 4335 /* this guy won't enter reclaim */ 4336 if (current->flags & PF_MEMALLOC) 4337 return false; 4338 4339 if (gfp_mask & __GFP_NOLOCKDEP) 4340 return false; 4341 4342 return true; 4343 } 4344 4345 void __fs_reclaim_acquire(unsigned long ip) 4346 { 4347 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4348 } 4349 4350 void __fs_reclaim_release(unsigned long ip) 4351 { 4352 lock_release(&__fs_reclaim_map, ip); 4353 } 4354 4355 void fs_reclaim_acquire(gfp_t gfp_mask) 4356 { 4357 gfp_mask = current_gfp_context(gfp_mask); 4358 4359 if (__need_reclaim(gfp_mask)) { 4360 if (gfp_mask & __GFP_FS) 4361 __fs_reclaim_acquire(_RET_IP_); 4362 4363 #ifdef CONFIG_MMU_NOTIFIER 4364 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4365 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4366 #endif 4367 4368 } 4369 } 4370 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4371 4372 void fs_reclaim_release(gfp_t gfp_mask) 4373 { 4374 gfp_mask = current_gfp_context(gfp_mask); 4375 4376 if (__need_reclaim(gfp_mask)) { 4377 if (gfp_mask & __GFP_FS) 4378 __fs_reclaim_release(_RET_IP_); 4379 } 4380 } 4381 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4382 #endif 4383 4384 /* 4385 * Zonelists may change due to hotplug during allocation. Detect when zonelists 4386 * have been rebuilt so allocation retries. Reader side does not lock and 4387 * retries the allocation if zonelist changes. Writer side is protected by the 4388 * embedded spin_lock. 4389 */ 4390 static DEFINE_SEQLOCK(zonelist_update_seq); 4391 4392 static unsigned int zonelist_iter_begin(void) 4393 { 4394 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4395 return read_seqbegin(&zonelist_update_seq); 4396 4397 return 0; 4398 } 4399 4400 static unsigned int check_retry_zonelist(unsigned int seq) 4401 { 4402 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4403 return read_seqretry(&zonelist_update_seq, seq); 4404 4405 return seq; 4406 } 4407 4408 /* Perform direct synchronous page reclaim */ 4409 static unsigned long 4410 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4411 const struct alloc_context *ac) 4412 { 4413 unsigned int noreclaim_flag; 4414 unsigned long progress; 4415 4416 cond_resched(); 4417 4418 /* We now go into synchronous reclaim */ 4419 cpuset_memory_pressure_bump(); 4420 fs_reclaim_acquire(gfp_mask); 4421 noreclaim_flag = memalloc_noreclaim_save(); 4422 4423 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4424 ac->nodemask); 4425 4426 memalloc_noreclaim_restore(noreclaim_flag); 4427 fs_reclaim_release(gfp_mask); 4428 4429 cond_resched(); 4430 4431 return progress; 4432 } 4433 4434 /* The really slow allocator path where we enter direct reclaim */ 4435 static inline struct page * 4436 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4437 unsigned int alloc_flags, const struct alloc_context *ac, 4438 unsigned long *did_some_progress) 4439 { 4440 struct page *page = NULL; 4441 unsigned long pflags; 4442 bool drained = false; 4443 4444 psi_memstall_enter(&pflags); 4445 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4446 if (unlikely(!(*did_some_progress))) 4447 goto out; 4448 4449 retry: 4450 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4451 4452 /* 4453 * If an allocation failed after direct reclaim, it could be because 4454 * pages are pinned on the per-cpu lists or in high alloc reserves. 4455 * Shrink them and try again 4456 */ 4457 if (!page && !drained) { 4458 unreserve_highatomic_pageblock(ac, false); 4459 drain_all_pages(NULL); 4460 drained = true; 4461 goto retry; 4462 } 4463 out: 4464 psi_memstall_leave(&pflags); 4465 4466 return page; 4467 } 4468 4469 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4470 const struct alloc_context *ac) 4471 { 4472 struct zoneref *z; 4473 struct zone *zone; 4474 pg_data_t *last_pgdat = NULL; 4475 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4476 unsigned int reclaim_order; 4477 4478 if (defrag_mode) 4479 reclaim_order = max(order, pageblock_order); 4480 else 4481 reclaim_order = order; 4482 4483 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4484 ac->nodemask) { 4485 if (!managed_zone(zone)) 4486 continue; 4487 if (last_pgdat == zone->zone_pgdat) 4488 continue; 4489 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); 4490 last_pgdat = zone->zone_pgdat; 4491 } 4492 } 4493 4494 static inline unsigned int 4495 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4496 { 4497 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4498 4499 /* 4500 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4501 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4502 * to save two branches. 4503 */ 4504 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4505 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4506 4507 /* 4508 * The caller may dip into page reserves a bit more if the caller 4509 * cannot run direct reclaim, or if the caller has realtime scheduling 4510 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4511 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 4512 */ 4513 alloc_flags |= (__force int) 4514 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4515 4516 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4517 /* 4518 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4519 * if it can't schedule. 4520 */ 4521 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4522 alloc_flags |= ALLOC_NON_BLOCK; 4523 4524 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE)) 4525 alloc_flags |= ALLOC_HIGHATOMIC; 4526 } 4527 4528 /* 4529 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4530 * GFP_ATOMIC) rather than fail, see the comment for 4531 * cpuset_current_node_allowed(). 4532 */ 4533 if (alloc_flags & ALLOC_MIN_RESERVE) 4534 alloc_flags &= ~ALLOC_CPUSET; 4535 } else if (unlikely(rt_or_dl_task(current)) && in_task()) 4536 alloc_flags |= ALLOC_MIN_RESERVE; 4537 4538 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4539 4540 if (defrag_mode) 4541 alloc_flags |= ALLOC_NOFRAGMENT; 4542 4543 return alloc_flags; 4544 } 4545 4546 static bool oom_reserves_allowed(struct task_struct *tsk) 4547 { 4548 if (!tsk_is_oom_victim(tsk)) 4549 return false; 4550 4551 /* 4552 * !MMU doesn't have oom reaper so give access to memory reserves 4553 * only to the thread with TIF_MEMDIE set 4554 */ 4555 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4556 return false; 4557 4558 return true; 4559 } 4560 4561 /* 4562 * Distinguish requests which really need access to full memory 4563 * reserves from oom victims which can live with a portion of it 4564 */ 4565 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4566 { 4567 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4568 return 0; 4569 if (gfp_mask & __GFP_MEMALLOC) 4570 return ALLOC_NO_WATERMARKS; 4571 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4572 return ALLOC_NO_WATERMARKS; 4573 if (!in_interrupt()) { 4574 if (current->flags & PF_MEMALLOC) 4575 return ALLOC_NO_WATERMARKS; 4576 else if (oom_reserves_allowed(current)) 4577 return ALLOC_OOM; 4578 } 4579 4580 return 0; 4581 } 4582 4583 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4584 { 4585 return !!__gfp_pfmemalloc_flags(gfp_mask); 4586 } 4587 4588 /* 4589 * Checks whether it makes sense to retry the reclaim to make a forward progress 4590 * for the given allocation request. 4591 * 4592 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4593 * without success, or when we couldn't even meet the watermark if we 4594 * reclaimed all remaining pages on the LRU lists. 4595 * 4596 * Returns true if a retry is viable or false to enter the oom path. 4597 */ 4598 static inline bool 4599 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4600 struct alloc_context *ac, int alloc_flags, 4601 bool did_some_progress, int *no_progress_loops) 4602 { 4603 struct zone *zone; 4604 struct zoneref *z; 4605 bool ret = false; 4606 4607 /* 4608 * Costly allocations might have made a progress but this doesn't mean 4609 * their order will become available due to high fragmentation so 4610 * always increment the no progress counter for them 4611 */ 4612 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4613 *no_progress_loops = 0; 4614 else 4615 (*no_progress_loops)++; 4616 4617 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4618 goto out; 4619 4620 4621 /* 4622 * Keep reclaiming pages while there is a chance this will lead 4623 * somewhere. If none of the target zones can satisfy our allocation 4624 * request even if all reclaimable pages are considered then we are 4625 * screwed and have to go OOM. 4626 */ 4627 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4628 ac->highest_zoneidx, ac->nodemask) { 4629 unsigned long available; 4630 unsigned long reclaimable; 4631 unsigned long min_wmark = min_wmark_pages(zone); 4632 bool wmark; 4633 4634 if (cpusets_enabled() && 4635 (alloc_flags & ALLOC_CPUSET) && 4636 !__cpuset_zone_allowed(zone, gfp_mask)) 4637 continue; 4638 4639 available = reclaimable = zone_reclaimable_pages(zone); 4640 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4641 4642 /* 4643 * Would the allocation succeed if we reclaimed all 4644 * reclaimable pages? 4645 */ 4646 wmark = __zone_watermark_ok(zone, order, min_wmark, 4647 ac->highest_zoneidx, alloc_flags, available); 4648 trace_reclaim_retry_zone(z, order, reclaimable, 4649 available, min_wmark, *no_progress_loops, wmark); 4650 if (wmark) { 4651 ret = true; 4652 break; 4653 } 4654 } 4655 4656 /* 4657 * Memory allocation/reclaim might be called from a WQ context and the 4658 * current implementation of the WQ concurrency control doesn't 4659 * recognize that a particular WQ is congested if the worker thread is 4660 * looping without ever sleeping. Therefore we have to do a short sleep 4661 * here rather than calling cond_resched(). 4662 */ 4663 if (current->flags & PF_WQ_WORKER) 4664 schedule_timeout_uninterruptible(1); 4665 else 4666 cond_resched(); 4667 out: 4668 /* Before OOM, exhaust highatomic_reserve */ 4669 if (!ret) 4670 return unreserve_highatomic_pageblock(ac, true); 4671 4672 return ret; 4673 } 4674 4675 static inline bool 4676 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4677 { 4678 /* 4679 * It's possible that cpuset's mems_allowed and the nodemask from 4680 * mempolicy don't intersect. This should be normally dealt with by 4681 * policy_nodemask(), but it's possible to race with cpuset update in 4682 * such a way the check therein was true, and then it became false 4683 * before we got our cpuset_mems_cookie here. 4684 * This assumes that for all allocations, ac->nodemask can come only 4685 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4686 * when it does not intersect with the cpuset restrictions) or the 4687 * caller can deal with a violated nodemask. 4688 */ 4689 if (cpusets_enabled() && ac->nodemask && 4690 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4691 ac->nodemask = NULL; 4692 return true; 4693 } 4694 4695 /* 4696 * When updating a task's mems_allowed or mempolicy nodemask, it is 4697 * possible to race with parallel threads in such a way that our 4698 * allocation can fail while the mask is being updated. If we are about 4699 * to fail, check if the cpuset changed during allocation and if so, 4700 * retry. 4701 */ 4702 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4703 return true; 4704 4705 return false; 4706 } 4707 4708 static inline struct page * 4709 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4710 struct alloc_context *ac) 4711 { 4712 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4713 bool can_compact = can_direct_reclaim && gfp_compaction_allowed(gfp_mask); 4714 bool nofail = gfp_mask & __GFP_NOFAIL; 4715 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4716 struct page *page = NULL; 4717 unsigned int alloc_flags; 4718 unsigned long did_some_progress; 4719 enum compact_priority compact_priority; 4720 enum compact_result compact_result; 4721 int compaction_retries; 4722 int no_progress_loops; 4723 unsigned int cpuset_mems_cookie; 4724 unsigned int zonelist_iter_cookie; 4725 int reserve_flags; 4726 bool compact_first = false; 4727 bool can_retry_reserves = true; 4728 4729 if (unlikely(nofail)) { 4730 /* 4731 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM, 4732 * otherwise, we may result in lockup. 4733 */ 4734 WARN_ON_ONCE(!can_direct_reclaim); 4735 /* 4736 * PF_MEMALLOC request from this context is rather bizarre 4737 * because we cannot reclaim anything and only can loop waiting 4738 * for somebody to do a work for us. 4739 */ 4740 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4741 } 4742 4743 restart: 4744 compaction_retries = 0; 4745 no_progress_loops = 0; 4746 compact_result = COMPACT_SKIPPED; 4747 compact_priority = DEF_COMPACT_PRIORITY; 4748 cpuset_mems_cookie = read_mems_allowed_begin(); 4749 zonelist_iter_cookie = zonelist_iter_begin(); 4750 4751 /* 4752 * For costly allocations, try direct compaction first, as it's likely 4753 * that we have enough base pages and don't need to reclaim. For non- 4754 * movable high-order allocations, do that as well, as compaction will 4755 * try prevent permanent fragmentation by migrating from blocks of the 4756 * same migratetype. 4757 */ 4758 if (can_compact && (costly_order || (order > 0 && 4759 ac->migratetype != MIGRATE_MOVABLE))) { 4760 compact_first = true; 4761 compact_priority = INIT_COMPACT_PRIORITY; 4762 } 4763 4764 /* 4765 * The fast path uses conservative alloc_flags to succeed only until 4766 * kswapd needs to be woken up, and to avoid the cost of setting up 4767 * alloc_flags precisely. So we do that now. 4768 */ 4769 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4770 4771 /* 4772 * We need to recalculate the starting point for the zonelist iterator 4773 * because we might have used different nodemask in the fast path, or 4774 * there was a cpuset modification and we are retrying - otherwise we 4775 * could end up iterating over non-eligible zones endlessly. 4776 */ 4777 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4778 ac->highest_zoneidx, ac->nodemask); 4779 if (!zonelist_zone(ac->preferred_zoneref)) 4780 goto nopage; 4781 4782 /* 4783 * Check for insane configurations where the cpuset doesn't contain 4784 * any suitable zone to satisfy the request - e.g. non-movable 4785 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4786 */ 4787 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4788 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4789 ac->highest_zoneidx, 4790 &cpuset_current_mems_allowed); 4791 if (!zonelist_zone(z)) 4792 goto nopage; 4793 } 4794 4795 retry: 4796 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4797 if (alloc_flags & ALLOC_KSWAPD) 4798 wake_all_kswapds(order, gfp_mask, ac); 4799 4800 /* 4801 * The adjusted alloc_flags might result in immediate success, so try 4802 * that first 4803 */ 4804 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4805 if (page) 4806 goto got_pg; 4807 4808 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4809 if (reserve_flags) 4810 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4811 (alloc_flags & ALLOC_KSWAPD); 4812 4813 /* 4814 * Reset the nodemask and zonelist iterators if memory policies can be 4815 * ignored. These allocations are high priority and system rather than 4816 * user oriented. 4817 */ 4818 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4819 ac->nodemask = NULL; 4820 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4821 ac->highest_zoneidx, ac->nodemask); 4822 4823 /* 4824 * The first time we adjust anything due to being allowed to 4825 * ignore memory policies or watermarks, retry immediately. This 4826 * allows us to keep the first allocation attempt optimistic so 4827 * it can succeed in a zone that is still above watermarks. 4828 */ 4829 if (can_retry_reserves) { 4830 can_retry_reserves = false; 4831 goto retry; 4832 } 4833 } 4834 4835 /* Caller is not willing to reclaim, we can't balance anything */ 4836 if (!can_direct_reclaim) 4837 goto nopage; 4838 4839 /* Avoid recursion of direct reclaim */ 4840 if (current->flags & PF_MEMALLOC) 4841 goto nopage; 4842 4843 /* Try direct reclaim and then allocating */ 4844 if (!compact_first) { 4845 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, 4846 ac, &did_some_progress); 4847 if (page) 4848 goto got_pg; 4849 } 4850 4851 /* Try direct compaction and then allocating */ 4852 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4853 compact_priority, &compact_result); 4854 if (page) 4855 goto got_pg; 4856 4857 if (compact_first) { 4858 /* 4859 * THP page faults may attempt local node only first, but are 4860 * then allowed to only compact, not reclaim, see 4861 * alloc_pages_mpol(). 4862 * 4863 * Compaction has failed above and we don't want such THP 4864 * allocations to put reclaim pressure on a single node in a 4865 * situation where other nodes might have plenty of available 4866 * memory. 4867 */ 4868 if (gfp_has_flags(gfp_mask, __GFP_NORETRY | __GFP_THISNODE)) 4869 goto nopage; 4870 4871 /* 4872 * For the initial compaction attempt we have lowered its 4873 * priority. Restore it for further retries, if those are 4874 * allowed. With __GFP_NORETRY there will be a single round of 4875 * reclaim and compaction with the lowered priority. 4876 */ 4877 if (!(gfp_mask & __GFP_NORETRY)) 4878 compact_priority = DEF_COMPACT_PRIORITY; 4879 4880 compact_first = false; 4881 goto retry; 4882 } 4883 4884 /* Do not loop if specifically requested */ 4885 if (gfp_mask & __GFP_NORETRY) 4886 goto nopage; 4887 4888 /* 4889 * Do not retry costly high order allocations unless they are 4890 * __GFP_RETRY_MAYFAIL and we can compact 4891 */ 4892 if (costly_order && (!can_compact || 4893 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4894 goto nopage; 4895 4896 /* 4897 * Deal with possible cpuset update races or zonelist updates to avoid 4898 * infinite retries. No "goto retry;" can be placed above this check 4899 * unless it can execute just once. 4900 */ 4901 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4902 check_retry_zonelist(zonelist_iter_cookie)) 4903 goto restart; 4904 4905 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4906 did_some_progress > 0, &no_progress_loops)) 4907 goto retry; 4908 4909 /* 4910 * It doesn't make any sense to retry for the compaction if the order-0 4911 * reclaim is not able to make any progress because the current 4912 * implementation of the compaction depends on the sufficient amount 4913 * of free memory (see __compaction_suitable) 4914 */ 4915 if (did_some_progress > 0 && can_compact && 4916 should_compact_retry(ac, order, alloc_flags, 4917 compact_result, &compact_priority, 4918 &compaction_retries)) 4919 goto retry; 4920 4921 /* Reclaim/compaction failed to prevent the fallback */ 4922 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) { 4923 alloc_flags &= ~ALLOC_NOFRAGMENT; 4924 goto retry; 4925 } 4926 4927 /* 4928 * Deal with possible cpuset update races or zonelist updates to avoid 4929 * a unnecessary OOM kill. 4930 */ 4931 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4932 check_retry_zonelist(zonelist_iter_cookie)) 4933 goto restart; 4934 4935 /* Reclaim has failed us, start killing things */ 4936 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4937 if (page) 4938 goto got_pg; 4939 4940 /* Avoid allocations with no watermarks from looping endlessly */ 4941 if (tsk_is_oom_victim(current) && 4942 (alloc_flags & ALLOC_OOM || 4943 (gfp_mask & __GFP_NOMEMALLOC))) 4944 goto nopage; 4945 4946 /* Retry as long as the OOM killer is making progress */ 4947 if (did_some_progress) { 4948 no_progress_loops = 0; 4949 goto retry; 4950 } 4951 4952 nopage: 4953 /* 4954 * Deal with possible cpuset update races or zonelist updates to avoid 4955 * a unnecessary OOM kill. 4956 */ 4957 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4958 check_retry_zonelist(zonelist_iter_cookie)) 4959 goto restart; 4960 4961 /* 4962 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4963 * we always retry 4964 */ 4965 if (unlikely(nofail)) { 4966 /* 4967 * Lacking direct_reclaim we can't do anything to reclaim memory, 4968 * we disregard these unreasonable nofail requests and still 4969 * return NULL 4970 */ 4971 if (!can_direct_reclaim) 4972 goto fail; 4973 4974 /* 4975 * Help non-failing allocations by giving some access to memory 4976 * reserves normally used for high priority non-blocking 4977 * allocations but do not use ALLOC_NO_WATERMARKS because this 4978 * could deplete whole memory reserves which would just make 4979 * the situation worse. 4980 */ 4981 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4982 if (page) 4983 goto got_pg; 4984 4985 cond_resched(); 4986 goto retry; 4987 } 4988 fail: 4989 warn_alloc(gfp_mask, ac->nodemask, 4990 "page allocation failure: order:%u", order); 4991 got_pg: 4992 return page; 4993 } 4994 4995 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4996 int preferred_nid, nodemask_t *nodemask, 4997 struct alloc_context *ac, gfp_t *alloc_gfp, 4998 unsigned int *alloc_flags) 4999 { 5000 ac->highest_zoneidx = gfp_zone(gfp_mask); 5001 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 5002 ac->nodemask = nodemask; 5003 ac->migratetype = gfp_migratetype(gfp_mask); 5004 5005 if (cpusets_enabled()) { 5006 *alloc_gfp |= __GFP_HARDWALL; 5007 /* 5008 * When we are in the interrupt context, it is irrelevant 5009 * to the current task context. It means that any node ok. 5010 */ 5011 if (in_task() && !ac->nodemask) 5012 ac->nodemask = &cpuset_current_mems_allowed; 5013 else 5014 *alloc_flags |= ALLOC_CPUSET; 5015 } 5016 5017 might_alloc(gfp_mask); 5018 5019 /* 5020 * Don't invoke should_fail logic, since it may call 5021 * get_random_u32() and printk() which need to spin_lock. 5022 */ 5023 if (!(*alloc_flags & ALLOC_TRYLOCK) && 5024 should_fail_alloc_page(gfp_mask, order)) 5025 return false; 5026 5027 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 5028 5029 /* Dirty zone balancing only done in the fast path */ 5030 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 5031 5032 /* 5033 * The preferred zone is used for statistics but crucially it is 5034 * also used as the starting point for the zonelist iterator. It 5035 * may get reset for allocations that ignore memory policies. 5036 */ 5037 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5038 ac->highest_zoneidx, ac->nodemask); 5039 5040 return true; 5041 } 5042 5043 /* 5044 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array 5045 * @gfp: GFP flags for the allocation 5046 * @preferred_nid: The preferred NUMA node ID to allocate from 5047 * @nodemask: Set of nodes to allocate from, may be NULL 5048 * @nr_pages: The number of pages desired in the array 5049 * @page_array: Array to store the pages 5050 * 5051 * This is a batched version of the page allocator that attempts to allocate 5052 * @nr_pages quickly. Pages are added to @page_array. 5053 * 5054 * Note that only the elements in @page_array that were cleared to %NULL on 5055 * entry are populated with newly allocated pages. @nr_pages is the maximum 5056 * number of pages that will be stored in the array. 5057 * 5058 * Returns the number of pages in @page_array, including ones already 5059 * allocated on entry. This can be less than the number requested in @nr_pages, 5060 * but all empty slots are filled from the beginning. I.e., if all slots in 5061 * @page_array were set to %NULL on entry, the slots from 0 to the return value 5062 * - 1 will be filled. 5063 */ 5064 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 5065 nodemask_t *nodemask, int nr_pages, 5066 struct page **page_array) 5067 { 5068 struct page *page; 5069 unsigned long UP_flags; 5070 struct zone *zone; 5071 struct zoneref *z; 5072 struct per_cpu_pages *pcp; 5073 struct list_head *pcp_list; 5074 struct alloc_context ac; 5075 gfp_t alloc_gfp; 5076 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5077 int nr_populated = 0, nr_account = 0; 5078 5079 /* 5080 * Skip populated array elements to determine if any pages need 5081 * to be allocated before disabling IRQs. 5082 */ 5083 while (nr_populated < nr_pages && page_array[nr_populated]) 5084 nr_populated++; 5085 5086 /* No pages requested? */ 5087 if (unlikely(nr_pages <= 0)) 5088 goto out; 5089 5090 /* Already populated array? */ 5091 if (unlikely(nr_pages - nr_populated == 0)) 5092 goto out; 5093 5094 /* Bulk allocator does not support memcg accounting. */ 5095 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 5096 goto failed; 5097 5098 /* Use the single page allocator for one page. */ 5099 if (nr_pages - nr_populated == 1) 5100 goto failed; 5101 5102 #ifdef CONFIG_PAGE_OWNER 5103 /* 5104 * PAGE_OWNER may recurse into the allocator to allocate space to 5105 * save the stack with pagesets.lock held. Releasing/reacquiring 5106 * removes much of the performance benefit of bulk allocation so 5107 * force the caller to allocate one page at a time as it'll have 5108 * similar performance to added complexity to the bulk allocator. 5109 */ 5110 if (static_branch_unlikely(&page_owner_inited)) 5111 goto failed; 5112 #endif 5113 5114 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5115 gfp &= gfp_allowed_mask; 5116 alloc_gfp = gfp; 5117 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5118 goto out; 5119 gfp = alloc_gfp; 5120 5121 /* Find an allowed local zone that meets the low watermark. */ 5122 z = ac.preferred_zoneref; 5123 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 5124 unsigned long mark; 5125 5126 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5127 !__cpuset_zone_allowed(zone, gfp)) { 5128 continue; 5129 } 5130 5131 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 5132 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 5133 goto failed; 5134 } 5135 5136 cond_accept_memory(zone, 0, alloc_flags); 5137 retry_this_zone: 5138 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 5139 if (zone_watermark_fast(zone, 0, mark, 5140 zonelist_zone_idx(ac.preferred_zoneref), 5141 alloc_flags, gfp)) { 5142 break; 5143 } 5144 5145 if (cond_accept_memory(zone, 0, alloc_flags)) 5146 goto retry_this_zone; 5147 5148 /* Try again if zone has deferred pages */ 5149 if (deferred_pages_enabled()) { 5150 if (_deferred_grow_zone(zone, 0)) 5151 goto retry_this_zone; 5152 } 5153 } 5154 5155 /* 5156 * If there are no allowed local zones that meets the watermarks then 5157 * try to allocate a single page and reclaim if necessary. 5158 */ 5159 if (unlikely(!zone)) 5160 goto failed; 5161 5162 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 5163 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags); 5164 if (!pcp) 5165 goto failed; 5166 5167 /* Attempt the batch allocation */ 5168 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5169 while (nr_populated < nr_pages) { 5170 5171 /* Skip existing pages */ 5172 if (page_array[nr_populated]) { 5173 nr_populated++; 5174 continue; 5175 } 5176 5177 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5178 pcp, pcp_list); 5179 if (unlikely(!page)) { 5180 /* Try and allocate at least one page */ 5181 if (!nr_account) { 5182 pcp_spin_unlock(pcp, UP_flags); 5183 goto failed; 5184 } 5185 break; 5186 } 5187 nr_account++; 5188 5189 prep_new_page(page, 0, gfp, 0); 5190 set_page_refcounted(page); 5191 page_array[nr_populated++] = page; 5192 } 5193 5194 pcp_spin_unlock(pcp, UP_flags); 5195 5196 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5197 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 5198 5199 out: 5200 return nr_populated; 5201 5202 failed: 5203 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 5204 if (page) 5205 page_array[nr_populated++] = page; 5206 goto out; 5207 } 5208 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 5209 5210 /* 5211 * This is the 'heart' of the zoned buddy allocator. 5212 */ 5213 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, 5214 int preferred_nid, nodemask_t *nodemask) 5215 { 5216 struct page *page; 5217 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5218 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5219 struct alloc_context ac = { }; 5220 5221 /* 5222 * There are several places where we assume that the order value is sane 5223 * so bail out early if the request is out of bound. 5224 */ 5225 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 5226 return NULL; 5227 5228 gfp &= gfp_allowed_mask; 5229 /* 5230 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 5231 * resp. GFP_NOIO which has to be inherited for all allocation requests 5232 * from a particular context which has been marked by 5233 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5234 * movable zones are not used during allocation. 5235 */ 5236 gfp = current_gfp_context(gfp); 5237 alloc_gfp = gfp; 5238 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5239 &alloc_gfp, &alloc_flags)) 5240 return NULL; 5241 5242 /* 5243 * Forbid the first pass from falling back to types that fragment 5244 * memory until all local zones are considered. 5245 */ 5246 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 5247 5248 /* First allocation attempt */ 5249 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5250 if (likely(page)) 5251 goto out; 5252 5253 alloc_gfp = gfp; 5254 ac.spread_dirty_pages = false; 5255 5256 /* 5257 * Restore the original nodemask if it was potentially replaced with 5258 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 5259 */ 5260 ac.nodemask = nodemask; 5261 5262 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5263 5264 out: 5265 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 5266 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5267 free_frozen_pages(page, order); 5268 page = NULL; 5269 } 5270 5271 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5272 kmsan_alloc_page(page, order, alloc_gfp); 5273 5274 return page; 5275 } 5276 EXPORT_SYMBOL(__alloc_frozen_pages_noprof); 5277 5278 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 5279 int preferred_nid, nodemask_t *nodemask) 5280 { 5281 struct page *page; 5282 5283 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); 5284 if (page) 5285 set_page_refcounted(page); 5286 return page; 5287 } 5288 EXPORT_SYMBOL(__alloc_pages_noprof); 5289 5290 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 5291 nodemask_t *nodemask) 5292 { 5293 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 5294 preferred_nid, nodemask); 5295 return page_rmappable_folio(page); 5296 } 5297 EXPORT_SYMBOL(__folio_alloc_noprof); 5298 5299 /* 5300 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5301 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5302 * you need to access high mem. 5303 */ 5304 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 5305 { 5306 struct page *page; 5307 5308 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 5309 if (!page) 5310 return 0; 5311 return (unsigned long) page_address(page); 5312 } 5313 EXPORT_SYMBOL(get_free_pages_noprof); 5314 5315 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 5316 { 5317 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 5318 } 5319 EXPORT_SYMBOL(get_zeroed_page_noprof); 5320 5321 static void ___free_pages(struct page *page, unsigned int order, 5322 fpi_t fpi_flags) 5323 { 5324 /* get PageHead before we drop reference */ 5325 int head = PageHead(page); 5326 /* get alloc tag in case the page is released by others */ 5327 struct alloc_tag *tag = pgalloc_tag_get(page); 5328 5329 if (put_page_testzero(page)) 5330 __free_frozen_pages(page, order, fpi_flags); 5331 else if (!head) { 5332 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 5333 while (order-- > 0) { 5334 /* 5335 * The "tail" pages of this non-compound high-order 5336 * page will have no code tags, so to avoid warnings 5337 * mark them as empty. 5338 */ 5339 clear_page_tag_ref(page + (1 << order)); 5340 __free_frozen_pages(page + (1 << order), order, 5341 fpi_flags); 5342 } 5343 } 5344 } 5345 5346 /** 5347 * __free_pages - Free pages allocated with alloc_pages(). 5348 * @page: The page pointer returned from alloc_pages(). 5349 * @order: The order of the allocation. 5350 * 5351 * This function can free multi-page allocations that are not compound 5352 * pages. It does not check that the @order passed in matches that of 5353 * the allocation, so it is easy to leak memory. Freeing more memory 5354 * than was allocated will probably emit a warning. 5355 * 5356 * If the last reference to this page is speculative, it will be released 5357 * by put_page() which only frees the first page of a non-compound 5358 * allocation. To prevent the remaining pages from being leaked, we free 5359 * the subsequent pages here. If you want to use the page's reference 5360 * count to decide when to free the allocation, you should allocate a 5361 * compound page, and use put_page() instead of __free_pages(). 5362 * 5363 * Context: May be called in interrupt context or while holding a normal 5364 * spinlock, but not in NMI context or while holding a raw spinlock. 5365 */ 5366 void __free_pages(struct page *page, unsigned int order) 5367 { 5368 ___free_pages(page, order, FPI_NONE); 5369 } 5370 EXPORT_SYMBOL(__free_pages); 5371 5372 /* 5373 * Can be called while holding raw_spin_lock or from IRQ and NMI for any 5374 * page type (not only those that came from alloc_pages_nolock) 5375 */ 5376 void free_pages_nolock(struct page *page, unsigned int order) 5377 { 5378 ___free_pages(page, order, FPI_TRYLOCK); 5379 } 5380 5381 /** 5382 * free_pages - Free pages allocated with __get_free_pages(). 5383 * @addr: The virtual address tied to a page returned from __get_free_pages(). 5384 * @order: The order of the allocation. 5385 * 5386 * This function behaves the same as __free_pages(). Use this function 5387 * to free pages when you only have a valid virtual address. If you have 5388 * the page, call __free_pages() instead. 5389 */ 5390 void free_pages(unsigned long addr, unsigned int order) 5391 { 5392 if (addr != 0) { 5393 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5394 __free_pages(virt_to_page((void *)addr), order); 5395 } 5396 } 5397 5398 EXPORT_SYMBOL(free_pages); 5399 5400 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5401 size_t size) 5402 { 5403 if (addr) { 5404 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 5405 struct page *page = virt_to_page((void *)addr); 5406 struct page *last = page + nr; 5407 5408 __split_page(page, order); 5409 while (page < --last) 5410 set_page_refcounted(last); 5411 5412 last = page + (1UL << order); 5413 for (page += nr; page < last; page++) 5414 __free_pages_ok(page, 0, FPI_TO_TAIL); 5415 } 5416 return (void *)addr; 5417 } 5418 5419 /** 5420 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5421 * @size: the number of bytes to allocate 5422 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5423 * 5424 * This function is similar to alloc_pages(), except that it allocates the 5425 * minimum number of pages to satisfy the request. alloc_pages() can only 5426 * allocate memory in power-of-two pages. 5427 * 5428 * This function is also limited by MAX_PAGE_ORDER. 5429 * 5430 * Memory allocated by this function must be released by free_pages_exact(). 5431 * 5432 * Return: pointer to the allocated area or %NULL in case of error. 5433 */ 5434 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 5435 { 5436 unsigned int order = get_order(size); 5437 unsigned long addr; 5438 5439 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5440 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5441 5442 addr = get_free_pages_noprof(gfp_mask, order); 5443 return make_alloc_exact(addr, order, size); 5444 } 5445 EXPORT_SYMBOL(alloc_pages_exact_noprof); 5446 5447 /** 5448 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5449 * pages on a node. 5450 * @nid: the preferred node ID where memory should be allocated 5451 * @size: the number of bytes to allocate 5452 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5453 * 5454 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5455 * back. 5456 * 5457 * Return: pointer to the allocated area or %NULL in case of error. 5458 */ 5459 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5460 { 5461 unsigned int order = get_order(size); 5462 struct page *p; 5463 5464 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5465 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5466 5467 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5468 if (!p) 5469 return NULL; 5470 return make_alloc_exact((unsigned long)page_address(p), order, size); 5471 } 5472 5473 /** 5474 * free_pages_exact - release memory allocated via alloc_pages_exact() 5475 * @virt: the value returned by alloc_pages_exact. 5476 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5477 * 5478 * Release the memory allocated by a previous call to alloc_pages_exact. 5479 */ 5480 void free_pages_exact(void *virt, size_t size) 5481 { 5482 unsigned long addr = (unsigned long)virt; 5483 unsigned long end = addr + PAGE_ALIGN(size); 5484 5485 while (addr < end) { 5486 free_page(addr); 5487 addr += PAGE_SIZE; 5488 } 5489 } 5490 EXPORT_SYMBOL(free_pages_exact); 5491 5492 /** 5493 * nr_free_zone_pages - count number of pages beyond high watermark 5494 * @offset: The zone index of the highest zone 5495 * 5496 * nr_free_zone_pages() counts the number of pages which are beyond the 5497 * high watermark within all zones at or below a given zone index. For each 5498 * zone, the number of pages is calculated as: 5499 * 5500 * nr_free_zone_pages = managed_pages - high_pages 5501 * 5502 * Return: number of pages beyond high watermark. 5503 */ 5504 static unsigned long nr_free_zone_pages(int offset) 5505 { 5506 struct zoneref *z; 5507 struct zone *zone; 5508 5509 /* Just pick one node, since fallback list is circular */ 5510 unsigned long sum = 0; 5511 5512 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5513 5514 for_each_zone_zonelist(zone, z, zonelist, offset) { 5515 unsigned long size = zone_managed_pages(zone); 5516 unsigned long high = high_wmark_pages(zone); 5517 if (size > high) 5518 sum += size - high; 5519 } 5520 5521 return sum; 5522 } 5523 5524 /** 5525 * nr_free_buffer_pages - count number of pages beyond high watermark 5526 * 5527 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5528 * watermark within ZONE_DMA and ZONE_NORMAL. 5529 * 5530 * Return: number of pages beyond high watermark within ZONE_DMA and 5531 * ZONE_NORMAL. 5532 */ 5533 unsigned long nr_free_buffer_pages(void) 5534 { 5535 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5536 } 5537 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5538 5539 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5540 { 5541 zoneref->zone = zone; 5542 zoneref->zone_idx = zone_idx(zone); 5543 } 5544 5545 /* 5546 * Builds allocation fallback zone lists. 5547 * 5548 * Add all populated zones of a node to the zonelist. 5549 */ 5550 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5551 { 5552 struct zone *zone; 5553 enum zone_type zone_type = MAX_NR_ZONES; 5554 int nr_zones = 0; 5555 5556 do { 5557 zone_type--; 5558 zone = pgdat->node_zones + zone_type; 5559 if (populated_zone(zone)) { 5560 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5561 check_highest_zone(zone_type); 5562 } 5563 } while (zone_type); 5564 5565 return nr_zones; 5566 } 5567 5568 #ifdef CONFIG_NUMA 5569 5570 static int __parse_numa_zonelist_order(char *s) 5571 { 5572 /* 5573 * We used to support different zonelists modes but they turned 5574 * out to be just not useful. Let's keep the warning in place 5575 * if somebody still use the cmd line parameter so that we do 5576 * not fail it silently 5577 */ 5578 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5579 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5580 return -EINVAL; 5581 } 5582 return 0; 5583 } 5584 5585 static char numa_zonelist_order[] = "Node"; 5586 #define NUMA_ZONELIST_ORDER_LEN 16 5587 /* 5588 * sysctl handler for numa_zonelist_order 5589 */ 5590 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5591 void *buffer, size_t *length, loff_t *ppos) 5592 { 5593 if (write) 5594 return __parse_numa_zonelist_order(buffer); 5595 return proc_dostring(table, write, buffer, length, ppos); 5596 } 5597 5598 static int node_load[MAX_NUMNODES]; 5599 5600 /** 5601 * find_next_best_node - find the next node that should appear in a given node's fallback list 5602 * @node: node whose fallback list we're appending 5603 * @used_node_mask: nodemask_t of already used nodes 5604 * 5605 * We use a number of factors to determine which is the next node that should 5606 * appear on a given node's fallback list. The node should not have appeared 5607 * already in @node's fallback list, and it should be the next closest node 5608 * according to the distance array (which contains arbitrary distance values 5609 * from each node to each node in the system), and should also prefer nodes 5610 * with no CPUs, since presumably they'll have very little allocation pressure 5611 * on them otherwise. 5612 * 5613 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5614 */ 5615 int find_next_best_node(int node, nodemask_t *used_node_mask) 5616 { 5617 int n, val; 5618 int min_val = INT_MAX; 5619 int best_node = NUMA_NO_NODE; 5620 5621 /* 5622 * Use the local node if we haven't already, but for memoryless local 5623 * node, we should skip it and fall back to other nodes. 5624 */ 5625 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5626 node_set(node, *used_node_mask); 5627 return node; 5628 } 5629 5630 for_each_node_state(n, N_MEMORY) { 5631 5632 /* Don't want a node to appear more than once */ 5633 if (node_isset(n, *used_node_mask)) 5634 continue; 5635 5636 /* Use the distance array to find the distance */ 5637 val = node_distance(node, n); 5638 5639 /* Penalize nodes under us ("prefer the next node") */ 5640 val += (n < node); 5641 5642 /* Give preference to headless and unused nodes */ 5643 if (!cpumask_empty(cpumask_of_node(n))) 5644 val += PENALTY_FOR_NODE_WITH_CPUS; 5645 5646 /* Slight preference for less loaded node */ 5647 val *= MAX_NUMNODES; 5648 val += node_load[n]; 5649 5650 if (val < min_val) { 5651 min_val = val; 5652 best_node = n; 5653 } 5654 } 5655 5656 if (best_node >= 0) 5657 node_set(best_node, *used_node_mask); 5658 5659 return best_node; 5660 } 5661 5662 5663 /* 5664 * Build zonelists ordered by node and zones within node. 5665 * This results in maximum locality--normal zone overflows into local 5666 * DMA zone, if any--but risks exhausting DMA zone. 5667 */ 5668 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5669 unsigned nr_nodes) 5670 { 5671 struct zoneref *zonerefs; 5672 int i; 5673 5674 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5675 5676 for (i = 0; i < nr_nodes; i++) { 5677 int nr_zones; 5678 5679 pg_data_t *node = NODE_DATA(node_order[i]); 5680 5681 nr_zones = build_zonerefs_node(node, zonerefs); 5682 zonerefs += nr_zones; 5683 } 5684 zonerefs->zone = NULL; 5685 zonerefs->zone_idx = 0; 5686 } 5687 5688 /* 5689 * Build __GFP_THISNODE zonelists 5690 */ 5691 static void build_thisnode_zonelists(pg_data_t *pgdat) 5692 { 5693 struct zoneref *zonerefs; 5694 int nr_zones; 5695 5696 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5697 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5698 zonerefs += nr_zones; 5699 zonerefs->zone = NULL; 5700 zonerefs->zone_idx = 0; 5701 } 5702 5703 static void build_zonelists(pg_data_t *pgdat) 5704 { 5705 static int node_order[MAX_NUMNODES]; 5706 int node, nr_nodes = 0; 5707 nodemask_t used_mask = NODE_MASK_NONE; 5708 int local_node, prev_node; 5709 5710 /* NUMA-aware ordering of nodes */ 5711 local_node = pgdat->node_id; 5712 prev_node = local_node; 5713 5714 memset(node_order, 0, sizeof(node_order)); 5715 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5716 /* 5717 * We don't want to pressure a particular node. 5718 * So adding penalty to the first node in same 5719 * distance group to make it round-robin. 5720 */ 5721 if (node_distance(local_node, node) != 5722 node_distance(local_node, prev_node)) 5723 node_load[node] += 1; 5724 5725 node_order[nr_nodes++] = node; 5726 prev_node = node; 5727 } 5728 5729 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5730 build_thisnode_zonelists(pgdat); 5731 pr_info("Fallback order for Node %d: ", local_node); 5732 for (node = 0; node < nr_nodes; node++) 5733 pr_cont("%d ", node_order[node]); 5734 pr_cont("\n"); 5735 } 5736 5737 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5738 /* 5739 * Return node id of node used for "local" allocations. 5740 * I.e., first node id of first zone in arg node's generic zonelist. 5741 * Used for initializing percpu 'numa_mem', which is used primarily 5742 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5743 */ 5744 int local_memory_node(int node) 5745 { 5746 struct zoneref *z; 5747 5748 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5749 gfp_zone(GFP_KERNEL), 5750 NULL); 5751 return zonelist_node_idx(z); 5752 } 5753 #endif 5754 5755 static void setup_min_unmapped_ratio(void); 5756 static void setup_min_slab_ratio(void); 5757 #else /* CONFIG_NUMA */ 5758 5759 static void build_zonelists(pg_data_t *pgdat) 5760 { 5761 struct zoneref *zonerefs; 5762 int nr_zones; 5763 5764 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5765 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5766 zonerefs += nr_zones; 5767 5768 zonerefs->zone = NULL; 5769 zonerefs->zone_idx = 0; 5770 } 5771 5772 #endif /* CONFIG_NUMA */ 5773 5774 /* 5775 * Boot pageset table. One per cpu which is going to be used for all 5776 * zones and all nodes. The parameters will be set in such a way 5777 * that an item put on a list will immediately be handed over to 5778 * the buddy list. This is safe since pageset manipulation is done 5779 * with interrupts disabled. 5780 * 5781 * The boot_pagesets must be kept even after bootup is complete for 5782 * unused processors and/or zones. They do play a role for bootstrapping 5783 * hotplugged processors. 5784 * 5785 * zoneinfo_show() and maybe other functions do 5786 * not check if the processor is online before following the pageset pointer. 5787 * Other parts of the kernel may not check if the zone is available. 5788 */ 5789 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5790 /* These effectively disable the pcplists in the boot pageset completely */ 5791 #define BOOT_PAGESET_HIGH 0 5792 #define BOOT_PAGESET_BATCH 1 5793 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5794 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5795 5796 static void __build_all_zonelists(void *data) 5797 { 5798 int nid; 5799 int __maybe_unused cpu; 5800 pg_data_t *self = data; 5801 unsigned long flags; 5802 5803 /* 5804 * The zonelist_update_seq must be acquired with irqsave because the 5805 * reader can be invoked from IRQ with GFP_ATOMIC. 5806 */ 5807 write_seqlock_irqsave(&zonelist_update_seq, flags); 5808 /* 5809 * Also disable synchronous printk() to prevent any printk() from 5810 * trying to hold port->lock, for 5811 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5812 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5813 */ 5814 printk_deferred_enter(); 5815 5816 #ifdef CONFIG_NUMA 5817 memset(node_load, 0, sizeof(node_load)); 5818 #endif 5819 5820 /* 5821 * This node is hotadded and no memory is yet present. So just 5822 * building zonelists is fine - no need to touch other nodes. 5823 */ 5824 if (self && !node_online(self->node_id)) { 5825 build_zonelists(self); 5826 } else { 5827 /* 5828 * All possible nodes have pgdat preallocated 5829 * in free_area_init 5830 */ 5831 for_each_node(nid) { 5832 pg_data_t *pgdat = NODE_DATA(nid); 5833 5834 build_zonelists(pgdat); 5835 } 5836 5837 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5838 /* 5839 * We now know the "local memory node" for each node-- 5840 * i.e., the node of the first zone in the generic zonelist. 5841 * Set up numa_mem percpu variable for on-line cpus. During 5842 * boot, only the boot cpu should be on-line; we'll init the 5843 * secondary cpus' numa_mem as they come on-line. During 5844 * node/memory hotplug, we'll fixup all on-line cpus. 5845 */ 5846 for_each_online_cpu(cpu) 5847 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5848 #endif 5849 } 5850 5851 printk_deferred_exit(); 5852 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5853 } 5854 5855 static noinline void __init 5856 build_all_zonelists_init(void) 5857 { 5858 int cpu; 5859 5860 __build_all_zonelists(NULL); 5861 5862 /* 5863 * Initialize the boot_pagesets that are going to be used 5864 * for bootstrapping processors. The real pagesets for 5865 * each zone will be allocated later when the per cpu 5866 * allocator is available. 5867 * 5868 * boot_pagesets are used also for bootstrapping offline 5869 * cpus if the system is already booted because the pagesets 5870 * are needed to initialize allocators on a specific cpu too. 5871 * F.e. the percpu allocator needs the page allocator which 5872 * needs the percpu allocator in order to allocate its pagesets 5873 * (a chicken-egg dilemma). 5874 */ 5875 for_each_possible_cpu(cpu) 5876 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5877 5878 mminit_verify_zonelist(); 5879 cpuset_init_current_mems_allowed(); 5880 } 5881 5882 /* 5883 * unless system_state == SYSTEM_BOOTING. 5884 * 5885 * __ref due to call of __init annotated helper build_all_zonelists_init 5886 * [protected by SYSTEM_BOOTING]. 5887 */ 5888 void __ref build_all_zonelists(pg_data_t *pgdat) 5889 { 5890 unsigned long vm_total_pages; 5891 5892 if (system_state == SYSTEM_BOOTING) { 5893 build_all_zonelists_init(); 5894 } else { 5895 __build_all_zonelists(pgdat); 5896 /* cpuset refresh routine should be here */ 5897 } 5898 /* Get the number of free pages beyond high watermark in all zones. */ 5899 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5900 /* 5901 * Disable grouping by mobility if the number of pages in the 5902 * system is too low to allow the mechanism to work. It would be 5903 * more accurate, but expensive to check per-zone. This check is 5904 * made on memory-hotadd so a system can start with mobility 5905 * disabled and enable it later 5906 */ 5907 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5908 page_group_by_mobility_disabled = 1; 5909 else 5910 page_group_by_mobility_disabled = 0; 5911 5912 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5913 nr_online_nodes, 5914 str_off_on(page_group_by_mobility_disabled), 5915 vm_total_pages); 5916 #ifdef CONFIG_NUMA 5917 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5918 #endif 5919 } 5920 5921 static int zone_batchsize(struct zone *zone) 5922 { 5923 #ifdef CONFIG_MMU 5924 int batch; 5925 5926 /* 5927 * The number of pages to batch allocate is either ~0.025% 5928 * of the zone or 256KB, whichever is smaller. The batch 5929 * size is striking a balance between allocation latency 5930 * and zone lock contention. 5931 */ 5932 batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE); 5933 if (batch <= 1) 5934 return 1; 5935 5936 /* 5937 * Clamp the batch to a 2^n - 1 value. Having a power 5938 * of 2 value was found to be more likely to have 5939 * suboptimal cache aliasing properties in some cases. 5940 * 5941 * For example if 2 tasks are alternately allocating 5942 * batches of pages, one task can end up with a lot 5943 * of pages of one half of the possible page colors 5944 * and the other with pages of the other colors. 5945 */ 5946 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5947 5948 return batch; 5949 5950 #else 5951 /* The deferral and batching of frees should be suppressed under NOMMU 5952 * conditions. 5953 * 5954 * The problem is that NOMMU needs to be able to allocate large chunks 5955 * of contiguous memory as there's no hardware page translation to 5956 * assemble apparent contiguous memory from discontiguous pages. 5957 * 5958 * Queueing large contiguous runs of pages for batching, however, 5959 * causes the pages to actually be freed in smaller chunks. As there 5960 * can be a significant delay between the individual batches being 5961 * recycled, this leads to the once large chunks of space being 5962 * fragmented and becoming unavailable for high-order allocations. 5963 */ 5964 return 1; 5965 #endif 5966 } 5967 5968 static int percpu_pagelist_high_fraction; 5969 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5970 int high_fraction) 5971 { 5972 #ifdef CONFIG_MMU 5973 int high; 5974 int nr_split_cpus; 5975 unsigned long total_pages; 5976 5977 if (!high_fraction) { 5978 /* 5979 * By default, the high value of the pcp is based on the zone 5980 * low watermark so that if they are full then background 5981 * reclaim will not be started prematurely. 5982 */ 5983 total_pages = low_wmark_pages(zone); 5984 } else { 5985 /* 5986 * If percpu_pagelist_high_fraction is configured, the high 5987 * value is based on a fraction of the managed pages in the 5988 * zone. 5989 */ 5990 total_pages = zone_managed_pages(zone) / high_fraction; 5991 } 5992 5993 /* 5994 * Split the high value across all online CPUs local to the zone. Note 5995 * that early in boot that CPUs may not be online yet and that during 5996 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5997 * onlined. For memory nodes that have no CPUs, split the high value 5998 * across all online CPUs to mitigate the risk that reclaim is triggered 5999 * prematurely due to pages stored on pcp lists. 6000 */ 6001 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 6002 if (!nr_split_cpus) 6003 nr_split_cpus = num_online_cpus(); 6004 high = total_pages / nr_split_cpus; 6005 6006 /* 6007 * Ensure high is at least batch*4. The multiple is based on the 6008 * historical relationship between high and batch. 6009 */ 6010 high = max(high, batch << 2); 6011 6012 return high; 6013 #else 6014 return 0; 6015 #endif 6016 } 6017 6018 /* 6019 * pcp->high and pcp->batch values are related and generally batch is lower 6020 * than high. They are also related to pcp->count such that count is lower 6021 * than high, and as soon as it reaches high, the pcplist is flushed. 6022 * 6023 * However, guaranteeing these relations at all times would require e.g. write 6024 * barriers here but also careful usage of read barriers at the read side, and 6025 * thus be prone to error and bad for performance. Thus the update only prevents 6026 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 6027 * should ensure they can cope with those fields changing asynchronously, and 6028 * fully trust only the pcp->count field on the local CPU with interrupts 6029 * disabled. 6030 * 6031 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 6032 * outside of boot time (or some other assurance that no concurrent updaters 6033 * exist). 6034 */ 6035 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 6036 unsigned long high_max, unsigned long batch) 6037 { 6038 WRITE_ONCE(pcp->batch, batch); 6039 WRITE_ONCE(pcp->high_min, high_min); 6040 WRITE_ONCE(pcp->high_max, high_max); 6041 } 6042 6043 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 6044 { 6045 int pindex; 6046 6047 memset(pcp, 0, sizeof(*pcp)); 6048 memset(pzstats, 0, sizeof(*pzstats)); 6049 6050 spin_lock_init(&pcp->lock); 6051 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 6052 INIT_LIST_HEAD(&pcp->lists[pindex]); 6053 6054 /* 6055 * Set batch and high values safe for a boot pageset. A true percpu 6056 * pageset's initialization will update them subsequently. Here we don't 6057 * need to be as careful as pageset_update() as nobody can access the 6058 * pageset yet. 6059 */ 6060 pcp->high_min = BOOT_PAGESET_HIGH; 6061 pcp->high_max = BOOT_PAGESET_HIGH; 6062 pcp->batch = BOOT_PAGESET_BATCH; 6063 } 6064 6065 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 6066 unsigned long high_max, unsigned long batch) 6067 { 6068 struct per_cpu_pages *pcp; 6069 int cpu; 6070 6071 for_each_possible_cpu(cpu) { 6072 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6073 pageset_update(pcp, high_min, high_max, batch); 6074 } 6075 } 6076 6077 /* 6078 * Calculate and set new high and batch values for all per-cpu pagesets of a 6079 * zone based on the zone's size. 6080 */ 6081 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 6082 { 6083 int new_high_min, new_high_max, new_batch; 6084 6085 new_batch = zone_batchsize(zone); 6086 if (percpu_pagelist_high_fraction) { 6087 new_high_min = zone_highsize(zone, new_batch, cpu_online, 6088 percpu_pagelist_high_fraction); 6089 /* 6090 * PCP high is tuned manually, disable auto-tuning via 6091 * setting high_min and high_max to the manual value. 6092 */ 6093 new_high_max = new_high_min; 6094 } else { 6095 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 6096 new_high_max = zone_highsize(zone, new_batch, cpu_online, 6097 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 6098 } 6099 6100 if (zone->pageset_high_min == new_high_min && 6101 zone->pageset_high_max == new_high_max && 6102 zone->pageset_batch == new_batch) 6103 return; 6104 6105 zone->pageset_high_min = new_high_min; 6106 zone->pageset_high_max = new_high_max; 6107 zone->pageset_batch = new_batch; 6108 6109 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 6110 new_batch); 6111 } 6112 6113 void __meminit setup_zone_pageset(struct zone *zone) 6114 { 6115 int cpu; 6116 6117 /* Size may be 0 on !SMP && !NUMA */ 6118 if (sizeof(struct per_cpu_zonestat) > 0) 6119 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 6120 6121 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 6122 for_each_possible_cpu(cpu) { 6123 struct per_cpu_pages *pcp; 6124 struct per_cpu_zonestat *pzstats; 6125 6126 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6127 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6128 per_cpu_pages_init(pcp, pzstats); 6129 } 6130 6131 zone_set_pageset_high_and_batch(zone, 0); 6132 } 6133 6134 /* 6135 * The zone indicated has a new number of managed_pages; batch sizes and percpu 6136 * page high values need to be recalculated. 6137 */ 6138 static void zone_pcp_update(struct zone *zone, int cpu_online) 6139 { 6140 mutex_lock(&pcp_batch_high_lock); 6141 zone_set_pageset_high_and_batch(zone, cpu_online); 6142 mutex_unlock(&pcp_batch_high_lock); 6143 } 6144 6145 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 6146 { 6147 struct per_cpu_pages *pcp; 6148 struct cpu_cacheinfo *cci; 6149 unsigned long UP_flags; 6150 6151 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6152 cci = get_cpu_cacheinfo(cpu); 6153 /* 6154 * If data cache slice of CPU is large enough, "pcp->batch" 6155 * pages can be preserved in PCP before draining PCP for 6156 * consecutive high-order pages freeing without allocation. 6157 * This can reduce zone lock contention without hurting 6158 * cache-hot pages sharing. 6159 */ 6160 pcp_spin_lock_maybe_irqsave(pcp, UP_flags); 6161 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 6162 pcp->flags |= PCPF_FREE_HIGH_BATCH; 6163 else 6164 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 6165 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags); 6166 } 6167 6168 void setup_pcp_cacheinfo(unsigned int cpu) 6169 { 6170 struct zone *zone; 6171 6172 for_each_populated_zone(zone) 6173 zone_pcp_update_cacheinfo(zone, cpu); 6174 } 6175 6176 /* 6177 * Allocate per cpu pagesets and initialize them. 6178 * Before this call only boot pagesets were available. 6179 */ 6180 void __init setup_per_cpu_pageset(void) 6181 { 6182 struct pglist_data *pgdat; 6183 struct zone *zone; 6184 int __maybe_unused cpu; 6185 6186 for_each_populated_zone(zone) 6187 setup_zone_pageset(zone); 6188 6189 #ifdef CONFIG_NUMA 6190 /* 6191 * Unpopulated zones continue using the boot pagesets. 6192 * The numa stats for these pagesets need to be reset. 6193 * Otherwise, they will end up skewing the stats of 6194 * the nodes these zones are associated with. 6195 */ 6196 for_each_possible_cpu(cpu) { 6197 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 6198 memset(pzstats->vm_numa_event, 0, 6199 sizeof(pzstats->vm_numa_event)); 6200 } 6201 #endif 6202 6203 for_each_online_pgdat(pgdat) 6204 pgdat->per_cpu_nodestats = 6205 alloc_percpu(struct per_cpu_nodestat); 6206 } 6207 6208 __meminit void zone_pcp_init(struct zone *zone) 6209 { 6210 /* 6211 * per cpu subsystem is not up at this point. The following code 6212 * relies on the ability of the linker to provide the 6213 * offset of a (static) per cpu variable into the per cpu area. 6214 */ 6215 zone->per_cpu_pageset = &boot_pageset; 6216 zone->per_cpu_zonestats = &boot_zonestats; 6217 zone->pageset_high_min = BOOT_PAGESET_HIGH; 6218 zone->pageset_high_max = BOOT_PAGESET_HIGH; 6219 zone->pageset_batch = BOOT_PAGESET_BATCH; 6220 6221 if (populated_zone(zone)) 6222 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 6223 zone->present_pages, zone_batchsize(zone)); 6224 } 6225 6226 static void setup_per_zone_lowmem_reserve(void); 6227 6228 void adjust_managed_page_count(struct page *page, long count) 6229 { 6230 atomic_long_add(count, &page_zone(page)->managed_pages); 6231 totalram_pages_add(count); 6232 setup_per_zone_lowmem_reserve(); 6233 } 6234 EXPORT_SYMBOL(adjust_managed_page_count); 6235 6236 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 6237 { 6238 void *pos; 6239 unsigned long pages = 0; 6240 6241 start = (void *)PAGE_ALIGN((unsigned long)start); 6242 end = (void *)((unsigned long)end & PAGE_MASK); 6243 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6244 struct page *page = virt_to_page(pos); 6245 void *direct_map_addr; 6246 6247 /* 6248 * 'direct_map_addr' might be different from 'pos' 6249 * because some architectures' virt_to_page() 6250 * work with aliases. Getting the direct map 6251 * address ensures that we get a _writeable_ 6252 * alias for the memset(). 6253 */ 6254 direct_map_addr = page_address(page); 6255 /* 6256 * Perform a kasan-unchecked memset() since this memory 6257 * has not been initialized. 6258 */ 6259 direct_map_addr = kasan_reset_tag(direct_map_addr); 6260 if ((unsigned int)poison <= 0xFF) 6261 memset(direct_map_addr, poison, PAGE_SIZE); 6262 6263 free_reserved_page(page); 6264 } 6265 6266 if (pages && s) 6267 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 6268 6269 return pages; 6270 } 6271 6272 void free_reserved_page(struct page *page) 6273 { 6274 clear_page_tag_ref(page); 6275 ClearPageReserved(page); 6276 init_page_count(page); 6277 __free_page(page); 6278 adjust_managed_page_count(page, 1); 6279 } 6280 EXPORT_SYMBOL(free_reserved_page); 6281 6282 static int page_alloc_cpu_dead(unsigned int cpu) 6283 { 6284 struct zone *zone; 6285 6286 lru_add_drain_cpu(cpu); 6287 mlock_drain_remote(cpu); 6288 drain_pages(cpu); 6289 6290 /* 6291 * Spill the event counters of the dead processor 6292 * into the current processors event counters. 6293 * This artificially elevates the count of the current 6294 * processor. 6295 */ 6296 vm_events_fold_cpu(cpu); 6297 6298 /* 6299 * Zero the differential counters of the dead processor 6300 * so that the vm statistics are consistent. 6301 * 6302 * This is only okay since the processor is dead and cannot 6303 * race with what we are doing. 6304 */ 6305 cpu_vm_stats_fold(cpu); 6306 6307 for_each_populated_zone(zone) 6308 zone_pcp_update(zone, 0); 6309 6310 return 0; 6311 } 6312 6313 static int page_alloc_cpu_online(unsigned int cpu) 6314 { 6315 struct zone *zone; 6316 6317 for_each_populated_zone(zone) 6318 zone_pcp_update(zone, 1); 6319 return 0; 6320 } 6321 6322 void __init page_alloc_init_cpuhp(void) 6323 { 6324 int ret; 6325 6326 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 6327 "mm/page_alloc:pcp", 6328 page_alloc_cpu_online, 6329 page_alloc_cpu_dead); 6330 WARN_ON(ret < 0); 6331 } 6332 6333 /* 6334 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6335 * or min_free_kbytes changes. 6336 */ 6337 static void calculate_totalreserve_pages(void) 6338 { 6339 struct pglist_data *pgdat; 6340 unsigned long reserve_pages = 0; 6341 enum zone_type i, j; 6342 6343 for_each_online_pgdat(pgdat) { 6344 6345 pgdat->totalreserve_pages = 0; 6346 6347 for (i = 0; i < MAX_NR_ZONES; i++) { 6348 struct zone *zone = pgdat->node_zones + i; 6349 long max = 0; 6350 unsigned long managed_pages = zone_managed_pages(zone); 6351 6352 /* 6353 * lowmem_reserve[j] is monotonically non-decreasing 6354 * in j for a given zone (see 6355 * setup_per_zone_lowmem_reserve()). The maximum 6356 * valid reserve lives at the highest index with a 6357 * non-zero value, so scan backwards and stop at the 6358 * first hit. 6359 */ 6360 for (j = MAX_NR_ZONES - 1; j > i; j--) { 6361 if (!zone->lowmem_reserve[j]) 6362 continue; 6363 6364 max = zone->lowmem_reserve[j]; 6365 break; 6366 } 6367 /* we treat the high watermark as reserved pages. */ 6368 max += high_wmark_pages(zone); 6369 6370 max = min_t(unsigned long, max, managed_pages); 6371 6372 pgdat->totalreserve_pages += max; 6373 6374 reserve_pages += max; 6375 } 6376 } 6377 totalreserve_pages = reserve_pages; 6378 trace_mm_calculate_totalreserve_pages(totalreserve_pages); 6379 } 6380 6381 /* 6382 * setup_per_zone_lowmem_reserve - called whenever 6383 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6384 * has a correct pages reserved value, so an adequate number of 6385 * pages are left in the zone after a successful __alloc_pages(). 6386 */ 6387 static void setup_per_zone_lowmem_reserve(void) 6388 { 6389 struct pglist_data *pgdat; 6390 enum zone_type i, j; 6391 /* 6392 * For a given zone node_zones[i], lowmem_reserve[j] (j > i) 6393 * represents how many pages in zone i must effectively be kept 6394 * in reserve when deciding whether an allocation class that is 6395 * allowed to allocate from zones up to j may fall back into 6396 * zone i. 6397 * 6398 * As j increases, the allocation class can use a strictly larger 6399 * set of fallback zones and therefore must not be allowed to 6400 * deplete low zones more aggressively than a less flexible one. 6401 * As a result, lowmem_reserve[j] is required to be monotonically 6402 * non-decreasing in j for each zone i. Callers such as 6403 * calculate_totalreserve_pages() rely on this monotonicity when 6404 * selecting the maximum reserve entry. 6405 */ 6406 for_each_online_pgdat(pgdat) { 6407 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 6408 struct zone *zone = &pgdat->node_zones[i]; 6409 int ratio = sysctl_lowmem_reserve_ratio[i]; 6410 bool clear = !ratio || !zone_managed_pages(zone); 6411 unsigned long managed_pages = 0; 6412 6413 for (j = i + 1; j < MAX_NR_ZONES; j++) { 6414 struct zone *upper_zone = &pgdat->node_zones[j]; 6415 6416 managed_pages += zone_managed_pages(upper_zone); 6417 6418 if (clear) 6419 zone->lowmem_reserve[j] = 0; 6420 else 6421 zone->lowmem_reserve[j] = managed_pages / ratio; 6422 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, 6423 zone->lowmem_reserve[j]); 6424 } 6425 } 6426 } 6427 6428 /* update totalreserve_pages */ 6429 calculate_totalreserve_pages(); 6430 } 6431 6432 static void __setup_per_zone_wmarks(void) 6433 { 6434 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6435 unsigned long lowmem_pages = 0; 6436 struct zone *zone; 6437 unsigned long flags; 6438 6439 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 6440 for_each_zone(zone) { 6441 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 6442 lowmem_pages += zone_managed_pages(zone); 6443 } 6444 6445 for_each_zone(zone) { 6446 u64 tmp; 6447 6448 spin_lock_irqsave(&zone->lock, flags); 6449 tmp = (u64)pages_min * zone_managed_pages(zone); 6450 tmp = div64_ul(tmp, lowmem_pages); 6451 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 6452 /* 6453 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6454 * need highmem and movable zones pages, so cap pages_min 6455 * to a small value here. 6456 * 6457 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6458 * deltas control async page reclaim, and so should 6459 * not be capped for highmem and movable zones. 6460 */ 6461 unsigned long min_pages; 6462 6463 min_pages = zone_managed_pages(zone) / 1024; 6464 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6465 zone->_watermark[WMARK_MIN] = min_pages; 6466 } else { 6467 /* 6468 * If it's a lowmem zone, reserve a number of pages 6469 * proportionate to the zone's size. 6470 */ 6471 zone->_watermark[WMARK_MIN] = tmp; 6472 } 6473 6474 /* 6475 * Set the kswapd watermarks distance according to the 6476 * scale factor in proportion to available memory, but 6477 * ensure a minimum size on small systems. 6478 */ 6479 tmp = max_t(u64, tmp >> 2, 6480 mult_frac(zone_managed_pages(zone), 6481 watermark_scale_factor, 10000)); 6482 6483 zone->watermark_boost = 0; 6484 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6485 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 6486 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 6487 trace_mm_setup_per_zone_wmarks(zone); 6488 6489 spin_unlock_irqrestore(&zone->lock, flags); 6490 } 6491 6492 /* update totalreserve_pages */ 6493 calculate_totalreserve_pages(); 6494 } 6495 6496 /** 6497 * setup_per_zone_wmarks - called when min_free_kbytes changes 6498 * or when memory is hot-{added|removed} 6499 * 6500 * Ensures that the watermark[min,low,high] values for each zone are set 6501 * correctly with respect to min_free_kbytes. 6502 */ 6503 void setup_per_zone_wmarks(void) 6504 { 6505 struct zone *zone; 6506 static DEFINE_SPINLOCK(lock); 6507 6508 spin_lock(&lock); 6509 __setup_per_zone_wmarks(); 6510 spin_unlock(&lock); 6511 6512 /* 6513 * The watermark size have changed so update the pcpu batch 6514 * and high limits or the limits may be inappropriate. 6515 */ 6516 for_each_zone(zone) 6517 zone_pcp_update(zone, 0); 6518 } 6519 6520 /* 6521 * Initialise min_free_kbytes. 6522 * 6523 * For small machines we want it small (128k min). For large machines 6524 * we want it large (256MB max). But it is not linear, because network 6525 * bandwidth does not increase linearly with machine size. We use 6526 * 6527 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6528 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6529 * 6530 * which yields 6531 * 6532 * 16MB: 512k 6533 * 32MB: 724k 6534 * 64MB: 1024k 6535 * 128MB: 1448k 6536 * 256MB: 2048k 6537 * 512MB: 2896k 6538 * 1024MB: 4096k 6539 * 2048MB: 5792k 6540 * 4096MB: 8192k 6541 * 8192MB: 11584k 6542 * 16384MB: 16384k 6543 */ 6544 void calculate_min_free_kbytes(void) 6545 { 6546 unsigned long lowmem_kbytes; 6547 int new_min_free_kbytes; 6548 6549 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6550 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6551 6552 if (new_min_free_kbytes > user_min_free_kbytes) 6553 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6554 else 6555 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6556 new_min_free_kbytes, user_min_free_kbytes); 6557 6558 } 6559 6560 int __meminit init_per_zone_wmark_min(void) 6561 { 6562 calculate_min_free_kbytes(); 6563 setup_per_zone_wmarks(); 6564 refresh_zone_stat_thresholds(); 6565 setup_per_zone_lowmem_reserve(); 6566 6567 #ifdef CONFIG_NUMA 6568 setup_min_unmapped_ratio(); 6569 setup_min_slab_ratio(); 6570 #endif 6571 6572 khugepaged_min_free_kbytes_update(); 6573 6574 return 0; 6575 } 6576 postcore_initcall(init_per_zone_wmark_min) 6577 6578 /* 6579 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6580 * that we can call two helper functions whenever min_free_kbytes 6581 * changes. 6582 */ 6583 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6584 void *buffer, size_t *length, loff_t *ppos) 6585 { 6586 int rc; 6587 6588 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6589 if (rc) 6590 return rc; 6591 6592 if (write) { 6593 user_min_free_kbytes = min_free_kbytes; 6594 setup_per_zone_wmarks(); 6595 } 6596 return 0; 6597 } 6598 6599 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6600 void *buffer, size_t *length, loff_t *ppos) 6601 { 6602 int rc; 6603 6604 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6605 if (rc) 6606 return rc; 6607 6608 if (write) 6609 setup_per_zone_wmarks(); 6610 6611 return 0; 6612 } 6613 6614 #ifdef CONFIG_NUMA 6615 static void setup_min_unmapped_ratio(void) 6616 { 6617 pg_data_t *pgdat; 6618 struct zone *zone; 6619 6620 for_each_online_pgdat(pgdat) 6621 pgdat->min_unmapped_pages = 0; 6622 6623 for_each_zone(zone) 6624 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6625 sysctl_min_unmapped_ratio) / 100; 6626 } 6627 6628 6629 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6630 void *buffer, size_t *length, loff_t *ppos) 6631 { 6632 int rc; 6633 6634 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6635 if (rc) 6636 return rc; 6637 6638 setup_min_unmapped_ratio(); 6639 6640 return 0; 6641 } 6642 6643 static void setup_min_slab_ratio(void) 6644 { 6645 pg_data_t *pgdat; 6646 struct zone *zone; 6647 6648 for_each_online_pgdat(pgdat) 6649 pgdat->min_slab_pages = 0; 6650 6651 for_each_zone(zone) 6652 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6653 sysctl_min_slab_ratio) / 100; 6654 } 6655 6656 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6657 void *buffer, size_t *length, loff_t *ppos) 6658 { 6659 int rc; 6660 6661 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6662 if (rc) 6663 return rc; 6664 6665 setup_min_slab_ratio(); 6666 6667 return 0; 6668 } 6669 #endif 6670 6671 /* 6672 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6673 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 6674 * whenever sysctl_lowmem_reserve_ratio changes. 6675 * 6676 * The reserve ratio obviously has absolutely no relation with the 6677 * minimum watermarks. The lowmem reserve ratio can only make sense 6678 * if in function of the boot time zone sizes. 6679 */ 6680 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6681 int write, void *buffer, size_t *length, loff_t *ppos) 6682 { 6683 int i; 6684 6685 proc_dointvec_minmax(table, write, buffer, length, ppos); 6686 6687 for (i = 0; i < MAX_NR_ZONES; i++) { 6688 if (sysctl_lowmem_reserve_ratio[i] < 1) 6689 sysctl_lowmem_reserve_ratio[i] = 0; 6690 } 6691 6692 setup_per_zone_lowmem_reserve(); 6693 return 0; 6694 } 6695 6696 /* 6697 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 6698 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6699 * pagelist can have before it gets flushed back to buddy allocator. 6700 */ 6701 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6702 int write, void *buffer, size_t *length, loff_t *ppos) 6703 { 6704 struct zone *zone; 6705 int old_percpu_pagelist_high_fraction; 6706 int ret; 6707 6708 /* 6709 * Avoid using pcp_batch_high_lock for reads as the value is read 6710 * atomically and a race with offlining is harmless. 6711 */ 6712 6713 if (!write) 6714 return proc_dointvec_minmax(table, write, buffer, length, ppos); 6715 6716 mutex_lock(&pcp_batch_high_lock); 6717 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6718 6719 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6720 if (ret < 0) 6721 goto out; 6722 6723 /* Sanity checking to avoid pcp imbalance */ 6724 if (percpu_pagelist_high_fraction && 6725 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6726 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6727 ret = -EINVAL; 6728 goto out; 6729 } 6730 6731 /* No change? */ 6732 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6733 goto out; 6734 6735 for_each_populated_zone(zone) 6736 zone_set_pageset_high_and_batch(zone, 0); 6737 out: 6738 mutex_unlock(&pcp_batch_high_lock); 6739 return ret; 6740 } 6741 6742 static const struct ctl_table page_alloc_sysctl_table[] = { 6743 { 6744 .procname = "min_free_kbytes", 6745 .data = &min_free_kbytes, 6746 .maxlen = sizeof(min_free_kbytes), 6747 .mode = 0644, 6748 .proc_handler = min_free_kbytes_sysctl_handler, 6749 .extra1 = SYSCTL_ZERO, 6750 }, 6751 { 6752 .procname = "watermark_boost_factor", 6753 .data = &watermark_boost_factor, 6754 .maxlen = sizeof(watermark_boost_factor), 6755 .mode = 0644, 6756 .proc_handler = proc_dointvec_minmax, 6757 .extra1 = SYSCTL_ZERO, 6758 }, 6759 { 6760 .procname = "watermark_scale_factor", 6761 .data = &watermark_scale_factor, 6762 .maxlen = sizeof(watermark_scale_factor), 6763 .mode = 0644, 6764 .proc_handler = watermark_scale_factor_sysctl_handler, 6765 .extra1 = SYSCTL_ONE, 6766 .extra2 = SYSCTL_THREE_THOUSAND, 6767 }, 6768 { 6769 .procname = "defrag_mode", 6770 .data = &defrag_mode, 6771 .maxlen = sizeof(defrag_mode), 6772 .mode = 0644, 6773 .proc_handler = proc_dointvec_minmax, 6774 .extra1 = SYSCTL_ZERO, 6775 .extra2 = SYSCTL_ONE, 6776 }, 6777 { 6778 .procname = "percpu_pagelist_high_fraction", 6779 .data = &percpu_pagelist_high_fraction, 6780 .maxlen = sizeof(percpu_pagelist_high_fraction), 6781 .mode = 0644, 6782 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6783 .extra1 = SYSCTL_ZERO, 6784 }, 6785 { 6786 .procname = "lowmem_reserve_ratio", 6787 .data = &sysctl_lowmem_reserve_ratio, 6788 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6789 .mode = 0644, 6790 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6791 }, 6792 #ifdef CONFIG_NUMA 6793 { 6794 .procname = "numa_zonelist_order", 6795 .data = &numa_zonelist_order, 6796 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6797 .mode = 0644, 6798 .proc_handler = numa_zonelist_order_handler, 6799 }, 6800 { 6801 .procname = "min_unmapped_ratio", 6802 .data = &sysctl_min_unmapped_ratio, 6803 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6804 .mode = 0644, 6805 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6806 .extra1 = SYSCTL_ZERO, 6807 .extra2 = SYSCTL_ONE_HUNDRED, 6808 }, 6809 { 6810 .procname = "min_slab_ratio", 6811 .data = &sysctl_min_slab_ratio, 6812 .maxlen = sizeof(sysctl_min_slab_ratio), 6813 .mode = 0644, 6814 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6815 .extra1 = SYSCTL_ZERO, 6816 .extra2 = SYSCTL_ONE_HUNDRED, 6817 }, 6818 #endif 6819 }; 6820 6821 void __init page_alloc_sysctl_init(void) 6822 { 6823 register_sysctl_init("vm", page_alloc_sysctl_table); 6824 } 6825 6826 #ifdef CONFIG_CONTIG_ALLOC 6827 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6828 static void alloc_contig_dump_pages(struct list_head *page_list) 6829 { 6830 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6831 6832 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 6833 struct page *page; 6834 6835 dump_stack(); 6836 list_for_each_entry(page, page_list, lru) 6837 dump_page(page, "migration failure"); 6838 } 6839 } 6840 6841 /* [start, end) must belong to a single zone. */ 6842 static int __alloc_contig_migrate_range(struct compact_control *cc, 6843 unsigned long start, unsigned long end) 6844 { 6845 /* This function is based on compact_zone() from compaction.c. */ 6846 unsigned int nr_reclaimed; 6847 unsigned long pfn = start; 6848 unsigned int tries = 0; 6849 int ret = 0; 6850 struct migration_target_control mtc = { 6851 .nid = zone_to_nid(cc->zone), 6852 .gfp_mask = cc->gfp_mask, 6853 .reason = MR_CONTIG_RANGE, 6854 }; 6855 6856 lru_cache_disable(); 6857 6858 while (pfn < end || !list_empty(&cc->migratepages)) { 6859 if (fatal_signal_pending(current)) { 6860 ret = -EINTR; 6861 break; 6862 } 6863 6864 if (list_empty(&cc->migratepages)) { 6865 cc->nr_migratepages = 0; 6866 ret = isolate_migratepages_range(cc, pfn, end); 6867 if (ret && ret != -EAGAIN) 6868 break; 6869 pfn = cc->migrate_pfn; 6870 tries = 0; 6871 } else if (++tries == 5) { 6872 ret = -EBUSY; 6873 break; 6874 } 6875 6876 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6877 &cc->migratepages); 6878 cc->nr_migratepages -= nr_reclaimed; 6879 6880 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6881 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6882 6883 /* 6884 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6885 * to retry again over this error, so do the same here. 6886 */ 6887 if (ret == -ENOMEM) 6888 break; 6889 } 6890 6891 lru_cache_enable(); 6892 if (ret < 0) { 6893 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6894 alloc_contig_dump_pages(&cc->migratepages); 6895 putback_movable_pages(&cc->migratepages); 6896 } 6897 6898 return (ret < 0) ? ret : 0; 6899 } 6900 6901 static void split_free_frozen_pages(struct list_head *list, gfp_t gfp_mask) 6902 { 6903 int order; 6904 6905 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6906 struct page *page, *next; 6907 int nr_pages = 1 << order; 6908 6909 list_for_each_entry_safe(page, next, &list[order], lru) { 6910 int i; 6911 6912 post_alloc_hook(page, order, gfp_mask); 6913 if (!order) 6914 continue; 6915 6916 __split_page(page, order); 6917 6918 /* Add all subpages to the order-0 head, in sequence. */ 6919 list_del(&page->lru); 6920 for (i = 0; i < nr_pages; i++) 6921 list_add_tail(&page[i].lru, &list[0]); 6922 } 6923 } 6924 } 6925 6926 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) 6927 { 6928 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 6929 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | 6930 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO; 6931 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 6932 6933 /* 6934 * We are given the range to allocate; node, mobility and placement 6935 * hints are irrelevant at this point. We'll simply ignore them. 6936 */ 6937 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | 6938 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE); 6939 6940 /* 6941 * We only support most reclaim flags (but not NOFAIL/NORETRY), and 6942 * selected action flags. 6943 */ 6944 if (gfp_mask & ~(reclaim_mask | action_mask)) 6945 return -EINVAL; 6946 6947 /* 6948 * Flags to control page compaction/migration/reclaim, to free up our 6949 * page range. Migratable pages are movable, __GFP_MOVABLE is implied 6950 * for them. 6951 * 6952 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that 6953 * to not degrade callers. 6954 */ 6955 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | 6956 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; 6957 return 0; 6958 } 6959 6960 static void __free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages) 6961 { 6962 for (; nr_pages--; pfn++) 6963 free_frozen_pages(pfn_to_page(pfn), 0); 6964 } 6965 6966 /** 6967 * alloc_contig_frozen_range() -- tries to allocate given range of frozen pages 6968 * @start: start PFN to allocate 6969 * @end: one-past-the-last PFN to allocate 6970 * @alloc_flags: allocation information 6971 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some 6972 * action and reclaim modifiers are supported. Reclaim modifiers 6973 * control allocation behavior during compaction/migration/reclaim. 6974 * 6975 * The PFN range does not have to be pageblock aligned. The PFN range must 6976 * belong to a single zone. 6977 * 6978 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6979 * pageblocks in the range. Once isolated, the pageblocks should not 6980 * be modified by others. 6981 * 6982 * All frozen pages which PFN is in [start, end) are allocated for the 6983 * caller, and they could be freed with free_contig_frozen_range(), 6984 * free_frozen_pages() also could be used to free compound frozen pages 6985 * directly. 6986 * 6987 * Return: zero on success or negative error code. 6988 */ 6989 int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end, 6990 acr_flags_t alloc_flags, gfp_t gfp_mask) 6991 { 6992 const unsigned int order = ilog2(end - start); 6993 unsigned long outer_start, outer_end; 6994 int ret = 0; 6995 6996 struct compact_control cc = { 6997 .nr_migratepages = 0, 6998 .order = -1, 6999 .zone = page_zone(pfn_to_page(start)), 7000 .mode = MIGRATE_SYNC, 7001 .ignore_skip_hint = true, 7002 .no_set_skip_hint = true, 7003 .alloc_contig = true, 7004 }; 7005 INIT_LIST_HEAD(&cc.migratepages); 7006 enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ? 7007 PB_ISOLATE_MODE_CMA_ALLOC : 7008 PB_ISOLATE_MODE_OTHER; 7009 7010 /* 7011 * In contrast to the buddy, we allow for orders here that exceed 7012 * MAX_PAGE_ORDER, so we must manually make sure that we are not 7013 * exceeding the maximum folio order. 7014 */ 7015 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER)) 7016 return -EINVAL; 7017 7018 gfp_mask = current_gfp_context(gfp_mask); 7019 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) 7020 return -EINVAL; 7021 7022 /* 7023 * What we do here is we mark all pageblocks in range as 7024 * MIGRATE_ISOLATE. Because pageblock and max order pages may 7025 * have different sizes, and due to the way page allocator 7026 * work, start_isolate_page_range() has special handlings for this. 7027 * 7028 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 7029 * migrate the pages from an unaligned range (ie. pages that 7030 * we are interested in). This will put all the pages in 7031 * range back to page allocator as MIGRATE_ISOLATE. 7032 * 7033 * When this is done, we take the pages in range from page 7034 * allocator removing them from the buddy system. This way 7035 * page allocator will never consider using them. 7036 * 7037 * This lets us mark the pageblocks back as 7038 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 7039 * aligned range but not in the unaligned, original range are 7040 * put back to page allocator so that buddy can use them. 7041 */ 7042 7043 ret = start_isolate_page_range(start, end, mode); 7044 if (ret) 7045 goto done; 7046 7047 drain_all_pages(cc.zone); 7048 7049 /* 7050 * In case of -EBUSY, we'd like to know which page causes problem. 7051 * So, just fall through. test_pages_isolated() has a tracepoint 7052 * which will report the busy page. 7053 * 7054 * It is possible that busy pages could become available before 7055 * the call to test_pages_isolated, and the range will actually be 7056 * allocated. So, if we fall through be sure to clear ret so that 7057 * -EBUSY is not accidentally used or returned to caller. 7058 */ 7059 ret = __alloc_contig_migrate_range(&cc, start, end); 7060 if (ret && ret != -EBUSY) 7061 goto done; 7062 7063 /* 7064 * When in-use hugetlb pages are migrated, they may simply be released 7065 * back into the free hugepage pool instead of being returned to the 7066 * buddy system. After the migration of in-use huge pages is completed, 7067 * we will invoke replace_free_hugepage_folios() to ensure that these 7068 * hugepages are properly released to the buddy system. 7069 */ 7070 ret = replace_free_hugepage_folios(start, end); 7071 if (ret) 7072 goto done; 7073 7074 /* 7075 * Pages from [start, end) are within a pageblock_nr_pages 7076 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 7077 * more, all pages in [start, end) are free in page allocator. 7078 * What we are going to do is to allocate all pages from 7079 * [start, end) (that is remove them from page allocator). 7080 * 7081 * The only problem is that pages at the beginning and at the 7082 * end of interesting range may be not aligned with pages that 7083 * page allocator holds, ie. they can be part of higher order 7084 * pages. Because of this, we reserve the bigger range and 7085 * once this is done free the pages we are not interested in. 7086 * 7087 * We don't have to hold zone->lock here because the pages are 7088 * isolated thus they won't get removed from buddy. 7089 */ 7090 outer_start = find_large_buddy(start); 7091 7092 /* Make sure the range is really isolated. */ 7093 if (test_pages_isolated(outer_start, end, mode)) { 7094 ret = -EBUSY; 7095 goto done; 7096 } 7097 7098 /* Grab isolated pages from freelists. */ 7099 outer_end = isolate_freepages_range(&cc, outer_start, end); 7100 if (!outer_end) { 7101 ret = -EBUSY; 7102 goto done; 7103 } 7104 7105 if (!(gfp_mask & __GFP_COMP)) { 7106 split_free_frozen_pages(cc.freepages, gfp_mask); 7107 7108 /* Free head and tail (if any) */ 7109 if (start != outer_start) 7110 __free_contig_frozen_range(outer_start, start - outer_start); 7111 if (end != outer_end) 7112 __free_contig_frozen_range(end, outer_end - end); 7113 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) { 7114 struct page *head = pfn_to_page(start); 7115 7116 check_new_pages(head, order); 7117 prep_new_page(head, order, gfp_mask, 0); 7118 } else { 7119 ret = -EINVAL; 7120 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", 7121 start, end, outer_start, outer_end); 7122 } 7123 done: 7124 undo_isolate_page_range(start, end); 7125 return ret; 7126 } 7127 EXPORT_SYMBOL(alloc_contig_frozen_range_noprof); 7128 7129 /** 7130 * alloc_contig_range() -- tries to allocate given range of pages 7131 * @start: start PFN to allocate 7132 * @end: one-past-the-last PFN to allocate 7133 * @alloc_flags: allocation information 7134 * @gfp_mask: GFP mask. 7135 * 7136 * This routine is a wrapper around alloc_contig_frozen_range(), it can't 7137 * be used to allocate compound pages, the refcount of each allocated page 7138 * will be set to one. 7139 * 7140 * All pages which PFN is in [start, end) are allocated for the caller, 7141 * and should be freed with free_contig_range() or by manually calling 7142 * __free_page() on each allocated page. 7143 * 7144 * Return: zero on success or negative error code. 7145 */ 7146 int alloc_contig_range_noprof(unsigned long start, unsigned long end, 7147 acr_flags_t alloc_flags, gfp_t gfp_mask) 7148 { 7149 int ret; 7150 7151 if (WARN_ON(gfp_mask & __GFP_COMP)) 7152 return -EINVAL; 7153 7154 ret = alloc_contig_frozen_range_noprof(start, end, alloc_flags, gfp_mask); 7155 if (!ret) 7156 set_pages_refcounted(pfn_to_page(start), end - start); 7157 7158 return ret; 7159 } 7160 EXPORT_SYMBOL(alloc_contig_range_noprof); 7161 7162 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 7163 unsigned long nr_pages, bool skip_hugetlb, 7164 bool *skipped_hugetlb) 7165 { 7166 unsigned long end_pfn = start_pfn + nr_pages; 7167 struct page *page; 7168 7169 while (start_pfn < end_pfn) { 7170 unsigned long step = 1; 7171 7172 page = pfn_to_online_page(start_pfn); 7173 if (!page) 7174 return false; 7175 7176 if (page_zone(page) != z) 7177 return false; 7178 7179 if (page_is_unmovable(z, page, PB_ISOLATE_MODE_OTHER, &step)) 7180 return false; 7181 7182 /* 7183 * Only consider ranges containing hugepages if those pages are 7184 * smaller than the requested contiguous region. e.g.: 7185 * Move 2MB pages to free up a 1GB range. 7186 * Don't move 1GB pages to free up a 2MB range. 7187 * 7188 * This makes contiguous allocation more reliable if multiple 7189 * hugepage sizes are used without causing needless movement. 7190 */ 7191 if (PageHuge(page)) { 7192 unsigned int order; 7193 7194 if (skip_hugetlb) { 7195 *skipped_hugetlb = true; 7196 return false; 7197 } 7198 7199 page = compound_head(page); 7200 order = compound_order(page); 7201 if ((order >= MAX_FOLIO_ORDER) || 7202 (nr_pages <= (1 << order))) 7203 return false; 7204 } 7205 7206 start_pfn += step; 7207 } 7208 return true; 7209 } 7210 7211 static bool zone_spans_last_pfn(const struct zone *zone, 7212 unsigned long start_pfn, unsigned long nr_pages) 7213 { 7214 unsigned long last_pfn = start_pfn + nr_pages - 1; 7215 7216 return zone_spans_pfn(zone, last_pfn); 7217 } 7218 7219 /** 7220 * alloc_contig_frozen_pages() -- tries to find and allocate contiguous range of frozen pages 7221 * @nr_pages: Number of contiguous pages to allocate 7222 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some 7223 * action and reclaim modifiers are supported. Reclaim modifiers 7224 * control allocation behavior during compaction/migration/reclaim. 7225 * @nid: Target node 7226 * @nodemask: Mask for other possible nodes 7227 * 7228 * This routine is a wrapper around alloc_contig_frozen_range(). It scans over 7229 * zones on an applicable zonelist to find a contiguous pfn range which can then 7230 * be tried for allocation with alloc_contig_frozen_range(). This routine is 7231 * intended for allocation requests which can not be fulfilled with the buddy 7232 * allocator. 7233 * 7234 * The allocated memory is always aligned to a page boundary. If nr_pages is a 7235 * power of two, then allocated range is also guaranteed to be aligned to same 7236 * nr_pages (e.g. 1GB request would be aligned to 1GB). 7237 * 7238 * Allocated frozen pages need be freed with free_contig_frozen_range(), 7239 * or by manually calling free_frozen_pages() on each allocated frozen 7240 * non-compound page, for compound frozen pages could be freed with 7241 * free_frozen_pages() directly. 7242 * 7243 * Return: pointer to contiguous frozen pages on success, or NULL if not successful. 7244 */ 7245 struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages, 7246 gfp_t gfp_mask, int nid, nodemask_t *nodemask) 7247 { 7248 unsigned long ret, pfn, flags; 7249 struct zonelist *zonelist; 7250 struct zone *zone; 7251 struct zoneref *z; 7252 bool skip_hugetlb = true; 7253 bool skipped_hugetlb = false; 7254 7255 retry: 7256 zonelist = node_zonelist(nid, gfp_mask); 7257 for_each_zone_zonelist_nodemask(zone, z, zonelist, 7258 gfp_zone(gfp_mask), nodemask) { 7259 spin_lock_irqsave(&zone->lock, flags); 7260 7261 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 7262 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 7263 if (pfn_range_valid_contig(zone, pfn, nr_pages, 7264 skip_hugetlb, 7265 &skipped_hugetlb)) { 7266 /* 7267 * We release the zone lock here because 7268 * alloc_contig_frozen_range() will also lock 7269 * the zone at some point. If there's an 7270 * allocation spinning on this lock, it may 7271 * win the race and cause allocation to fail. 7272 */ 7273 spin_unlock_irqrestore(&zone->lock, flags); 7274 ret = alloc_contig_frozen_range_noprof(pfn, 7275 pfn + nr_pages, 7276 ACR_FLAGS_NONE, 7277 gfp_mask); 7278 if (!ret) 7279 return pfn_to_page(pfn); 7280 spin_lock_irqsave(&zone->lock, flags); 7281 } 7282 pfn += nr_pages; 7283 } 7284 spin_unlock_irqrestore(&zone->lock, flags); 7285 } 7286 /* 7287 * If we failed, retry the search, but treat regions with HugeTLB pages 7288 * as valid targets. This retains fast-allocations on first pass 7289 * without trying to migrate HugeTLB pages (which may fail). On the 7290 * second pass, we will try moving HugeTLB pages when those pages are 7291 * smaller than the requested contiguous region size. 7292 */ 7293 if (skip_hugetlb && skipped_hugetlb) { 7294 skip_hugetlb = false; 7295 goto retry; 7296 } 7297 return NULL; 7298 } 7299 EXPORT_SYMBOL(alloc_contig_frozen_pages_noprof); 7300 7301 /** 7302 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 7303 * @nr_pages: Number of contiguous pages to allocate 7304 * @gfp_mask: GFP mask. 7305 * @nid: Target node 7306 * @nodemask: Mask for other possible nodes 7307 * 7308 * This routine is a wrapper around alloc_contig_frozen_pages(), it can't 7309 * be used to allocate compound pages, the refcount of each allocated page 7310 * will be set to one. 7311 * 7312 * Allocated pages can be freed with free_contig_range() or by manually 7313 * calling __free_page() on each allocated page. 7314 * 7315 * Return: pointer to contiguous pages on success, or NULL if not successful. 7316 */ 7317 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 7318 int nid, nodemask_t *nodemask) 7319 { 7320 struct page *page; 7321 7322 if (WARN_ON(gfp_mask & __GFP_COMP)) 7323 return NULL; 7324 7325 page = alloc_contig_frozen_pages_noprof(nr_pages, gfp_mask, nid, 7326 nodemask); 7327 if (page) 7328 set_pages_refcounted(page, nr_pages); 7329 7330 return page; 7331 } 7332 EXPORT_SYMBOL(alloc_contig_pages_noprof); 7333 7334 /** 7335 * free_contig_frozen_range() -- free the contiguous range of frozen pages 7336 * @pfn: start PFN to free 7337 * @nr_pages: Number of contiguous frozen pages to free 7338 * 7339 * This can be used to free the allocated compound/non-compound frozen pages. 7340 */ 7341 void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages) 7342 { 7343 struct page *first_page = pfn_to_page(pfn); 7344 const unsigned int order = ilog2(nr_pages); 7345 7346 if (WARN_ON_ONCE(first_page != compound_head(first_page))) 7347 return; 7348 7349 if (PageHead(first_page)) { 7350 WARN_ON_ONCE(order != compound_order(first_page)); 7351 free_frozen_pages(first_page, order); 7352 return; 7353 } 7354 7355 __free_contig_frozen_range(pfn, nr_pages); 7356 } 7357 EXPORT_SYMBOL(free_contig_frozen_range); 7358 7359 /** 7360 * free_contig_range() -- free the contiguous range of pages 7361 * @pfn: start PFN to free 7362 * @nr_pages: Number of contiguous pages to free 7363 * 7364 * This can be only used to free the allocated non-compound pages. 7365 */ 7366 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 7367 { 7368 if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn)))) 7369 return; 7370 7371 for (; nr_pages--; pfn++) 7372 __free_page(pfn_to_page(pfn)); 7373 } 7374 EXPORT_SYMBOL(free_contig_range); 7375 #endif /* CONFIG_CONTIG_ALLOC */ 7376 7377 /* 7378 * Effectively disable pcplists for the zone by setting the high limit to 0 7379 * and draining all cpus. A concurrent page freeing on another CPU that's about 7380 * to put the page on pcplist will either finish before the drain and the page 7381 * will be drained, or observe the new high limit and skip the pcplist. 7382 * 7383 * Must be paired with a call to zone_pcp_enable(). 7384 */ 7385 void zone_pcp_disable(struct zone *zone) 7386 { 7387 mutex_lock(&pcp_batch_high_lock); 7388 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 7389 __drain_all_pages(zone, true); 7390 } 7391 7392 void zone_pcp_enable(struct zone *zone) 7393 { 7394 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 7395 zone->pageset_high_max, zone->pageset_batch); 7396 mutex_unlock(&pcp_batch_high_lock); 7397 } 7398 7399 void zone_pcp_reset(struct zone *zone) 7400 { 7401 int cpu; 7402 struct per_cpu_zonestat *pzstats; 7403 7404 if (zone->per_cpu_pageset != &boot_pageset) { 7405 for_each_online_cpu(cpu) { 7406 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7407 drain_zonestat(zone, pzstats); 7408 } 7409 free_percpu(zone->per_cpu_pageset); 7410 zone->per_cpu_pageset = &boot_pageset; 7411 if (zone->per_cpu_zonestats != &boot_zonestats) { 7412 free_percpu(zone->per_cpu_zonestats); 7413 zone->per_cpu_zonestats = &boot_zonestats; 7414 } 7415 } 7416 } 7417 7418 #ifdef CONFIG_MEMORY_HOTREMOVE 7419 /* 7420 * All pages in the range must be in a single zone, must not contain holes, 7421 * must span full sections, and must be isolated before calling this function. 7422 * 7423 * Returns the number of managed (non-PageOffline()) pages in the range: the 7424 * number of pages for which memory offlining code must adjust managed page 7425 * counters using adjust_managed_page_count(). 7426 */ 7427 unsigned long __offline_isolated_pages(unsigned long start_pfn, 7428 unsigned long end_pfn) 7429 { 7430 unsigned long already_offline = 0, flags; 7431 unsigned long pfn = start_pfn; 7432 struct page *page; 7433 struct zone *zone; 7434 unsigned int order; 7435 7436 offline_mem_sections(pfn, end_pfn); 7437 zone = page_zone(pfn_to_page(pfn)); 7438 spin_lock_irqsave(&zone->lock, flags); 7439 while (pfn < end_pfn) { 7440 page = pfn_to_page(pfn); 7441 /* 7442 * The HWPoisoned page may be not in buddy system, and 7443 * page_count() is not 0. 7444 */ 7445 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 7446 pfn++; 7447 continue; 7448 } 7449 /* 7450 * At this point all remaining PageOffline() pages have a 7451 * reference count of 0 and can simply be skipped. 7452 */ 7453 if (PageOffline(page)) { 7454 BUG_ON(page_count(page)); 7455 BUG_ON(PageBuddy(page)); 7456 already_offline++; 7457 pfn++; 7458 continue; 7459 } 7460 7461 BUG_ON(page_count(page)); 7462 BUG_ON(!PageBuddy(page)); 7463 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 7464 order = buddy_order(page); 7465 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 7466 pfn += (1 << order); 7467 } 7468 spin_unlock_irqrestore(&zone->lock, flags); 7469 7470 return end_pfn - start_pfn - already_offline; 7471 } 7472 #endif 7473 7474 /* 7475 * This function returns a stable result only if called under zone lock. 7476 */ 7477 bool is_free_buddy_page(const struct page *page) 7478 { 7479 unsigned long pfn = page_to_pfn(page); 7480 unsigned int order; 7481 7482 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7483 const struct page *head = page - (pfn & ((1 << order) - 1)); 7484 7485 if (PageBuddy(head) && 7486 buddy_order_unsafe(head) >= order) 7487 break; 7488 } 7489 7490 return order <= MAX_PAGE_ORDER; 7491 } 7492 EXPORT_SYMBOL(is_free_buddy_page); 7493 7494 #ifdef CONFIG_MEMORY_FAILURE 7495 static inline void add_to_free_list(struct page *page, struct zone *zone, 7496 unsigned int order, int migratetype, 7497 bool tail) 7498 { 7499 __add_to_free_list(page, zone, order, migratetype, tail); 7500 account_freepages(zone, 1 << order, migratetype); 7501 } 7502 7503 /* 7504 * Break down a higher-order page in sub-pages, and keep our target out of 7505 * buddy allocator. 7506 */ 7507 static void break_down_buddy_pages(struct zone *zone, struct page *page, 7508 struct page *target, int low, int high, 7509 int migratetype) 7510 { 7511 unsigned long size = 1 << high; 7512 struct page *current_buddy; 7513 7514 while (high > low) { 7515 high--; 7516 size >>= 1; 7517 7518 if (target >= &page[size]) { 7519 current_buddy = page; 7520 page = page + size; 7521 } else { 7522 current_buddy = page + size; 7523 } 7524 7525 if (set_page_guard(zone, current_buddy, high)) 7526 continue; 7527 7528 add_to_free_list(current_buddy, zone, high, migratetype, false); 7529 set_buddy_order(current_buddy, high); 7530 } 7531 } 7532 7533 /* 7534 * Take a page that will be marked as poisoned off the buddy allocator. 7535 */ 7536 bool take_page_off_buddy(struct page *page) 7537 { 7538 struct zone *zone = page_zone(page); 7539 unsigned long pfn = page_to_pfn(page); 7540 unsigned long flags; 7541 unsigned int order; 7542 bool ret = false; 7543 7544 spin_lock_irqsave(&zone->lock, flags); 7545 for (order = 0; order < NR_PAGE_ORDERS; order++) { 7546 struct page *page_head = page - (pfn & ((1 << order) - 1)); 7547 int page_order = buddy_order(page_head); 7548 7549 if (PageBuddy(page_head) && page_order >= order) { 7550 unsigned long pfn_head = page_to_pfn(page_head); 7551 int migratetype = get_pfnblock_migratetype(page_head, 7552 pfn_head); 7553 7554 del_page_from_free_list(page_head, zone, page_order, 7555 migratetype); 7556 break_down_buddy_pages(zone, page_head, page, 0, 7557 page_order, migratetype); 7558 SetPageHWPoisonTakenOff(page); 7559 ret = true; 7560 break; 7561 } 7562 if (page_count(page_head) > 0) 7563 break; 7564 } 7565 spin_unlock_irqrestore(&zone->lock, flags); 7566 return ret; 7567 } 7568 7569 /* 7570 * Cancel takeoff done by take_page_off_buddy(). 7571 */ 7572 bool put_page_back_buddy(struct page *page) 7573 { 7574 struct zone *zone = page_zone(page); 7575 unsigned long flags; 7576 bool ret = false; 7577 7578 spin_lock_irqsave(&zone->lock, flags); 7579 if (put_page_testzero(page)) { 7580 unsigned long pfn = page_to_pfn(page); 7581 int migratetype = get_pfnblock_migratetype(page, pfn); 7582 7583 ClearPageHWPoisonTakenOff(page); 7584 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 7585 if (TestClearPageHWPoison(page)) { 7586 ret = true; 7587 } 7588 } 7589 spin_unlock_irqrestore(&zone->lock, flags); 7590 7591 return ret; 7592 } 7593 #endif 7594 7595 bool has_managed_zone(enum zone_type zone) 7596 { 7597 struct pglist_data *pgdat; 7598 7599 for_each_online_pgdat(pgdat) { 7600 if (managed_zone(&pgdat->node_zones[zone])) 7601 return true; 7602 } 7603 return false; 7604 } 7605 7606 #ifdef CONFIG_UNACCEPTED_MEMORY 7607 7608 static bool lazy_accept = true; 7609 7610 static int __init accept_memory_parse(char *p) 7611 { 7612 if (!strcmp(p, "lazy")) { 7613 lazy_accept = true; 7614 return 0; 7615 } else if (!strcmp(p, "eager")) { 7616 lazy_accept = false; 7617 return 0; 7618 } else { 7619 return -EINVAL; 7620 } 7621 } 7622 early_param("accept_memory", accept_memory_parse); 7623 7624 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7625 { 7626 phys_addr_t start = page_to_phys(page); 7627 7628 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 7629 } 7630 7631 static void __accept_page(struct zone *zone, unsigned long *flags, 7632 struct page *page) 7633 { 7634 list_del(&page->lru); 7635 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7636 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7637 __ClearPageUnaccepted(page); 7638 spin_unlock_irqrestore(&zone->lock, *flags); 7639 7640 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 7641 7642 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 7643 } 7644 7645 void accept_page(struct page *page) 7646 { 7647 struct zone *zone = page_zone(page); 7648 unsigned long flags; 7649 7650 spin_lock_irqsave(&zone->lock, flags); 7651 if (!PageUnaccepted(page)) { 7652 spin_unlock_irqrestore(&zone->lock, flags); 7653 return; 7654 } 7655 7656 /* Unlocks zone->lock */ 7657 __accept_page(zone, &flags, page); 7658 } 7659 7660 static bool try_to_accept_memory_one(struct zone *zone) 7661 { 7662 unsigned long flags; 7663 struct page *page; 7664 7665 spin_lock_irqsave(&zone->lock, flags); 7666 page = list_first_entry_or_null(&zone->unaccepted_pages, 7667 struct page, lru); 7668 if (!page) { 7669 spin_unlock_irqrestore(&zone->lock, flags); 7670 return false; 7671 } 7672 7673 /* Unlocks zone->lock */ 7674 __accept_page(zone, &flags, page); 7675 7676 return true; 7677 } 7678 7679 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7680 int alloc_flags) 7681 { 7682 long to_accept, wmark; 7683 bool ret = false; 7684 7685 if (list_empty(&zone->unaccepted_pages)) 7686 return false; 7687 7688 /* Bailout, since try_to_accept_memory_one() needs to take a lock */ 7689 if (alloc_flags & ALLOC_TRYLOCK) 7690 return false; 7691 7692 wmark = promo_wmark_pages(zone); 7693 7694 /* 7695 * Watermarks have not been initialized yet. 7696 * 7697 * Accepting one MAX_ORDER page to ensure progress. 7698 */ 7699 if (!wmark) 7700 return try_to_accept_memory_one(zone); 7701 7702 /* How much to accept to get to promo watermark? */ 7703 to_accept = wmark - 7704 (zone_page_state(zone, NR_FREE_PAGES) - 7705 __zone_watermark_unusable_free(zone, order, 0) - 7706 zone_page_state(zone, NR_UNACCEPTED)); 7707 7708 while (to_accept > 0) { 7709 if (!try_to_accept_memory_one(zone)) 7710 break; 7711 ret = true; 7712 to_accept -= MAX_ORDER_NR_PAGES; 7713 } 7714 7715 return ret; 7716 } 7717 7718 static bool __free_unaccepted(struct page *page) 7719 { 7720 struct zone *zone = page_zone(page); 7721 unsigned long flags; 7722 7723 if (!lazy_accept) 7724 return false; 7725 7726 spin_lock_irqsave(&zone->lock, flags); 7727 list_add_tail(&page->lru, &zone->unaccepted_pages); 7728 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7729 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7730 __SetPageUnaccepted(page); 7731 spin_unlock_irqrestore(&zone->lock, flags); 7732 7733 return true; 7734 } 7735 7736 #else 7737 7738 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7739 { 7740 return false; 7741 } 7742 7743 static bool cond_accept_memory(struct zone *zone, unsigned int order, 7744 int alloc_flags) 7745 { 7746 return false; 7747 } 7748 7749 static bool __free_unaccepted(struct page *page) 7750 { 7751 BUILD_BUG(); 7752 return false; 7753 } 7754 7755 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7756 7757 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) 7758 { 7759 /* 7760 * Do not specify __GFP_DIRECT_RECLAIM, since direct claim is not allowed. 7761 * Do not specify __GFP_KSWAPD_RECLAIM either, since wake up of kswapd 7762 * is not safe in arbitrary context. 7763 * 7764 * These two are the conditions for gfpflags_allow_spinning() being true. 7765 * 7766 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason 7767 * to warn. Also warn would trigger printk() which is unsafe from 7768 * various contexts. We cannot use printk_deferred_enter() to mitigate, 7769 * since the running context is unknown. 7770 * 7771 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below 7772 * is safe in any context. Also zeroing the page is mandatory for 7773 * BPF use cases. 7774 * 7775 * Though __GFP_NOMEMALLOC is not checked in the code path below, 7776 * specify it here to highlight that alloc_pages_nolock() 7777 * doesn't want to deplete reserves. 7778 */ 7779 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP 7780 | gfp_flags; 7781 unsigned int alloc_flags = ALLOC_TRYLOCK; 7782 struct alloc_context ac = { }; 7783 struct page *page; 7784 7785 VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT); 7786 /* 7787 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is 7788 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current 7789 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will 7790 * mark the task as the owner of another rt_spin_lock which will 7791 * confuse PI logic, so return immediately if called from hard IRQ or 7792 * NMI. 7793 * 7794 * Note, irqs_disabled() case is ok. This function can be called 7795 * from raw_spin_lock_irqsave region. 7796 */ 7797 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) 7798 return NULL; 7799 if (!pcp_allowed_order(order)) 7800 return NULL; 7801 7802 /* Bailout, since _deferred_grow_zone() needs to take a lock */ 7803 if (deferred_pages_enabled()) 7804 return NULL; 7805 7806 if (nid == NUMA_NO_NODE) 7807 nid = numa_node_id(); 7808 7809 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac, 7810 &alloc_gfp, &alloc_flags); 7811 7812 /* 7813 * Best effort allocation from percpu free list. 7814 * If it's empty attempt to spin_trylock zone->lock. 7815 */ 7816 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 7817 7818 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */ 7819 7820 if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) && 7821 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) { 7822 __free_frozen_pages(page, order, FPI_TRYLOCK); 7823 page = NULL; 7824 } 7825 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 7826 kmsan_alloc_page(page, order, alloc_gfp); 7827 return page; 7828 } 7829 /** 7830 * alloc_pages_nolock - opportunistic reentrant allocation from any context 7831 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed. 7832 * @nid: node to allocate from 7833 * @order: allocation order size 7834 * 7835 * Allocates pages of a given order from the given node. This is safe to 7836 * call from any context (from atomic, NMI, and also reentrant 7837 * allocator -> tracepoint -> alloc_pages_nolock_noprof). 7838 * Allocation is best effort and to be expected to fail easily so nobody should 7839 * rely on the success. Failures are not reported via warn_alloc(). 7840 * See always fail conditions below. 7841 * 7842 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN. 7843 * It means ENOMEM. There is no reason to call it again and expect !NULL. 7844 */ 7845 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) 7846 { 7847 struct page *page; 7848 7849 page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order); 7850 if (page) 7851 set_page_refcounted(page); 7852 return page; 7853 } 7854 EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof); 7855