// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/pagevec.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <linux/cacheinfo.h>
#include <linux/pgalloc_tag.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))
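/*
 * Illustrative use of the FPI flags above (a sketch, not a new call site):
 * a caller that briefly pulls a page off a freelist and puts it back
 * unmodified would free it with
 *
 *	__free_one_page(page, pfn, zone, order, mt,
 *			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
 *
 * so the reporting infrastructure is not re-notified and the page is
 * parked at the freelist tail.
 */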
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * The return value should be used with the matching unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_trylock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock(&_ret->member)) {				\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)						\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)
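/*
 * Typical usage of the helpers above (an illustrative sketch only; the
 * real call sites appear later in this file):
 *
 *	pcp_trylock_prepare(UP_flags);
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (pcp) {
 *		... operate on the pinned, locked pcp ...
 *		pcp_spin_unlock(pcp);
 *	}
 *	pcp_trylock_finish(UP_flags);
 *
 * On failure the trylock variant already unpins the task, so only the
 * UP IRQ state needs to be unwound.
 */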
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

static bool page_contains_unaccepted(struct page *page, unsigned int order);
static bool cond_accept_memory(struct zone *zone, unsigned int order);
static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}
/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids a
 * section mismatch warning while still allowing the __init function body to
 * be freed after boot.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
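/*
 * Worked example (illustrative numbers): with pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4, a pfn whose offset within its section is 0x1200
 * maps to bitidx == (0x1200 >> 9) * 4 == 36, i.e. bits 36..39 of the
 * pageblock bitmap describe that pageblock.
 */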
/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
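/*
 * The update loop in set_pfnblock_flags_mask() is the standard lockless
 * read-modify-write idiom, spelled out (illustrative sketch):
 *
 *	old = READ_ONCE(*word);
 *	do {
 *		new = (old & ~mask) | flags;
 *	} while (!try_cmpxchg(word, &old, new));
 *
 * try_cmpxchg() updates 'old' on failure, so the new value is recomputed
 * from the latest contents until the swap succeeds.
 */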
void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		ret = !zone_spans_pfn(zone, pfn);
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return true;
	if (zone != page_zone(page))
		return true;

	return false;
}
#else
static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return false;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	if (PageBuddy(page))
		__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	bool __maybe_unused movable;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != HPAGE_PMD_ORDER);

		movable = migratetype == MIGRATE_MOVABLE;

		return NR_LOWORDER_PCP_LISTS + movable;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex >= NR_LOWORDER_PCP_LISTS)
		order = HPAGE_PMD_ORDER;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == HPAGE_PMD_ORDER)
		return true;
#endif
	return false;
}
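/*
 * Worked example (illustrative; assumes the common MIGRATE_PCPTYPES == 3):
 * an order-2 MIGRATE_MOVABLE (== 1) page maps to pcp list
 * order_to_pindex(1, 2) == 3 * 2 + 1 == 7, and pindex_to_order(7)
 * recovers order 7 / 3 == 2.
 */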
/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock
	 * unless compaction is also requesting movable pages.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
	    capc->cc->migratetype != MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

static inline void account_freepages(struct zone *zone, int nr_pages,
				     int migratetype)
{
	if (is_migrate_isolate(migratetype))
		return;

	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);

	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

/* Used for pages not on another list */
static inline void __add_to_free_list(struct page *page, struct zone *zone,
				      unsigned int order, int migratetype,
				      bool tail)
{
	struct free_area *area = &zone->free_area[order];

	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), migratetype, 1 << order);

	if (tail)
		list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	else
		list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}
/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int old_mt, int new_mt)
{
	struct free_area *area = &zone->free_area[order];

	/* Free page moving can fail, so it happens before the type update */
	VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), old_mt, 1 << order);

	list_move_tail(&page->buddy_list, &area->free_list[new_mt]);

	account_freepages(zone, -(1 << order), old_mt);
	account_freepages(zone, 1 << order, new_mt);
}

static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
					     unsigned int order, int migratetype)
{
	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), migratetype, 1 << order);

	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order, int migratetype)
{
	__del_page_from_free_list(page, zone, order, migratetype);
	account_freepages(zone, -(1 << order), migratetype);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, buddy_list);
}

/*
 * If this is smaller than the second-largest possible page, check if the
 * buddy of the next-higher order is free. If it is, it's possible that
 * pages are being freed that will coalesce soon. If that is happening,
 * add the free page to the tail of the list so it's less likely to be
 * used soon and more likely to be merged as a 2-level higher-order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_PAGE_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}
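/*
 * Worked example (illustrative): at order 3, the buddy of pfn 0x18 is
 * pfn 0x18 ^ (1 << 3) == 0x10. If both are free, they merge into the
 * order-4 block starting at combined_pfn == (0x10 & 0x18) == 0x10 - the
 * same masking buddy_merge_likely() above uses to locate the
 * next-higher-order page.
 */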
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PageBuddy.
 * Page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	account_freepages(zone, 1 << order, migratetype);

	while (order < MAX_PAGE_ORDER) {
		int buddy_mt = migratetype;

		if (compaction_capture(capc, page, order, migratetype)) {
			account_freepages(zone, -(1 << order), migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);

			if (migratetype != buddy_mt &&
			    (!migratetype_is_mergeable(migratetype) ||
			     !migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order);
		else
			__del_page_from_free_list(buddy, zone, order, buddy_mt);

		if (unlikely(buddy_mt != migratetype)) {
			/*
			 * Match buddy type. This ensures that an
			 * expand() down the line puts the sub-blocks
			 * on the right freelists.
			 */
			set_pageblock_migratetype(buddy, migratetype);
		}

		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	__add_to_free_list(page, zone, order, migratetype, to_tail);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
#ifdef CONFIG_PAGE_POOL
			((page->pp_magic & ~0x3UL) == PP_SIGNATURE) |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}
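/*
 * The single-branch trick above, spelled out (illustrative sketch): OR-ing
 * together every field that must be zero lets one conditional cover all of
 * them:
 *
 *	if (a | b | c)		// one branch...
 *		return false;	// ...instead of if (a), if (b), if (c)
 *
 * The boolean pp_magic comparison contributes 0 or 1 to the OR, so it
 * composes with the raw field values.
 */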
static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
#ifdef CONFIG_PAGE_POOL
	if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE))
		bad_reason = "page_pool leak";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static inline bool is_check_pages_enabled(void)
{
	return static_branch_unlikely(&check_pages_enabled);
}

static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
	struct folio *folio = (struct folio *)head_page;
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!is_check_pages_enabled()) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: these may be in place of ->mapping */
		if (unlikely(folio_entire_mapcount(folio))) {
			bad_page(page, "nonzero entire_mapcount");
			goto out;
		}
		if (unlikely(folio_large_mapcount(folio))) {
			bad_page(page, "nonzero large_mapcount");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
			bad_page(page, "nonzero nr_pages_mapped");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_pincount))) {
			bad_page(page, "nonzero pincount");
			goto out;
		}
		break;
	case 2:
		/* the second tail page: deferred_list overlaps ->mapping */
		if (unlikely(!list_empty(&folio->_deferred_list))) {
			bad_page(page, "on deferred list");
			goto out;
		}
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. For generic KASAN: deferred memory initialization has not yet completed.
 *    Tag-based KASAN modes skip pages freed via deferred memory initialization
 *    using page tags instead (see below).
 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
 *    that error detection is disabled for accesses via the page address.
 *
 * Pages will have match-all tags in the following circumstances:
 *
 * 1. Pages are being initialized for the first time, including during deferred
 *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
 *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
 * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return deferred_pages_enabled();

	return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

__always_inline bool free_pages_prepare(struct page *page,
			unsigned int order)
{
	int bad = 0;
	bool skip_kasan_poison = should_skip_kasan_poison(page);
	bool init = want_init_on_free();
	bool compound = PageCompound(page);

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/* Do not let hwpoison pages hit pcplists/buddy */
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		pgalloc_tag_sub(page, 1 << order);
		return false;
	}

	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		int i;

		if (compound)
			page[1].flags &= ~PAGE_FLAGS_SECOND;
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);
	pgalloc_tag_sub(page, 1 << order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}
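/*
 * Illustrative caller pattern (see __free_pages_ok() below): preparation
 * can reject a page, e.g. an hwpoisoned one, in which case it is
 * deliberately leaked rather than fed back to the buddy allocator:
 *
 *	if (!free_pages_prepare(page, order))
 *		return;
 *	free_one_page(zone, page, pfn, order, fpi_flags);
 */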
/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	unsigned long flags;
	unsigned int order;
	struct page *page;

	/*
	 * Ensure a sane count is passed; otherwise we would get stuck in
	 * the while (list_empty(list)) loop below.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > NR_PCP_LISTS - 1)
				pindex = 0;
			list = &pcp->lists[pindex];
		} while (list_empty(list));

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			unsigned long pfn;
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			pfn = page_to_pfn(page);
			mt = get_pfnblock_migratetype(page, pfn);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}
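/*
 * Worked example (illustrative) of the round-robin drain above: with
 * pindex == 3 requested, the pre-decrement plus the ++pindex in the loop
 * makes the scan visit lists 3, 4, ..., NR_PCP_LISTS - 1, 0, 1, 2, ...
 * skipping empty lists, so the requested index is drained first.
 */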
static void free_one_page(struct zone *zone, struct page *page,
			  unsigned long pfn, unsigned int order,
			  fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;

	spin_lock_irqsave(&zone->lock, flags);
	migratetype = get_pfnblock_migratetype(page, pfn);
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order))
		return;

	free_one_page(zone, page, pfn, order, fpi_flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __meminit __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 *
	 * Note that hotplugged memory pages are initialized to PageOffline().
	 * Pages freed from memblock might be marked as reserved.
	 */
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
	    unlikely(context == MEMINIT_HOTPLUG)) {
		for (loop = 0; loop < nr_pages; loop++, p++) {
			VM_WARN_ON_ONCE(PageReserved(p));
			__ClearPageOffline(p);
			set_page_count(p, 0);
		}

		/*
		 * Freeing the page with debug_pagealloc enabled will try to
		 * unmap it; some archs don't like double-unmappings, so
		 * map it first.
		 */
		debug_pagealloc_map_pages(page, nr_pages);
		adjust_managed_page_count(page, nr_pages);
	} else {
		for (loop = 0; loop < nr_pages; loop++, p++) {
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		/* memblock adjusts totalram_pages() manually. */
		atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
	}

	if (page_contains_unaccepted(page, order)) {
		if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
			return;

		accept_memory(page_to_phys(page), PAGE_SIZE << order);
	}

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with compaction's migration or free scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 *
 * Note: the function may return a non-NULL struct page even for a page block
 * which contains a memory hole (i.e. there is no physical memory for a subset
 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
 * will fall into 2 sub-sections, the end pfn of the pageblock may be a hole
 * even though the start pfn is online and valid. This should be safe most of
 * the time because struct pages are still initialized via init_unavailable_range()
 * and pfn walkers shouldn't touch any physical memory range for which they do
 * not recognize any specific metadata in struct pages.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}
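/*
 * Illustrative usage sketch (an assumption about the caller, based on the
 * comment above): compaction validates each pageblock before scanning it:
 *
 *	page = __pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone);
 *	if (!page)
 *		continue;	// block straddles zones or is invalid; skip
 */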
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;
	unsigned long nr_added = 0;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or a guard page); this allows the
		 * pages to merge back into the allocator when the buddy is
		 * freed. The corresponding page table entries will not be
		 * touched; the pages stay not-present in the virtual
		 * address space.
		 */
		if (set_page_guard(zone, &page[size], high))
			continue;

		__add_to_free_list(&page[size], zone, high, migratetype, false);
		set_buddy_order(&page[size], high);
		nr_added += size;
	}
	account_freepages(zone, nr_added, migratetype);
}
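/*
 * Worked example (illustrative): satisfying an order-0 request (low == 0)
 * from an order-3 block (high == 3) walks high = 2, 1, 0, freeing the
 * upper half each time as one order-2, one order-1 and one order-0 buddy;
 * the remaining order-0 page at the start of the block goes to the caller.
 */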
static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		if (PageBuddy(page))
			__ClearPageBuddy(page);
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static bool check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return false;

	check_new_page_bad(page);
	return true;
}

static inline bool check_new_pages(struct page *page, unsigned int order)
{
	if (is_check_pages_enabled()) {
		for (int i = 0; i < (1 << order); i++) {
			struct page *p = page + i;

			if (check_new_page(p))
				return true;
		}
	}

	return false;
}

static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN.
	 */
	return flags & __GFP_SKIP_KASAN;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * If memory tags should be zeroed
	 * (which happens only when memory should be initialized as well).
	 */
	if (zero_tags) {
		/* Initialize both memory and memory tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Take note that memory was initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags) &&
	    kasan_unpoison_pages(page, order, init)) {
		/* Take note that memory was initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/*
		 * If memory tags have not been set by KASAN, reset the page
		 * tags to ensure page_address() dereferencing does not fault.
		 */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, initialize it now. */
	if (init)
		kernel_init_pages(page, 1 << order);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
	pgalloc_tag_add(page, current, 1 << order);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order, migratetype);
		expand(zone, page, order, current_order, migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}
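/*
 * Worked example (illustrative): if a zone's order-1 MOVABLE list is empty
 * but its order-3 list has a page, __rmqueue_smallest() above takes the
 * order-3 page and expand() returns the unused order-1 and order-2 halves
 * to the free lists, leaving an order-1 page for the caller.
 */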
/*
 * This array describes the order in which free lists are fallen back to
 * when the free lists for the desired migratetype are depleted.
 *
 * The other migratetypes do not have fallbacks.
 */
static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
};

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif
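/*
 * Illustrative reading of the fallbacks array above: an UNMOVABLE request
 * whose own free lists are empty tries RECLAIMABLE pageblocks first, then
 * MOVABLE ones; find_suitable_fallback() below walks the row in exactly
 * this order.
 */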
/*
 * Change the type of a block and move all its free pages to that
 * type's freelist.
 */
static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
				  int old_mt, int new_mt)
{
	struct page *page;
	unsigned long pfn, end_pfn;
	unsigned int order;
	int pages_moved = 0;

	VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
	end_pfn = pageblock_end_pfn(start_pfn);

	for (pfn = start_pfn; pfn < end_pfn;) {
		page = pfn_to_page(pfn);
		if (!PageBuddy(page)) {
			pfn++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);

		move_to_free_list(page, zone, order, old_mt, new_mt);

		pfn += 1 << order;
		pages_moved += 1 << order;
	}

	set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);

	return pages_moved;
}

static bool prep_move_freepages_block(struct zone *zone, struct page *page,
				      unsigned long *start_pfn,
				      int *num_free, int *num_movable)
{
	unsigned long pfn, start, end;

	pfn = page_to_pfn(page);
	start = pageblock_start_pfn(pfn);
	end = pageblock_end_pfn(pfn);

	/*
	 * The caller only has the lock for @zone, don't touch ranges
	 * that straddle into other zones. While we could move part of
	 * the range that's inside the zone, this call is usually
	 * accompanied by other operations such as migratetype updates
	 * which also should be locked.
	 */
	if (!zone_spans_pfn(zone, start))
		return false;
	if (!zone_spans_pfn(zone, end - 1))
		return false;

	*start_pfn = start;

	if (num_free) {
		*num_free = 0;
		*num_movable = 0;
		for (pfn = start; pfn < end;) {
			page = pfn_to_page(pfn);
			if (PageBuddy(page)) {
				int nr = 1 << buddy_order(page);

				*num_free += nr;
				pfn += nr;
				continue;
			}
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (PageLRU(page) || __PageMovable(page))
				(*num_movable)++;
			pfn++;
		}
	}

	return true;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int old_mt, int new_mt)
{
	unsigned long start_pfn;

	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
		return -1;

	return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
}

#ifdef CONFIG_MEMORY_ISOLATION
/* Look for a buddy that straddles start_pfn */
static unsigned long find_large_buddy(unsigned long start_pfn)
{
	int order = 0;
	struct page *page;
	unsigned long pfn = start_pfn;

	while (!PageBuddy(page = pfn_to_page(pfn))) {
		/* Nothing found */
		if (++order > MAX_PAGE_ORDER)
			return start_pfn;
		pfn &= ~0UL << order;
	}

	/*
	 * Found a preceding buddy, but does it straddle?
	 */
	if (pfn + (1 << buddy_order(page)) > start_pfn)
		return pfn;

	/* Nothing found */
	return start_pfn;
}
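/*
 * Worked example (illustrative) for find_large_buddy() above: starting at
 * pfn 0x1200, the scan checks 0x1200 for orders 0..9 (clearing low bits
 * changes nothing there), then 0x1000 at order 10. If 0x1000 is an
 * order-10 buddy, 0x1000 + (1 << 10) == 0x1400 > 0x1200, so that buddy
 * straddles start_pfn and 0x1000 is returned.
 */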
/* Split a multi-block free page into its individual pageblocks */
static void split_large_buddy(struct zone *zone, struct page *page,
			      unsigned long pfn, int order)
{
	unsigned long end_pfn = pfn + (1 << order);

	VM_WARN_ON_ONCE(order <= pageblock_order);
	VM_WARN_ON_ONCE(pfn & (pageblock_nr_pages - 1));

	/* Caller removed page from freelist, buddy info cleared! */
	VM_WARN_ON_ONCE(PageBuddy(page));

	while (pfn != end_pfn) {
		int mt = get_pfnblock_migratetype(page, pfn);

		__free_one_page(page, pfn, zone, pageblock_order, mt, FPI_NONE);
		pfn += pageblock_nr_pages;
		page = pfn_to_page(pfn);
	}
}

/**
 * move_freepages_block_isolate - move free pages in block for page isolation
 * @zone: the zone
 * @page: the pageblock page
 * @migratetype: migratetype to set on the pageblock
 *
 * This is similar to move_freepages_block(), but handles the special
 * case encountered in page isolation, where the block of interest
 * might be part of a larger buddy spanning multiple pageblocks.
 *
 * Unlike the regular page allocator path, which moves pages while
 * stealing buddies off the freelist, page isolation is interested in
 * arbitrary pfn ranges that may have overlapping buddies on both ends.
 *
 * This function handles that. Straddling buddies are split into
 * individual pageblocks. Only the block of interest is moved.
 *
 * Returns %true if pages could be moved, %false otherwise.
 */
bool move_freepages_block_isolate(struct zone *zone, struct page *page,
				  int migratetype)
{
	unsigned long start_pfn, pfn;

	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
		return false;

	/* No splits needed if buddies can't span multiple blocks */
	if (pageblock_order == MAX_PAGE_ORDER)
		goto move;

	/* We're a tail block in a larger buddy */
	pfn = find_large_buddy(start_pfn);
	if (pfn != start_pfn) {
		struct page *buddy = pfn_to_page(pfn);
		int order = buddy_order(buddy);

		del_page_from_free_list(buddy, zone, order,
					get_pfnblock_migratetype(buddy, pfn));
		set_pageblock_migratetype(page, migratetype);
		split_large_buddy(zone, buddy, pfn, order);
		return true;
	}

	/* We're the starting block of a larger buddy */
	if (PageBuddy(page) && buddy_order(page) > pageblock_order) {
		int order = buddy_order(page);

		del_page_from_free_list(page, zone, order,
					get_pfnblock_migratetype(page, pfn));
		set_pageblock_migratetype(page, migratetype);
		split_large_buddy(zone, page, pfn, order);
		return true;
	}
move:
	__move_freepages_block(zone, start_pfn,
			       get_pfnblock_migratetype(page, start_pfn),
			       migratetype);
	return true;
}
#endif /* CONFIG_MEMORY_ISOLATION */

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * This order check is intentional, even though the next check uses
	 * a relaxed order threshold. The reason is that we can actually
	 * steal the whole pageblock if this condition is met, while the
	 * check below does not guarantee it and is just a heuristic that
	 * could be changed at any time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}
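/*
 * Illustrative consequence of can_steal_fallback() above (assuming
 * pageblock_order == 9): a MOVABLE fallback of order 5 may steal the
 * whole pageblock (5 >= 9 / 2), while a MOVABLE fallback of order 3 may
 * not, and only takes the single page it needs.
 */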
1838 */ 1839 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1840 return false; 1841 1842 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 1843 watermark_boost_factor, 10000); 1844 1845 /* 1846 * high watermark may be uninitialised if fragmentation occurs 1847 * very early in boot so do not boost. We do not fall 1848 * through and boost by pageblock_nr_pages as failing 1849 * allocations that early means that reclaim is not going 1850 * to help and it may even be impossible to reclaim the 1851 * boosted watermark resulting in a hang. 1852 */ 1853 if (!max_boost) 1854 return false; 1855 1856 max_boost = max(pageblock_nr_pages, max_boost); 1857 1858 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 1859 max_boost); 1860 1861 return true; 1862 } 1863 1864 /* 1865 * This function implements actual steal behaviour. If order is large enough, we 1866 * can claim the whole pageblock for the requested migratetype. If not, we check 1867 * the pageblock for constituent pages; if at least half of the pages are free 1868 * or compatible, we can still claim the whole block, so pages freed in the 1869 * future will be put on the correct free list. Otherwise, we isolate exactly 1870 * the order we need from the fallback block and leave its migratetype alone. 1871 */ 1872 static struct page * 1873 steal_suitable_fallback(struct zone *zone, struct page *page, 1874 int current_order, int order, int start_type, 1875 unsigned int alloc_flags, bool whole_block) 1876 { 1877 int free_pages, movable_pages, alike_pages; 1878 unsigned long start_pfn; 1879 int block_type; 1880 1881 block_type = get_pageblock_migratetype(page); 1882 1883 /* 1884 * This can happen due to races and we want to prevent broken 1885 * highatomic accounting. 1886 */ 1887 if (is_migrate_highatomic(block_type)) 1888 goto single_page; 1889 1890 /* Take ownership for orders >= pageblock_order */ 1891 if (current_order >= pageblock_order) { 1892 del_page_from_free_list(page, zone, current_order, block_type); 1893 change_pageblock_range(page, current_order, start_type); 1894 expand(zone, page, order, current_order, start_type); 1895 return page; 1896 } 1897 1898 /* 1899 * Boost watermarks to increase reclaim pressure to reduce the 1900 * likelihood of future fallbacks. Wake kswapd now as the node 1901 * may be balanced overall and kswapd will not wake naturally. 1902 */ 1903 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 1904 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 1905 1906 /* We are not allowed to try stealing from the whole block */ 1907 if (!whole_block) 1908 goto single_page; 1909 1910 /* moving whole block can fail due to zone boundary conditions */ 1911 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, 1912 &movable_pages)) 1913 goto single_page; 1914 1915 /* 1916 * Determine how many pages are compatible with our allocation. 1917 * For movable allocation, it's the number of movable pages which 1918 * we just obtained. For other types it's a bit more tricky. 1919 */ 1920 if (start_type == MIGRATE_MOVABLE) { 1921 alike_pages = movable_pages; 1922 } else { 1923 /* 1924 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 1925 * to MOVABLE pageblock, consider all non-movable pages as 1926 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 1927 * vice versa, be conservative since we can't distinguish the 1928 * exact migratetype of non-movable pages. 
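 *
 * Worked example (illustrative numbers): an UNMOVABLE request
 * falling back into a 512-page MOVABLE block that has 100 free
 * and 300 movable pages treats 512 - (100 + 300) = 112 pages as
 * alike; 100 + 112 < 256 (half the block), so the block would
 * not be claimed by the check further below.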
1929 */
1930 if (block_type == MIGRATE_MOVABLE)
1931 alike_pages = pageblock_nr_pages
1932 - (free_pages + movable_pages);
1933 else
1934 alike_pages = 0;
1935 }
1936 /*
1937 * If a sufficient number of pages in the block are either free or have
1938 * migratability compatible with our allocation, claim the whole block.
1939 */
1940 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
1941 page_group_by_mobility_disabled) {
1942 __move_freepages_block(zone, start_pfn, block_type, start_type);
1943 return __rmqueue_smallest(zone, order, start_type);
1944 }
1945
1946 single_page:
1947 del_page_from_free_list(page, zone, current_order, block_type);
1948 expand(zone, page, order, current_order, block_type);
1949 return page;
1950 }
1951
1952 /*
1953 * Check whether there is a suitable fallback freepage with the requested
1954 * order. If only_stealable is true, this function returns fallback_mt only
1955 * if we can steal the pageblock's other free pages as well, which helps to
1956 * reduce fragmentation due to mixed migratetype pages in one pageblock.
1957 */
1958 int find_suitable_fallback(struct free_area *area, unsigned int order,
1959 int migratetype, bool only_stealable, bool *can_steal)
1960 {
1961 int i;
1962 int fallback_mt;
1963
1964 if (area->nr_free == 0)
1965 return -1;
1966
1967 *can_steal = false;
1968 for (i = 0; i < MIGRATE_PCPTYPES - 1; i++) {
1969 fallback_mt = fallbacks[migratetype][i];
1970 if (free_area_empty(area, fallback_mt))
1971 continue;
1972
1973 if (can_steal_fallback(order, migratetype))
1974 *can_steal = true;
1975
1976 if (!only_stealable)
1977 return fallback_mt;
1978
1979 if (*can_steal)
1980 return fallback_mt;
1981 }
1982
1983 return -1;
1984 }
1985
1986 /*
1987 * Reserve the pageblock(s) surrounding an allocation request for
1988 * exclusive use of high-order atomic allocations if there are no
1989 * empty pageblocks that contain a page with a suitable order.
1990 */
1991 static void reserve_highatomic_pageblock(struct page *page, int order,
1992 struct zone *zone)
1993 {
1994 int mt;
1995 unsigned long max_managed, flags;
1996
1997 /*
1998 * Reserve a minimum of 1 pageblock and a maximum of roughly 1%
1999 * of the zone. If 1% of the zone falls below one pageblock in
2000 * size, then don't reserve any pageblocks.
2001 * The check is race-prone but harmless.
2002 */
2003 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
2004 return;
2005 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
2006 if (zone->nr_reserved_highatomic >= max_managed)
2007 return;
2008
2009 spin_lock_irqsave(&zone->lock, flags);
2010
2011 /* Recheck the nr_reserved_highatomic limit under the lock */
2012 if (zone->nr_reserved_highatomic >= max_managed)
2013 goto out_unlock;
2014
2015 /* Yoink! */
2016 mt = get_pageblock_migratetype(page);
2017 /* Only reserve normal pageblocks (i.e., they can merge with others) */
2018 if (!migratetype_is_mergeable(mt))
2019 goto out_unlock;
2020
2021 if (order < pageblock_order) {
2022 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
2023 goto out_unlock;
2024 zone->nr_reserved_highatomic += pageblock_nr_pages;
2025 } else {
2026 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
2027 zone->nr_reserved_highatomic += 1 << order;
2028 }
2029
2030 out_unlock:
2031 spin_unlock_irqrestore(&zone->lock, flags);
2032 }
2033
2034 /*
2035 * Used when an allocation is about to fail under memory pressure. This
2036 * potentially hurts the reliability of high-order allocations when under
2037 * intense memory pressure but failed atomic allocations should be easier
2038 * to recover from than an OOM.
2039 *
2040 * If @force is true, try to unreserve pageblocks even when doing so
2041 * would exhaust the highatomic reserves.
2042 */
2043 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2044 bool force)
2045 {
2046 struct zonelist *zonelist = ac->zonelist;
2047 unsigned long flags;
2048 struct zoneref *z;
2049 struct zone *zone;
2050 struct page *page;
2051 int order;
2052 int ret;
2053
2054 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2055 ac->nodemask) {
2056 /*
2057 * Preserve at least one pageblock unless memory pressure
2058 * is really high.
2059 */
2060 if (!force && zone->nr_reserved_highatomic <=
2061 pageblock_nr_pages)
2062 continue;
2063
2064 spin_lock_irqsave(&zone->lock, flags);
2065 for (order = 0; order < NR_PAGE_ORDERS; order++) {
2066 struct free_area *area = &(zone->free_area[order]);
2067 int mt;
2068
2069 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2070 if (!page)
2071 continue;
2072
2073 mt = get_pageblock_migratetype(page);
2074 /*
2075 * In the page freeing path, migratetype changes are racy,
2076 * so we can encounter several free pages of one pageblock
2077 * in this loop even though we changed the pageblock type
2078 * from highatomic to ac->migratetype. So we should adjust
2079 * the count only once.
2080 */
2081 if (is_migrate_highatomic(mt)) {
2082 unsigned long size;
2083 /*
2084 * It should never happen but changes to
2085 * locking could inadvertently allow a per-cpu
2086 * drain to add pages to MIGRATE_HIGHATOMIC
2087 * while unreserving so be safe and watch for
2088 * underflows.
2089 */
2090 size = max(pageblock_nr_pages, 1UL << order);
2091 size = min(size, zone->nr_reserved_highatomic);
2092 zone->nr_reserved_highatomic -= size;
2093 }
2094
2095 /*
2096 * Convert to ac->migratetype and avoid the normal
2097 * pageblock stealing heuristics. Minimally, the caller
2098 * is doing the work and needs the pages. More
2099 * importantly, if the block were always converted to
2100 * MIGRATE_UNMOVABLE or another type, then the number
2101 * of pageblocks that cannot be completely freed
2102 * may increase.
2103 */
2104 if (order < pageblock_order)
2105 ret = move_freepages_block(zone, page, mt,
2106 ac->migratetype);
2107 else {
2108 move_to_free_list(page, zone, order, mt,
2109 ac->migratetype);
2110 change_pageblock_range(page, order,
2111 ac->migratetype);
2112 ret = 1;
2113 }
2114 /*
2115 * Reserving the block(s) already succeeded,
2116 * so this should not fail on zone boundaries.
2117 */
2118 WARN_ON_ONCE(ret == -1);
2119 if (ret > 0) {
2120 spin_unlock_irqrestore(&zone->lock, flags);
2121 return ret;
2122 }
2123 }
2124 spin_unlock_irqrestore(&zone->lock, flags);
2125 }
2126
2127 return false;
2128 }
2129
2130 /*
2131 * Try finding a free buddy page on the fallback list and put it on the free
2132 * list of the requested migratetype, possibly along with other pages from the
2133 * same block, depending on fragmentation avoidance heuristics. Returns the
2134 * allocated page, or NULL if no suitable fallback could be found.
2135 *
2136 * The use of signed ints for order and current_order is a deliberate
2137 * deviation from the rest of this file, to make the for loop
2138 * condition simpler.
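 *
 * The candidate migratetypes come from the fallbacks[] table defined
 * earlier in this file; e.g. an UNMOVABLE request tries RECLAIMABLE
 * and then MOVABLE pageblocks.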
2139 */ 2140 static __always_inline struct page * 2141 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2142 unsigned int alloc_flags) 2143 { 2144 struct free_area *area; 2145 int current_order; 2146 int min_order = order; 2147 struct page *page; 2148 int fallback_mt; 2149 bool can_steal; 2150 2151 /* 2152 * Do not steal pages from freelists belonging to other pageblocks 2153 * i.e. orders < pageblock_order. If there are no local zones free, 2154 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2155 */ 2156 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2157 min_order = pageblock_order; 2158 2159 /* 2160 * Find the largest available free page in the other list. This roughly 2161 * approximates finding the pageblock with the most free pages, which 2162 * would be too costly to do exactly. 2163 */ 2164 for (current_order = MAX_PAGE_ORDER; current_order >= min_order; 2165 --current_order) { 2166 area = &(zone->free_area[current_order]); 2167 fallback_mt = find_suitable_fallback(area, current_order, 2168 start_migratetype, false, &can_steal); 2169 if (fallback_mt == -1) 2170 continue; 2171 2172 /* 2173 * We cannot steal all free pages from the pageblock and the 2174 * requested migratetype is movable. In that case it's better to 2175 * steal and split the smallest available page instead of the 2176 * largest available page, because even if the next movable 2177 * allocation falls back into a different pageblock than this 2178 * one, it won't cause permanent fragmentation. 2179 */ 2180 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2181 && current_order > order) 2182 goto find_smallest; 2183 2184 goto do_steal; 2185 } 2186 2187 return NULL; 2188 2189 find_smallest: 2190 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2191 area = &(zone->free_area[current_order]); 2192 fallback_mt = find_suitable_fallback(area, current_order, 2193 start_migratetype, false, &can_steal); 2194 if (fallback_mt != -1) 2195 break; 2196 } 2197 2198 /* 2199 * This should not happen - we already found a suitable fallback 2200 * when looking for the largest page. 2201 */ 2202 VM_BUG_ON(current_order > MAX_PAGE_ORDER); 2203 2204 do_steal: 2205 page = get_page_from_free_area(area, fallback_mt); 2206 2207 /* take off list, maybe claim block, expand remainder */ 2208 page = steal_suitable_fallback(zone, page, current_order, order, 2209 start_migratetype, alloc_flags, can_steal); 2210 2211 trace_mm_page_alloc_extfrag(page, order, current_order, 2212 start_migratetype, fallback_mt); 2213 2214 return page; 2215 } 2216 2217 /* 2218 * Do the hard work of removing an element from the buddy allocator. 2219 * Call me with the zone->lock already held. 2220 */ 2221 static __always_inline struct page * 2222 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2223 unsigned int alloc_flags) 2224 { 2225 struct page *page; 2226 2227 if (IS_ENABLED(CONFIG_CMA)) { 2228 /* 2229 * Balance movable allocations between regular and CMA areas by 2230 * allocating from CMA when over half of the zone's free memory 2231 * is in the CMA area. 
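 *
 * E.g. with 1000 free pages in the zone of which 600 are free CMA
 * pages, 600 > 1000 / 2, so the check below tries CMA first.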
2232 */
2233 if (alloc_flags & ALLOC_CMA &&
2234 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2235 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2236 page = __rmqueue_cma_fallback(zone, order);
2237 if (page)
2238 return page;
2239 }
2240 }
2241
2242 page = __rmqueue_smallest(zone, order, migratetype);
2243 if (unlikely(!page)) {
2244 if (alloc_flags & ALLOC_CMA)
2245 page = __rmqueue_cma_fallback(zone, order);
2246
2247 if (!page)
2248 page = __rmqueue_fallback(zone, order, migratetype,
2249 alloc_flags);
2250 }
2251 return page;
2252 }
2253
2254 /*
2255 * Obtain a specified number of elements from the buddy allocator, all under
2256 * a single hold of the lock, for efficiency. Add them to the supplied list.
2257 * Returns the number of new pages which were placed at *list.
2258 */
2259 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2260 unsigned long count, struct list_head *list,
2261 int migratetype, unsigned int alloc_flags)
2262 {
2263 unsigned long flags;
2264 int i;
2265
2266 spin_lock_irqsave(&zone->lock, flags);
2267 for (i = 0; i < count; ++i) {
2268 struct page *page = __rmqueue(zone, order, migratetype,
2269 alloc_flags);
2270 if (unlikely(page == NULL))
2271 break;
2272
2273 /*
2274 * Split buddy pages returned by expand() are received here in
2275 * physical page order. The page is added to the tail of the
2276 * caller's list, so from the caller's perspective the linked
2277 * list is ordered by page number under some conditions.
2278 * This is useful for IO devices that can forward from the
2279 * head of the list in physical page order, e.g. devices that
2280 * can merge IO requests if the physical pages are ordered
2281 * properly.
2282 */
2283 list_add_tail(&page->pcp_list, list);
2284 }
2285 spin_unlock_irqrestore(&zone->lock, flags);
2286
2287 return i;
2288 }
2289
2290 /*
2291 * Called from the vmstat counter updater to decay the PCP high.
2292 * Return whether there is additional work to do.
2293 */
2294 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2295 {
2296 int high_min, to_drain, batch;
2297 int todo = 0;
2298
2299 high_min = READ_ONCE(pcp->high_min);
2300 batch = READ_ONCE(pcp->batch);
2301 /*
2302 * Decrease pcp->high periodically to try to free possibly idle
2303 * PCP pages, but avoid freeing too many pages at once in order
2304 * to control latency. This also caps the pcp->high decrement.
2305 */
2306 if (pcp->high > high_min) {
2307 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2308 pcp->high - (pcp->high >> 3), high_min);
2309 if (pcp->high > high_min)
2310 todo++;
2311 }
2312
2313 to_drain = pcp->count - pcp->high;
2314 if (to_drain > 0) {
2315 spin_lock(&pcp->lock);
2316 free_pcppages_bulk(zone, to_drain, pcp, 0);
2317 spin_unlock(&pcp->lock);
2318 todo++;
2319 }
2320
2321 return todo;
2322 }
2323
2324 #ifdef CONFIG_NUMA
2325 /*
2326 * Called from the vmstat counter updater to drain the pagesets of the
2327 * currently executing processor that belong to zones on remote nodes,
2328 * after they have expired.
2329 */
2330 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2331 {
2332 int to_drain, batch;
2333
2334 batch = READ_ONCE(pcp->batch);
2335 to_drain = min(pcp->count, batch);
2336 if (to_drain > 0) {
2337 spin_lock(&pcp->lock);
2338 free_pcppages_bulk(zone, to_drain, pcp, 0);
2339 spin_unlock(&pcp->lock);
2340 }
2341 }
2342 #endif
2343
2344 /*
2345 * Drain pcplists of the indicated processor and zone.
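 *
 * A minimal usage sketch (illustrative; in this file the callers are
 * drain_local_pages() and __drain_all_pages() below):
 *
 *	drain_pages_zone(cpu, zone);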
2346 */ 2347 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2348 { 2349 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2350 int count; 2351 2352 do { 2353 spin_lock(&pcp->lock); 2354 count = pcp->count; 2355 if (count) { 2356 int to_drain = min(count, 2357 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2358 2359 free_pcppages_bulk(zone, to_drain, pcp, 0); 2360 count -= to_drain; 2361 } 2362 spin_unlock(&pcp->lock); 2363 } while (count); 2364 } 2365 2366 /* 2367 * Drain pcplists of all zones on the indicated processor. 2368 */ 2369 static void drain_pages(unsigned int cpu) 2370 { 2371 struct zone *zone; 2372 2373 for_each_populated_zone(zone) { 2374 drain_pages_zone(cpu, zone); 2375 } 2376 } 2377 2378 /* 2379 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2380 */ 2381 void drain_local_pages(struct zone *zone) 2382 { 2383 int cpu = smp_processor_id(); 2384 2385 if (zone) 2386 drain_pages_zone(cpu, zone); 2387 else 2388 drain_pages(cpu); 2389 } 2390 2391 /* 2392 * The implementation of drain_all_pages(), exposing an extra parameter to 2393 * drain on all cpus. 2394 * 2395 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2396 * not empty. The check for non-emptiness can however race with a free to 2397 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2398 * that need the guarantee that every CPU has drained can disable the 2399 * optimizing racy check. 2400 */ 2401 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2402 { 2403 int cpu; 2404 2405 /* 2406 * Allocate in the BSS so we won't require allocation in 2407 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2408 */ 2409 static cpumask_t cpus_with_pcps; 2410 2411 /* 2412 * Do not drain if one is already in progress unless it's specific to 2413 * a zone. Such callers are primarily CMA and memory hotplug and need 2414 * the drain to be complete when the call returns. 2415 */ 2416 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2417 if (!zone) 2418 return; 2419 mutex_lock(&pcpu_drain_mutex); 2420 } 2421 2422 /* 2423 * We don't care about racing with CPU hotplug event 2424 * as offline notification will cause the notified 2425 * cpu to drain that CPU pcps and on_each_cpu_mask 2426 * disables preemption as part of its processing 2427 */ 2428 for_each_online_cpu(cpu) { 2429 struct per_cpu_pages *pcp; 2430 struct zone *z; 2431 bool has_pcps = false; 2432 2433 if (force_all_cpus) { 2434 /* 2435 * The pcp.count check is racy, some callers need a 2436 * guarantee that no cpu is missed. 2437 */ 2438 has_pcps = true; 2439 } else if (zone) { 2440 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2441 if (pcp->count) 2442 has_pcps = true; 2443 } else { 2444 for_each_populated_zone(z) { 2445 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2446 if (pcp->count) { 2447 has_pcps = true; 2448 break; 2449 } 2450 } 2451 } 2452 2453 if (has_pcps) 2454 cpumask_set_cpu(cpu, &cpus_with_pcps); 2455 else 2456 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2457 } 2458 2459 for_each_cpu(cpu, &cpus_with_pcps) { 2460 if (zone) 2461 drain_pages_zone(cpu, zone); 2462 else 2463 drain_pages(cpu); 2464 } 2465 2466 mutex_unlock(&pcpu_drain_mutex); 2467 } 2468 2469 /* 2470 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2471 * 2472 * When zone parameter is non-NULL, spill just the single zone's pages. 
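 *
 * E.g. memory hotplug and CMA use this to flush stranded pcp pages
 * back to the buddy lists (an illustrative summary; see the comment
 * in __drain_all_pages() above about such callers).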
2473 */ 2474 void drain_all_pages(struct zone *zone) 2475 { 2476 __drain_all_pages(zone, false); 2477 } 2478 2479 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high) 2480 { 2481 int min_nr_free, max_nr_free; 2482 2483 /* Free as much as possible if batch freeing high-order pages. */ 2484 if (unlikely(free_high)) 2485 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX); 2486 2487 /* Check for PCP disabled or boot pageset */ 2488 if (unlikely(high < batch)) 2489 return 1; 2490 2491 /* Leave at least pcp->batch pages on the list */ 2492 min_nr_free = batch; 2493 max_nr_free = high - batch; 2494 2495 /* 2496 * Increase the batch number to the number of the consecutive 2497 * freed pages to reduce zone lock contention. 2498 */ 2499 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free); 2500 2501 return batch; 2502 } 2503 2504 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2505 int batch, bool free_high) 2506 { 2507 int high, high_min, high_max; 2508 2509 high_min = READ_ONCE(pcp->high_min); 2510 high_max = READ_ONCE(pcp->high_max); 2511 high = pcp->high = clamp(pcp->high, high_min, high_max); 2512 2513 if (unlikely(!high)) 2514 return 0; 2515 2516 if (unlikely(free_high)) { 2517 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 2518 high_min); 2519 return 0; 2520 } 2521 2522 /* 2523 * If reclaim is active, limit the number of pages that can be 2524 * stored on pcp lists 2525 */ 2526 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { 2527 int free_count = max_t(int, pcp->free_count, batch); 2528 2529 pcp->high = max(high - free_count, high_min); 2530 return min(batch << 2, pcp->high); 2531 } 2532 2533 if (high_min == high_max) 2534 return high; 2535 2536 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { 2537 int free_count = max_t(int, pcp->free_count, batch); 2538 2539 pcp->high = max(high - free_count, high_min); 2540 high = max(pcp->count, high_min); 2541 } else if (pcp->count >= high) { 2542 int need_high = pcp->free_count + batch; 2543 2544 /* pcp->high should be large enough to hold batch freed pages */ 2545 if (pcp->high < need_high) 2546 pcp->high = clamp(need_high, high_min, high_max); 2547 } 2548 2549 return high; 2550 } 2551 2552 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2553 struct page *page, int migratetype, 2554 unsigned int order) 2555 { 2556 int high, batch; 2557 int pindex; 2558 bool free_high = false; 2559 2560 /* 2561 * On freeing, reduce the number of pages that are batch allocated. 2562 * See nr_pcp_alloc() where alloc_factor is increased for subsequent 2563 * allocations. 2564 */ 2565 pcp->alloc_factor >>= 1; 2566 __count_vm_events(PGFREE, 1 << order); 2567 pindex = order_to_pindex(migratetype, order); 2568 list_add(&page->pcp_list, &pcp->lists[pindex]); 2569 pcp->count += 1 << order; 2570 2571 batch = READ_ONCE(pcp->batch); 2572 /* 2573 * As high-order pages other than THP's stored on PCP can contribute 2574 * to fragmentation, limit the number stored when PCP is heavily 2575 * freeing without allocation. The remainder after bulk freeing 2576 * stops will be drained from vmstat refresh context. 
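 *
 * Paraphrasing the expression below: this free counts as "free_high"
 * only if at least a batch worth of pages was freed consecutively,
 * the previous free was also high-order, and, when the batch flag is
 * set, the pcp list already holds at least a batch of pages.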
2577 */ 2578 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { 2579 free_high = (pcp->free_count >= batch && 2580 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) && 2581 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) || 2582 pcp->count >= READ_ONCE(batch))); 2583 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER; 2584 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) { 2585 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER; 2586 } 2587 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX)) 2588 pcp->free_count += (1 << order); 2589 high = nr_pcp_high(pcp, zone, batch, free_high); 2590 if (pcp->count >= high) { 2591 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), 2592 pcp, pindex); 2593 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && 2594 zone_watermark_ok(zone, 0, high_wmark_pages(zone), 2595 ZONE_MOVABLE, 0)) 2596 clear_bit(ZONE_BELOW_HIGH, &zone->flags); 2597 } 2598 } 2599 2600 /* 2601 * Free a pcp page 2602 */ 2603 void free_unref_page(struct page *page, unsigned int order) 2604 { 2605 unsigned long __maybe_unused UP_flags; 2606 struct per_cpu_pages *pcp; 2607 struct zone *zone; 2608 unsigned long pfn = page_to_pfn(page); 2609 int migratetype; 2610 2611 if (!pcp_allowed_order(order)) { 2612 __free_pages_ok(page, order, FPI_NONE); 2613 return; 2614 } 2615 2616 if (!free_pages_prepare(page, order)) 2617 return; 2618 2619 /* 2620 * We only track unmovable, reclaimable and movable on pcp lists. 2621 * Place ISOLATE pages on the isolated list because they are being 2622 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2623 * get those areas back if necessary. Otherwise, we may have to free 2624 * excessively into the page allocator 2625 */ 2626 migratetype = get_pfnblock_migratetype(page, pfn); 2627 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2628 if (unlikely(is_migrate_isolate(migratetype))) { 2629 free_one_page(page_zone(page), page, pfn, order, FPI_NONE); 2630 return; 2631 } 2632 migratetype = MIGRATE_MOVABLE; 2633 } 2634 2635 zone = page_zone(page); 2636 pcp_trylock_prepare(UP_flags); 2637 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2638 if (pcp) { 2639 free_unref_page_commit(zone, pcp, page, migratetype, order); 2640 pcp_spin_unlock(pcp); 2641 } else { 2642 free_one_page(zone, page, pfn, order, FPI_NONE); 2643 } 2644 pcp_trylock_finish(UP_flags); 2645 } 2646 2647 /* 2648 * Free a batch of folios 2649 */ 2650 void free_unref_folios(struct folio_batch *folios) 2651 { 2652 unsigned long __maybe_unused UP_flags; 2653 struct per_cpu_pages *pcp = NULL; 2654 struct zone *locked_zone = NULL; 2655 int i, j; 2656 2657 /* Prepare folios for freeing */ 2658 for (i = 0, j = 0; i < folios->nr; i++) { 2659 struct folio *folio = folios->folios[i]; 2660 unsigned long pfn = folio_pfn(folio); 2661 unsigned int order = folio_order(folio); 2662 2663 folio_undo_large_rmappable(folio); 2664 if (!free_pages_prepare(&folio->page, order)) 2665 continue; 2666 /* 2667 * Free orders not handled on the PCP directly to the 2668 * allocator. 
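 * (pcp_allowed_order() covers orders up to PAGE_ALLOC_COSTLY_ORDER
 * and, where THP is configured, the PMD order; anything else takes
 * the free_one_page() path below.)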
2669 */ 2670 if (!pcp_allowed_order(order)) { 2671 free_one_page(folio_zone(folio), &folio->page, 2672 pfn, order, FPI_NONE); 2673 continue; 2674 } 2675 folio->private = (void *)(unsigned long)order; 2676 if (j != i) 2677 folios->folios[j] = folio; 2678 j++; 2679 } 2680 folios->nr = j; 2681 2682 for (i = 0; i < folios->nr; i++) { 2683 struct folio *folio = folios->folios[i]; 2684 struct zone *zone = folio_zone(folio); 2685 unsigned long pfn = folio_pfn(folio); 2686 unsigned int order = (unsigned long)folio->private; 2687 int migratetype; 2688 2689 folio->private = NULL; 2690 migratetype = get_pfnblock_migratetype(&folio->page, pfn); 2691 2692 /* Different zone requires a different pcp lock */ 2693 if (zone != locked_zone || 2694 is_migrate_isolate(migratetype)) { 2695 if (pcp) { 2696 pcp_spin_unlock(pcp); 2697 pcp_trylock_finish(UP_flags); 2698 locked_zone = NULL; 2699 pcp = NULL; 2700 } 2701 2702 /* 2703 * Free isolated pages directly to the 2704 * allocator, see comment in free_unref_page. 2705 */ 2706 if (is_migrate_isolate(migratetype)) { 2707 free_one_page(zone, &folio->page, pfn, 2708 order, FPI_NONE); 2709 continue; 2710 } 2711 2712 /* 2713 * trylock is necessary as folios may be getting freed 2714 * from IRQ or SoftIRQ context after an IO completion. 2715 */ 2716 pcp_trylock_prepare(UP_flags); 2717 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2718 if (unlikely(!pcp)) { 2719 pcp_trylock_finish(UP_flags); 2720 free_one_page(zone, &folio->page, pfn, 2721 order, FPI_NONE); 2722 continue; 2723 } 2724 locked_zone = zone; 2725 } 2726 2727 /* 2728 * Non-isolated types over MIGRATE_PCPTYPES get added 2729 * to the MIGRATE_MOVABLE pcp list. 2730 */ 2731 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2732 migratetype = MIGRATE_MOVABLE; 2733 2734 trace_mm_page_free_batched(&folio->page); 2735 free_unref_page_commit(zone, pcp, &folio->page, migratetype, 2736 order); 2737 } 2738 2739 if (pcp) { 2740 pcp_spin_unlock(pcp); 2741 pcp_trylock_finish(UP_flags); 2742 } 2743 folio_batch_reinit(folios); 2744 } 2745 2746 /* 2747 * split_page takes a non-compound higher-order page, and splits it into 2748 * n (1<<order) sub-pages: page[0..n] 2749 * Each sub-page must be freed individually. 2750 * 2751 * Note: this is probably too low level an operation for use in drivers. 2752 * Please consult with lkml before using this in your driver. 2753 */ 2754 void split_page(struct page *page, unsigned int order) 2755 { 2756 int i; 2757 2758 VM_BUG_ON_PAGE(PageCompound(page), page); 2759 VM_BUG_ON_PAGE(!page_count(page), page); 2760 2761 for (i = 1; i < (1 << order); i++) 2762 set_page_refcounted(page + i); 2763 split_page_owner(page, order, 0); 2764 pgalloc_tag_split(page, 1 << order); 2765 split_page_memcg(page, order, 0); 2766 } 2767 EXPORT_SYMBOL_GPL(split_page); 2768 2769 int __isolate_free_page(struct page *page, unsigned int order) 2770 { 2771 struct zone *zone = page_zone(page); 2772 int mt = get_pageblock_migratetype(page); 2773 2774 if (!is_migrate_isolate(mt)) { 2775 unsigned long watermark; 2776 /* 2777 * Obey watermarks as if the page was being allocated. We can 2778 * emulate a high-order watermark check with a raised order-0 2779 * watermark, because we already know our high-order page 2780 * exists. 
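 *
 * E.g. isolating an order-3 page against a min watermark of 1024
 * pages yields an order-0 check against 1024 + (1 << 3) free pages
 * below (illustrative numbers).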
2781 */ 2782 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2783 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2784 return 0; 2785 } 2786 2787 del_page_from_free_list(page, zone, order, mt); 2788 2789 /* 2790 * Set the pageblock if the isolated page is at least half of a 2791 * pageblock 2792 */ 2793 if (order >= pageblock_order - 1) { 2794 struct page *endpage = page + (1 << order) - 1; 2795 for (; page < endpage; page += pageblock_nr_pages) { 2796 int mt = get_pageblock_migratetype(page); 2797 /* 2798 * Only change normal pageblocks (i.e., they can merge 2799 * with others) 2800 */ 2801 if (migratetype_is_mergeable(mt)) 2802 move_freepages_block(zone, page, mt, 2803 MIGRATE_MOVABLE); 2804 } 2805 } 2806 2807 return 1UL << order; 2808 } 2809 2810 /** 2811 * __putback_isolated_page - Return a now-isolated page back where we got it 2812 * @page: Page that was isolated 2813 * @order: Order of the isolated page 2814 * @mt: The page's pageblock's migratetype 2815 * 2816 * This function is meant to return a page pulled from the free lists via 2817 * __isolate_free_page back to the free lists they were pulled from. 2818 */ 2819 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2820 { 2821 struct zone *zone = page_zone(page); 2822 2823 /* zone lock should be held when this function is called */ 2824 lockdep_assert_held(&zone->lock); 2825 2826 /* Return isolated page to tail of freelist. */ 2827 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2828 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2829 } 2830 2831 /* 2832 * Update NUMA hit/miss statistics 2833 */ 2834 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2835 long nr_account) 2836 { 2837 #ifdef CONFIG_NUMA 2838 enum numa_stat_item local_stat = NUMA_LOCAL; 2839 2840 /* skip numa counters update if numa stats is disabled */ 2841 if (!static_branch_likely(&vm_numa_stat_key)) 2842 return; 2843 2844 if (zone_to_nid(z) != numa_node_id()) 2845 local_stat = NUMA_OTHER; 2846 2847 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2848 __count_numa_events(z, NUMA_HIT, nr_account); 2849 else { 2850 __count_numa_events(z, NUMA_MISS, nr_account); 2851 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2852 } 2853 __count_numa_events(z, local_stat, nr_account); 2854 #endif 2855 } 2856 2857 static __always_inline 2858 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2859 unsigned int order, unsigned int alloc_flags, 2860 int migratetype) 2861 { 2862 struct page *page; 2863 unsigned long flags; 2864 2865 do { 2866 page = NULL; 2867 spin_lock_irqsave(&zone->lock, flags); 2868 if (alloc_flags & ALLOC_HIGHATOMIC) 2869 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2870 if (!page) { 2871 page = __rmqueue(zone, order, migratetype, alloc_flags); 2872 2873 /* 2874 * If the allocation fails, allow OOM handling access 2875 * to HIGHATOMIC reserves as failing now is worse than 2876 * failing a high-order atomic allocation in the 2877 * future. 
2878 */
2879 if (!page && (alloc_flags & ALLOC_OOM))
2880 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2881
2882 if (!page) {
2883 spin_unlock_irqrestore(&zone->lock, flags);
2884 return NULL;
2885 }
2886 }
2887 spin_unlock_irqrestore(&zone->lock, flags);
2888 } while (check_new_pages(page, order));
2889
2890 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2891 zone_statistics(preferred_zone, zone, 1);
2892
2893 return page;
2894 }
2895
2896 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
2897 {
2898 int high, base_batch, batch, max_nr_alloc;
2899 int high_max, high_min;
2900
2901 base_batch = READ_ONCE(pcp->batch);
2902 high_min = READ_ONCE(pcp->high_min);
2903 high_max = READ_ONCE(pcp->high_max);
2904 high = pcp->high = clamp(pcp->high, high_min, high_max);
2905
2906 /* Check for PCP disabled or boot pageset */
2907 if (unlikely(high < base_batch))
2908 return 1;
2909
2910 if (order)
2911 batch = base_batch;
2912 else
2913 batch = (base_batch << pcp->alloc_factor);
2914
2915 /*
2916 * If we had a larger pcp->high, we could avoid allocating from
2917 * the zone.
2918 */
2919 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
2920 high = pcp->high = min(high + batch, high_max);
2921
2922 if (!order) {
2923 max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
2924 /*
2925 * Double the number of pages allocated each time there is a
2926 * subsequent allocation of order-0 pages without any freeing.
2927 */
2928 if (batch <= max_nr_alloc &&
2929 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
2930 pcp->alloc_factor++;
2931 batch = min(batch, max_nr_alloc);
2932 }
2933
2934 /*
2935 * Scale batch relative to order if batch implies free pages
2936 * can be stored on the PCP. Batch can be 1 for small zones or
2937 * for boot pagesets, which should never store free pages, as
2938 * the pages may belong to arbitrary zones.
2939 */
2940 if (batch > 1)
2941 batch = max(batch >> order, 2);
2942
2943 return batch;
2944 }
2945
2946 /* Remove page from the per-cpu list, caller must protect the list */
2947 static inline
2948 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
2949 int migratetype,
2950 unsigned int alloc_flags,
2951 struct per_cpu_pages *pcp,
2952 struct list_head *list)
2953 {
2954 struct page *page;
2955
2956 do {
2957 if (list_empty(list)) {
2958 int batch = nr_pcp_alloc(pcp, zone, order);
2959 int alloced;
2960
2961 alloced = rmqueue_bulk(zone, order,
2962 batch, list,
2963 migratetype, alloc_flags);
2964
2965 pcp->count += alloced << order;
2966 if (unlikely(list_empty(list)))
2967 return NULL;
2968 }
2969
2970 page = list_first_entry(list, struct page, pcp_list);
2971 list_del(&page->pcp_list);
2972 pcp->count -= 1 << order;
2973 } while (check_new_pages(page, order));
2974
2975 return page;
2976 }
2977
2978 /* Lock and remove page from the per-cpu list */
2979 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2980 struct zone *zone, unsigned int order,
2981 int migratetype, unsigned int alloc_flags)
2982 {
2983 struct per_cpu_pages *pcp;
2984 struct list_head *list;
2985 struct page *page;
2986 unsigned long __maybe_unused UP_flags;
2987
2988 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
2989 pcp_trylock_prepare(UP_flags);
2990 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2991 if (!pcp) {
2992 pcp_trylock_finish(UP_flags);
2993 return NULL;
2994 }
2995
2996 /*
2997 * On allocation, reduce the number of pages that are batch freed.
2998 * See nr_pcp_free() where free_count is used to scale the batch for
2999 * subsequent frees.
3000 */
3001 pcp->free_count >>= 1;
3002 list = &pcp->lists[order_to_pindex(migratetype, order)];
3003 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3004 pcp_spin_unlock(pcp);
3005 pcp_trylock_finish(UP_flags);
3006 if (page) {
3007 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3008 zone_statistics(preferred_zone, zone, 1);
3009 }
3010 return page;
3011 }
3012
3013 /*
3014 * Allocate a page from the given zone.
3015 * Use pcplists for THP or "cheap" high-order allocations.
3016 */
3017
3018 /*
3019 * Do not instrument rmqueue() with KMSAN. This function may call
3020 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
3021 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3022 * may call rmqueue() again, which will result in a deadlock.
3023 */
3024 __no_sanitize_memory
3025 static inline
3026 struct page *rmqueue(struct zone *preferred_zone,
3027 struct zone *zone, unsigned int order,
3028 gfp_t gfp_flags, unsigned int alloc_flags,
3029 int migratetype)
3030 {
3031 struct page *page;
3032
3033 /*
3034 * We most definitely don't want callers attempting to
3035 * allocate greater than order-1 page units with __GFP_NOFAIL.
3036 */
3037 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3038
3039 if (likely(pcp_allowed_order(order))) {
3040 page = rmqueue_pcplist(preferred_zone, zone, order,
3041 migratetype, alloc_flags);
3042 if (likely(page))
3043 goto out;
3044 }
3045
3046 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3047 migratetype);
3048
3049 out:
3050 /* Separate test+clear to avoid unnecessary atomics */
3051 if ((alloc_flags & ALLOC_KSWAPD) &&
3052 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3053 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3054 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3055 }
3056
3057 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3058 return page;
3059 }
3060
3061 static inline long __zone_watermark_unusable_free(struct zone *z,
3062 unsigned int order, unsigned int alloc_flags)
3063 {
3064 long unusable_free = (1 << order) - 1;
3065
3066 /*
3067 * If the caller does not have rights to reserves below the min
3068 * watermark then subtract the high-atomic reserves. This will
3069 * over-estimate the size of the atomic reserve but it avoids a search.
3070 */
3071 if (likely(!(alloc_flags & ALLOC_RESERVES)))
3072 unusable_free += z->nr_reserved_highatomic;
3073
3074 #ifdef CONFIG_CMA
3075 /* If allocation can't use CMA areas don't use free CMA pages */
3076 if (!(alloc_flags & ALLOC_CMA))
3077 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3078 #endif
3079
3080 return unusable_free;
3081 }
3082
3083 /*
3084 * Return true if free base pages are above 'mark'. For high-order checks it
3085 * will return true if the order-0 watermark is reached and there is at least
3086 * one free page of a suitable size. Checking now avoids taking the zone lock
3087 * to check in the allocation paths if no pages are free.
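 *
 * Worked example (illustrative numbers): with mark == 1024,
 * ALLOC_MIN_RESERVE lowers the effective min to 512, and
 * ALLOC_NON_BLOCK lowers it further to 384, mirroring the
 * min -= min / 2 and min -= min / 4 steps below.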
3088 */ 3089 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3090 int highest_zoneidx, unsigned int alloc_flags, 3091 long free_pages) 3092 { 3093 long min = mark; 3094 int o; 3095 3096 /* free_pages may go negative - that's OK */ 3097 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3098 3099 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3100 /* 3101 * __GFP_HIGH allows access to 50% of the min reserve as well 3102 * as OOM. 3103 */ 3104 if (alloc_flags & ALLOC_MIN_RESERVE) { 3105 min -= min / 2; 3106 3107 /* 3108 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3109 * access more reserves than just __GFP_HIGH. Other 3110 * non-blocking allocations requests such as GFP_NOWAIT 3111 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3112 * access to the min reserve. 3113 */ 3114 if (alloc_flags & ALLOC_NON_BLOCK) 3115 min -= min / 4; 3116 } 3117 3118 /* 3119 * OOM victims can try even harder than the normal reserve 3120 * users on the grounds that it's definitely going to be in 3121 * the exit path shortly and free memory. Any allocation it 3122 * makes during the free path will be small and short-lived. 3123 */ 3124 if (alloc_flags & ALLOC_OOM) 3125 min -= min / 2; 3126 } 3127 3128 /* 3129 * Check watermarks for an order-0 allocation request. If these 3130 * are not met, then a high-order request also cannot go ahead 3131 * even if a suitable page happened to be free. 3132 */ 3133 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3134 return false; 3135 3136 /* If this is an order-0 request then the watermark is fine */ 3137 if (!order) 3138 return true; 3139 3140 /* For a high-order request, check at least one suitable page is free */ 3141 for (o = order; o < NR_PAGE_ORDERS; o++) { 3142 struct free_area *area = &z->free_area[o]; 3143 int mt; 3144 3145 if (!area->nr_free) 3146 continue; 3147 3148 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3149 if (!free_area_empty(area, mt)) 3150 return true; 3151 } 3152 3153 #ifdef CONFIG_CMA 3154 if ((alloc_flags & ALLOC_CMA) && 3155 !free_area_empty(area, MIGRATE_CMA)) { 3156 return true; 3157 } 3158 #endif 3159 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3160 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3161 return true; 3162 } 3163 } 3164 return false; 3165 } 3166 3167 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3168 int highest_zoneidx, unsigned int alloc_flags) 3169 { 3170 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3171 zone_page_state(z, NR_FREE_PAGES)); 3172 } 3173 3174 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3175 unsigned long mark, int highest_zoneidx, 3176 unsigned int alloc_flags, gfp_t gfp_mask) 3177 { 3178 long free_pages; 3179 3180 free_pages = zone_page_state(z, NR_FREE_PAGES); 3181 3182 /* 3183 * Fast check for order-0 only. If this fails then the reserves 3184 * need to be calculated. 3185 */ 3186 if (!order) { 3187 long usable_free; 3188 long reserved; 3189 3190 usable_free = free_pages; 3191 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3192 3193 /* reserved may over estimate high-atomic reserves. 
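 * The over-estimate is safe: usable_free only shrinks because of it,
 * so a false negative here merely falls through to the exact
 * __zone_watermark_ok() check below.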
*/ 3194 usable_free -= min(usable_free, reserved); 3195 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3196 return true; 3197 } 3198 3199 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3200 free_pages)) 3201 return true; 3202 3203 /* 3204 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3205 * when checking the min watermark. The min watermark is the 3206 * point where boosting is ignored so that kswapd is woken up 3207 * when below the low watermark. 3208 */ 3209 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3210 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3211 mark = z->_watermark[WMARK_MIN]; 3212 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3213 alloc_flags, free_pages); 3214 } 3215 3216 return false; 3217 } 3218 3219 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3220 unsigned long mark, int highest_zoneidx) 3221 { 3222 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3223 3224 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3225 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3226 3227 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3228 free_pages); 3229 } 3230 3231 #ifdef CONFIG_NUMA 3232 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3233 3234 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3235 { 3236 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3237 node_reclaim_distance; 3238 } 3239 #else /* CONFIG_NUMA */ 3240 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3241 { 3242 return true; 3243 } 3244 #endif /* CONFIG_NUMA */ 3245 3246 /* 3247 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3248 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3249 * premature use of a lower zone may cause lowmem pressure problems that 3250 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3251 * probably too small. It only makes sense to spread allocations to avoid 3252 * fragmentation between the Normal and DMA32 zones. 3253 */ 3254 static inline unsigned int 3255 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3256 { 3257 unsigned int alloc_flags; 3258 3259 /* 3260 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3261 * to save a branch. 3262 */ 3263 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3264 3265 #ifdef CONFIG_ZONE_DMA32 3266 if (!zone) 3267 return alloc_flags; 3268 3269 if (zone_idx(zone) != ZONE_NORMAL) 3270 return alloc_flags; 3271 3272 /* 3273 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3274 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3275 * on UMA that if Normal is populated then so is DMA32. 
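 *
 * I.e. because ZONE_NORMAL == ZONE_DMA32 + 1 (asserted below),
 * "--zone" steps from this node's Normal zone to its DMA32 zone
 * without a zonelist lookup.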
3276 */
3277 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3278 if (nr_online_nodes > 1 && !populated_zone(--zone))
3279 return alloc_flags;
3280
3281 alloc_flags |= ALLOC_NOFRAGMENT;
3282 #endif /* CONFIG_ZONE_DMA32 */
3283 return alloc_flags;
3284 }
3285
3286 /* Must be called after current_gfp_context() which can change gfp_mask */
3287 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3288 unsigned int alloc_flags)
3289 {
3290 #ifdef CONFIG_CMA
3291 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3292 alloc_flags |= ALLOC_CMA;
3293 #endif
3294 return alloc_flags;
3295 }
3296
3297 /*
3298 * get_page_from_freelist goes through the zonelist trying to allocate
3299 * a page.
3300 */
3301 static struct page *
3302 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3303 const struct alloc_context *ac)
3304 {
3305 struct zoneref *z;
3306 struct zone *zone;
3307 struct pglist_data *last_pgdat = NULL;
3308 bool last_pgdat_dirty_ok = false;
3309 bool no_fallback;
3310
3311 retry:
3312 /*
3313 * Scan zonelist, looking for a zone with enough free pages.
3314 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
3315 */
3316 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3317 z = ac->preferred_zoneref;
3318 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3319 ac->nodemask) {
3320 struct page *page;
3321 unsigned long mark;
3322
3323 if (cpusets_enabled() &&
3324 (alloc_flags & ALLOC_CPUSET) &&
3325 !__cpuset_zone_allowed(zone, gfp_mask))
3326 continue;
3327 /*
3328 * When allocating a page cache page for writing, we
3329 * want to get it from a node that is within its dirty
3330 * limit, such that no single node holds more than its
3331 * proportional share of globally allowed dirty pages.
3332 * The dirty limits take into account the node's
3333 * lowmem reserves and high watermark so that kswapd
3334 * should be able to balance it without having to
3335 * write pages from its LRU list.
3336 *
3337 * XXX: For now, allow allocations to potentially
3338 * exceed the per-node dirty limit in the slowpath
3339 * (spread_dirty_pages unset) before going into reclaim,
3340 * which is important when on a NUMA setup the allowed
3341 * nodes are together not big enough to reach the
3342 * global limit. The proper fix for these situations
3343 * will require awareness of nodes in the
3344 * dirty-throttling and the flusher threads.
3345 */
3346 if (ac->spread_dirty_pages) {
3347 if (last_pgdat != zone->zone_pgdat) {
3348 last_pgdat = zone->zone_pgdat;
3349 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3350 }
3351
3352 if (!last_pgdat_dirty_ok)
3353 continue;
3354 }
3355
3356 if (no_fallback && nr_online_nodes > 1 &&
3357 zone != zonelist_zone(ac->preferred_zoneref)) {
3358 int local_nid;
3359
3360 /*
3361 * If moving to a remote node, retry but allow
3362 * fragmenting fallbacks. Locality is more important
3363 * than fragmentation avoidance.
3364 */
3365 local_nid = zonelist_node_idx(ac->preferred_zoneref);
3366 if (zone_to_nid(zone) != local_nid) {
3367 alloc_flags &= ~ALLOC_NOFRAGMENT;
3368 goto retry;
3369 }
3370 }
3371
3372 cond_accept_memory(zone, order);
3373
3374 /*
3375 * Detect whether the number of free pages is below the high
3376 * watermark. If so, we will decrease pcp->high and free
3377 * PCP pages in the free path to reduce the possibility of
3378 * premature page reclaim. Detection is done here to avoid
3379 * doing it in the hotter free path.
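 *
 * Flow of the checks below: a zone already flagged ZONE_BELOW_HIGH
 * skips straight to the allocation watermark check; otherwise the
 * zone is first checked against the high watermark, and a failure
 * there flags the zone for future frees.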
3380 */ 3381 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) 3382 goto check_alloc_wmark; 3383 3384 mark = high_wmark_pages(zone); 3385 if (zone_watermark_fast(zone, order, mark, 3386 ac->highest_zoneidx, alloc_flags, 3387 gfp_mask)) 3388 goto try_this_zone; 3389 else 3390 set_bit(ZONE_BELOW_HIGH, &zone->flags); 3391 3392 check_alloc_wmark: 3393 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3394 if (!zone_watermark_fast(zone, order, mark, 3395 ac->highest_zoneidx, alloc_flags, 3396 gfp_mask)) { 3397 int ret; 3398 3399 if (cond_accept_memory(zone, order)) 3400 goto try_this_zone; 3401 3402 /* 3403 * Watermark failed for this zone, but see if we can 3404 * grow this zone if it contains deferred pages. 3405 */ 3406 if (deferred_pages_enabled()) { 3407 if (_deferred_grow_zone(zone, order)) 3408 goto try_this_zone; 3409 } 3410 /* Checked here to keep the fast path fast */ 3411 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3412 if (alloc_flags & ALLOC_NO_WATERMARKS) 3413 goto try_this_zone; 3414 3415 if (!node_reclaim_enabled() || 3416 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) 3417 continue; 3418 3419 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3420 switch (ret) { 3421 case NODE_RECLAIM_NOSCAN: 3422 /* did not scan */ 3423 continue; 3424 case NODE_RECLAIM_FULL: 3425 /* scanned but unreclaimable */ 3426 continue; 3427 default: 3428 /* did we reclaim enough */ 3429 if (zone_watermark_ok(zone, order, mark, 3430 ac->highest_zoneidx, alloc_flags)) 3431 goto try_this_zone; 3432 3433 continue; 3434 } 3435 } 3436 3437 try_this_zone: 3438 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, 3439 gfp_mask, alloc_flags, ac->migratetype); 3440 if (page) { 3441 prep_new_page(page, order, gfp_mask, alloc_flags); 3442 3443 /* 3444 * If this is a high-order atomic allocation then check 3445 * if the pageblock should be reserved for the future 3446 */ 3447 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3448 reserve_highatomic_pageblock(page, order, zone); 3449 3450 return page; 3451 } else { 3452 if (cond_accept_memory(zone, order)) 3453 goto try_this_zone; 3454 3455 /* Try again if zone has deferred pages */ 3456 if (deferred_pages_enabled()) { 3457 if (_deferred_grow_zone(zone, order)) 3458 goto try_this_zone; 3459 } 3460 } 3461 } 3462 3463 /* 3464 * It's possible on a UMA machine to get through all zones that are 3465 * fragmented. If avoiding fragmentation, reset and try again. 3466 */ 3467 if (no_fallback) { 3468 alloc_flags &= ~ALLOC_NOFRAGMENT; 3469 goto retry; 3470 } 3471 3472 return NULL; 3473 } 3474 3475 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3476 { 3477 unsigned int filter = SHOW_MEM_FILTER_NODES; 3478 3479 /* 3480 * This documents exceptions given to allocations in certain 3481 * contexts that are allowed to allocate outside current's set 3482 * of allowed nodes. 3483 */ 3484 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3485 if (tsk_is_oom_victim(current) || 3486 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3487 filter &= ~SHOW_MEM_FILTER_NODES; 3488 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3489 filter &= ~SHOW_MEM_FILTER_NODES; 3490 3491 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3492 } 3493 3494 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3495 { 3496 struct va_format vaf; 3497 va_list args; 3498 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3499 3500 if ((gfp_mask & __GFP_NOWARN) || 3501 !__ratelimit(&nopage_rs) || 3502 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3503 return; 3504 3505 va_start(args, fmt); 3506 vaf.fmt = fmt; 3507 vaf.va = &args; 3508 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3509 current->comm, &vaf, gfp_mask, &gfp_mask, 3510 nodemask_pr_args(nodemask)); 3511 va_end(args); 3512 3513 cpuset_print_current_mems_allowed(); 3514 pr_cont("\n"); 3515 dump_stack(); 3516 warn_alloc_show_mem(gfp_mask, nodemask); 3517 } 3518 3519 static inline struct page * 3520 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3521 unsigned int alloc_flags, 3522 const struct alloc_context *ac) 3523 { 3524 struct page *page; 3525 3526 page = get_page_from_freelist(gfp_mask, order, 3527 alloc_flags|ALLOC_CPUSET, ac); 3528 /* 3529 * fallback to ignore cpuset restriction if our nodes 3530 * are depleted 3531 */ 3532 if (!page) 3533 page = get_page_from_freelist(gfp_mask, order, 3534 alloc_flags, ac); 3535 3536 return page; 3537 } 3538 3539 static inline struct page * 3540 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3541 const struct alloc_context *ac, unsigned long *did_some_progress) 3542 { 3543 struct oom_control oc = { 3544 .zonelist = ac->zonelist, 3545 .nodemask = ac->nodemask, 3546 .memcg = NULL, 3547 .gfp_mask = gfp_mask, 3548 .order = order, 3549 }; 3550 struct page *page; 3551 3552 *did_some_progress = 0; 3553 3554 /* 3555 * Acquire the oom lock. If that fails, somebody else is 3556 * making progress for us. 3557 */ 3558 if (!mutex_trylock(&oom_lock)) { 3559 *did_some_progress = 1; 3560 schedule_timeout_uninterruptible(1); 3561 return NULL; 3562 } 3563 3564 /* 3565 * Go through the zonelist yet one more time, keep very high watermark 3566 * here, this is only to catch a parallel oom killing, we must fail if 3567 * we're still under heavy pressure. But make sure that this reclaim 3568 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3569 * allocation which will never fail due to oom_lock already held. 3570 */ 3571 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3572 ~__GFP_DIRECT_RECLAIM, order, 3573 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3574 if (page) 3575 goto out; 3576 3577 /* Coredumps can quickly deplete all memory reserves */ 3578 if (current->flags & PF_DUMPCORE) 3579 goto out; 3580 /* The OOM killer will not help higher order allocs */ 3581 if (order > PAGE_ALLOC_COSTLY_ORDER) 3582 goto out; 3583 /* 3584 * We have already exhausted all our reclaim opportunities without any 3585 * success so it is time to admit defeat. We will skip the OOM killer 3586 * because it is very likely that the caller has a more reasonable 3587 * fallback than shooting a random task. 3588 * 3589 * The OOM killer may not free memory on a specific node. 3590 */ 3591 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3592 goto out; 3593 /* The OOM killer does not needlessly kill tasks for lowmem */ 3594 if (ac->highest_zoneidx < ZONE_NORMAL) 3595 goto out; 3596 if (pm_suspended_storage()) 3597 goto out; 3598 /* 3599 * XXX: GFP_NOFS allocations should rather fail than rely on 3600 * other request to make a forward progress. 3601 * We are in an unfortunate situation where out_of_memory cannot 3602 * do much for this context but let's try it to at least get 3603 * access to memory reserved if the current task is killed (see 3604 * out_of_memory). 
Once filesystems are ready to handle allocation 3605 * failures more gracefully we should just bail out here. 3606 */ 3607 3608 /* Exhausted what can be done so it's blame time */ 3609 if (out_of_memory(&oc) || 3610 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 3611 *did_some_progress = 1; 3612 3613 /* 3614 * Help non-failing allocations by giving them access to memory 3615 * reserves 3616 */ 3617 if (gfp_mask & __GFP_NOFAIL) 3618 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3619 ALLOC_NO_WATERMARKS, ac); 3620 } 3621 out: 3622 mutex_unlock(&oom_lock); 3623 return page; 3624 } 3625 3626 /* 3627 * Maximum number of compaction retries with a progress before OOM 3628 * killer is consider as the only way to move forward. 3629 */ 3630 #define MAX_COMPACT_RETRIES 16 3631 3632 #ifdef CONFIG_COMPACTION 3633 /* Try memory compaction for high-order allocations before reclaim */ 3634 static struct page * 3635 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3636 unsigned int alloc_flags, const struct alloc_context *ac, 3637 enum compact_priority prio, enum compact_result *compact_result) 3638 { 3639 struct page *page = NULL; 3640 unsigned long pflags; 3641 unsigned int noreclaim_flag; 3642 3643 if (!order) 3644 return NULL; 3645 3646 psi_memstall_enter(&pflags); 3647 delayacct_compact_start(); 3648 noreclaim_flag = memalloc_noreclaim_save(); 3649 3650 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3651 prio, &page); 3652 3653 memalloc_noreclaim_restore(noreclaim_flag); 3654 psi_memstall_leave(&pflags); 3655 delayacct_compact_end(); 3656 3657 if (*compact_result == COMPACT_SKIPPED) 3658 return NULL; 3659 /* 3660 * At least in one zone compaction wasn't deferred or skipped, so let's 3661 * count a compaction stall 3662 */ 3663 count_vm_event(COMPACTSTALL); 3664 3665 /* Prep a captured page if available */ 3666 if (page) 3667 prep_new_page(page, order, gfp_mask, alloc_flags); 3668 3669 /* Try get a page from the freelist if available */ 3670 if (!page) 3671 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3672 3673 if (page) { 3674 struct zone *zone = page_zone(page); 3675 3676 zone->compact_blockskip_flush = false; 3677 compaction_defer_reset(zone, order, true); 3678 count_vm_event(COMPACTSUCCESS); 3679 return page; 3680 } 3681 3682 /* 3683 * It's bad if compaction run occurs and fails. The most likely reason 3684 * is that pages exist, but not enough to satisfy watermarks. 3685 */ 3686 count_vm_event(COMPACTFAIL); 3687 3688 cond_resched(); 3689 3690 return NULL; 3691 } 3692 3693 static inline bool 3694 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3695 enum compact_result compact_result, 3696 enum compact_priority *compact_priority, 3697 int *compaction_retries) 3698 { 3699 int max_retries = MAX_COMPACT_RETRIES; 3700 int min_priority; 3701 bool ret = false; 3702 int retries = *compaction_retries; 3703 enum compact_priority priority = *compact_priority; 3704 3705 if (!order) 3706 return false; 3707 3708 if (fatal_signal_pending(current)) 3709 return false; 3710 3711 /* 3712 * Compaction was skipped due to a lack of free order-0 3713 * migration targets. Continue if reclaim can help. 3714 */ 3715 if (compact_result == COMPACT_SKIPPED) { 3716 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3717 goto out; 3718 } 3719 3720 /* 3721 * Compaction managed to coalesce some page blocks, but the 3722 * allocation failed presumably due to a race. Retry some. 
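 *
 * E.g. with MAX_COMPACT_RETRIES == 16, a costly (order >
 * PAGE_ALLOC_COSTLY_ORDER) request that keeps losing this race is
 * retried at most 16 / 4 == 4 times before the priority is raised.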
3723 */ 3724 if (compact_result == COMPACT_SUCCESS) { 3725 /* 3726 * !costly requests are much more important than 3727 * __GFP_RETRY_MAYFAIL costly ones because they are de 3728 * facto nofail and invoke the OOM killer to move on, 3729 * while costly requests can fail and their users are 3730 * prepared to cope with that. Allowing 1/4 of the 3731 * retries is rather arbitrary, but we would need much 3732 * more detailed feedback from compaction to make a 3733 * better decision. 3734 */ 3735 if (order > PAGE_ALLOC_COSTLY_ORDER) 3736 max_retries /= 4; 3737 3738 if (++(*compaction_retries) <= max_retries) { 3739 ret = true; 3740 goto out; 3741 } 3742 } 3743 3744 /* 3745 * Compaction failed. Retry with increasing priority. 3746 */ 3747 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3748 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3749 3750 if (*compact_priority > min_priority) { 3751 (*compact_priority)--; 3752 *compaction_retries = 0; 3753 ret = true; 3754 } 3755 out: 3756 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3757 return ret; 3758 } 3759 #else 3760 static inline struct page * 3761 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3762 unsigned int alloc_flags, const struct alloc_context *ac, 3763 enum compact_priority prio, enum compact_result *compact_result) 3764 { 3765 *compact_result = COMPACT_SKIPPED; 3766 return NULL; 3767 } 3768 3769 static inline bool 3770 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3771 enum compact_result compact_result, 3772 enum compact_priority *compact_priority, 3773 int *compaction_retries) 3774 { 3775 struct zone *zone; 3776 struct zoneref *z; 3777 3778 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3779 return false; 3780 3781 /* 3782 * There are setups with compaction disabled which would prefer to loop 3783 * inside the allocator rather than hit the OOM killer prematurely. 3784 * Let's give them good hope and keep retrying while the order-0 3785 * watermarks are OK.
3785 */ 3786 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3787 ac->highest_zoneidx, ac->nodemask) { 3788 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3789 ac->highest_zoneidx, alloc_flags)) 3790 return true; 3791 } 3792 return false; 3793 } 3794 #endif /* CONFIG_COMPACTION */ 3795 3796 #ifdef CONFIG_LOCKDEP 3797 static struct lockdep_map __fs_reclaim_map = 3798 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3799 3800 static bool __need_reclaim(gfp_t gfp_mask) 3801 { 3802 /* no reclaim without waiting on it */ 3803 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3804 return false; 3805 3806 /* this guy won't enter reclaim */ 3807 if (current->flags & PF_MEMALLOC) 3808 return false; 3809 3810 if (gfp_mask & __GFP_NOLOCKDEP) 3811 return false; 3812 3813 return true; 3814 } 3815 3816 void __fs_reclaim_acquire(unsigned long ip) 3817 { 3818 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3819 } 3820 3821 void __fs_reclaim_release(unsigned long ip) 3822 { 3823 lock_release(&__fs_reclaim_map, ip); 3824 } 3825 3826 void fs_reclaim_acquire(gfp_t gfp_mask) 3827 { 3828 gfp_mask = current_gfp_context(gfp_mask); 3829 3830 if (__need_reclaim(gfp_mask)) { 3831 if (gfp_mask & __GFP_FS) 3832 __fs_reclaim_acquire(_RET_IP_); 3833 3834 #ifdef CONFIG_MMU_NOTIFIER 3835 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3836 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3837 #endif 3838 3839 } 3840 } 3841 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3842 3843 void fs_reclaim_release(gfp_t gfp_mask) 3844 { 3845 gfp_mask = current_gfp_context(gfp_mask); 3846 3847 if (__need_reclaim(gfp_mask)) { 3848 if (gfp_mask & __GFP_FS) 3849 __fs_reclaim_release(_RET_IP_); 3850 } 3851 } 3852 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3853 #endif 3854 3855 /* 3856 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3857 * have been rebuilt so allocation retries. Reader side does not lock and 3858 * retries the allocation if zonelist changes. Writer side is protected by the 3859 * embedded spin_lock. 
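 *
 * The reader-side pattern is the usual seqlock one. A minimal sketch of
 * how the slow path below uses it:
 *
 *	cookie = zonelist_iter_begin();
 *	... allocation attempts that may fail ...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;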
3860 */ 3861 static DEFINE_SEQLOCK(zonelist_update_seq); 3862 3863 static unsigned int zonelist_iter_begin(void) 3864 { 3865 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3866 return read_seqbegin(&zonelist_update_seq); 3867 3868 return 0; 3869 } 3870 3871 static unsigned int check_retry_zonelist(unsigned int seq) 3872 { 3873 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3874 return read_seqretry(&zonelist_update_seq, seq); 3875 3876 return seq; 3877 } 3878 3879 /* Perform direct synchronous page reclaim */ 3880 static unsigned long 3881 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3882 const struct alloc_context *ac) 3883 { 3884 unsigned int noreclaim_flag; 3885 unsigned long progress; 3886 3887 cond_resched(); 3888 3889 /* We now go into synchronous reclaim */ 3890 cpuset_memory_pressure_bump(); 3891 fs_reclaim_acquire(gfp_mask); 3892 noreclaim_flag = memalloc_noreclaim_save(); 3893 3894 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3895 ac->nodemask); 3896 3897 memalloc_noreclaim_restore(noreclaim_flag); 3898 fs_reclaim_release(gfp_mask); 3899 3900 cond_resched(); 3901 3902 return progress; 3903 } 3904 3905 /* The really slow allocator path where we enter direct reclaim */ 3906 static inline struct page * 3907 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3908 unsigned int alloc_flags, const struct alloc_context *ac, 3909 unsigned long *did_some_progress) 3910 { 3911 struct page *page = NULL; 3912 unsigned long pflags; 3913 bool drained = false; 3914 3915 psi_memstall_enter(&pflags); 3916 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3917 if (unlikely(!(*did_some_progress))) 3918 goto out; 3919 3920 retry: 3921 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3922 3923 /* 3924 * If an allocation failed after direct reclaim, it could be because 3925 * pages are pinned on the per-cpu lists or in high alloc reserves. 3926 * Shrink them and try again 3927 */ 3928 if (!page && !drained) { 3929 unreserve_highatomic_pageblock(ac, false); 3930 drain_all_pages(NULL); 3931 drained = true; 3932 goto retry; 3933 } 3934 out: 3935 psi_memstall_leave(&pflags); 3936 3937 return page; 3938 } 3939 3940 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 3941 const struct alloc_context *ac) 3942 { 3943 struct zoneref *z; 3944 struct zone *zone; 3945 pg_data_t *last_pgdat = NULL; 3946 enum zone_type highest_zoneidx = ac->highest_zoneidx; 3947 3948 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 3949 ac->nodemask) { 3950 if (!managed_zone(zone)) 3951 continue; 3952 if (last_pgdat != zone->zone_pgdat) { 3953 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 3954 last_pgdat = zone->zone_pgdat; 3955 } 3956 } 3957 } 3958 3959 static inline unsigned int 3960 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 3961 { 3962 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3963 3964 /* 3965 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 3966 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3967 * to save two branches. 3968 */ 3969 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 3970 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 3971 3972 /* 3973 * The caller may dip into page reserves a bit more if the caller 3974 * cannot run direct reclaim, or if the caller has realtime scheduling 3975 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3976 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
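 *
 * For example, GFP_ATOMIC is __GFP_HIGH | __GFP_KSWAPD_RECLAIM, so the
 * bitwise copy below turns it into ALLOC_MIN_RESERVE | ALLOC_KSWAPD
 * without any branches; ALLOC_NON_BLOCK is added separately further down.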
3977 */ 3978 alloc_flags |= (__force int) 3979 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 3980 3981 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 3982 /* 3983 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3984 * if it can't schedule. 3985 */ 3986 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 3987 alloc_flags |= ALLOC_NON_BLOCK; 3988 3989 if (order > 0) 3990 alloc_flags |= ALLOC_HIGHATOMIC; 3991 } 3992 3993 /* 3994 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 3995 * GFP_ATOMIC) rather than fail, see the comment for 3996 * cpuset_node_allowed(). 3997 */ 3998 if (alloc_flags & ALLOC_MIN_RESERVE) 3999 alloc_flags &= ~ALLOC_CPUSET; 4000 } else if (unlikely(rt_task(current)) && in_task()) 4001 alloc_flags |= ALLOC_MIN_RESERVE; 4002 4003 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4004 4005 return alloc_flags; 4006 } 4007 4008 static bool oom_reserves_allowed(struct task_struct *tsk) 4009 { 4010 if (!tsk_is_oom_victim(tsk)) 4011 return false; 4012 4013 /* 4014 * !MMU doesn't have oom reaper so give access to memory reserves 4015 * only to the thread with TIF_MEMDIE set 4016 */ 4017 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4018 return false; 4019 4020 return true; 4021 } 4022 4023 /* 4024 * Distinguish requests which really need access to full memory 4025 * reserves from oom victims which can live with a portion of it 4026 */ 4027 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4028 { 4029 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4030 return 0; 4031 if (gfp_mask & __GFP_MEMALLOC) 4032 return ALLOC_NO_WATERMARKS; 4033 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4034 return ALLOC_NO_WATERMARKS; 4035 if (!in_interrupt()) { 4036 if (current->flags & PF_MEMALLOC) 4037 return ALLOC_NO_WATERMARKS; 4038 else if (oom_reserves_allowed(current)) 4039 return ALLOC_OOM; 4040 } 4041 4042 return 0; 4043 } 4044 4045 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4046 { 4047 return !!__gfp_pfmemalloc_flags(gfp_mask); 4048 } 4049 4050 /* 4051 * Checks whether it makes sense to retry the reclaim to make a forward progress 4052 * for the given allocation request. 4053 * 4054 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4055 * without success, or when we couldn't even meet the watermark if we 4056 * reclaimed all remaining pages on the LRU lists. 4057 * 4058 * Returns true if a retry is viable or false to enter the oom path. 4059 */ 4060 static inline bool 4061 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4062 struct alloc_context *ac, int alloc_flags, 4063 bool did_some_progress, int *no_progress_loops) 4064 { 4065 struct zone *zone; 4066 struct zoneref *z; 4067 bool ret = false; 4068 4069 /* 4070 * Costly allocations might have made a progress but this doesn't mean 4071 * their order will become available due to high fragmentation so 4072 * always increment the no progress counter for them 4073 */ 4074 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4075 *no_progress_loops = 0; 4076 else 4077 (*no_progress_loops)++; 4078 4079 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 4080 goto out; 4081 4082 4083 /* 4084 * Keep reclaiming pages while there is a chance this will lead 4085 * somewhere. If none of the target zones can satisfy our allocation 4086 * request even if all reclaimable pages are considered then we are 4087 * screwed and have to go OOM. 
4088 */ 4089 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4090 ac->highest_zoneidx, ac->nodemask) { 4091 unsigned long available; 4092 unsigned long reclaimable; 4093 unsigned long min_wmark = min_wmark_pages(zone); 4094 bool wmark; 4095 4096 available = reclaimable = zone_reclaimable_pages(zone); 4097 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4098 4099 /* 4100 * Would the allocation succeed if we reclaimed all 4101 * reclaimable pages? 4102 */ 4103 wmark = __zone_watermark_ok(zone, order, min_wmark, 4104 ac->highest_zoneidx, alloc_flags, available); 4105 trace_reclaim_retry_zone(z, order, reclaimable, 4106 available, min_wmark, *no_progress_loops, wmark); 4107 if (wmark) { 4108 ret = true; 4109 break; 4110 } 4111 } 4112 4113 /* 4114 * Memory allocation/reclaim might be called from a WQ context and the 4115 * current implementation of the WQ concurrency control doesn't 4116 * recognize that a particular WQ is congested if the worker thread is 4117 * looping without ever sleeping. Therefore we have to do a short sleep 4118 * here rather than calling cond_resched(). 4119 */ 4120 if (current->flags & PF_WQ_WORKER) 4121 schedule_timeout_uninterruptible(1); 4122 else 4123 cond_resched(); 4124 out: 4125 /* Before OOM, exhaust highatomic_reserve */ 4126 if (!ret) 4127 return unreserve_highatomic_pageblock(ac, true); 4128 4129 return ret; 4130 } 4131 4132 static inline bool 4133 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4134 { 4135 /* 4136 * It's possible that cpuset's mems_allowed and the nodemask from 4137 * mempolicy don't intersect. This should normally be dealt with by 4138 * policy_nodemask(), but it's possible to race with a cpuset update in 4139 * such a way that the check therein was true, and then it became false 4140 * before we got our cpuset_mems_cookie here. 4141 * This assumes that for all allocations, ac->nodemask can come only 4142 * from an MPOL_BIND mempolicy (which is documented to be ignored 4143 * when it does not intersect with the cpuset restrictions) or the 4144 * caller can deal with a violated nodemask. 4145 */ 4146 if (cpusets_enabled() && ac->nodemask && 4147 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4148 ac->nodemask = NULL; 4149 return true; 4150 } 4151 4152 /* 4153 * When updating a task's mems_allowed or mempolicy nodemask, it is 4154 * possible to race with parallel threads in such a way that our 4155 * allocation can fail while the mask is being updated. If we are about 4156 * to fail, check if the cpuset changed during allocation and if so, 4157 * retry.
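 *
 * (read_mems_allowed_retry() compares the mems_allowed seqcount against
 * the cookie taken by read_mems_allowed_begin(), so it only fires when
 * an update really raced with this allocation attempt.)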
4158 */ 4159 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4160 return true; 4161 4162 return false; 4163 } 4164 4165 static inline struct page * 4166 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4167 struct alloc_context *ac) 4168 { 4169 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4170 bool can_compact = gfp_compaction_allowed(gfp_mask); 4171 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4172 struct page *page = NULL; 4173 unsigned int alloc_flags; 4174 unsigned long did_some_progress; 4175 enum compact_priority compact_priority; 4176 enum compact_result compact_result; 4177 int compaction_retries; 4178 int no_progress_loops; 4179 unsigned int cpuset_mems_cookie; 4180 unsigned int zonelist_iter_cookie; 4181 int reserve_flags; 4182 4183 restart: 4184 compaction_retries = 0; 4185 no_progress_loops = 0; 4186 compact_priority = DEF_COMPACT_PRIORITY; 4187 cpuset_mems_cookie = read_mems_allowed_begin(); 4188 zonelist_iter_cookie = zonelist_iter_begin(); 4189 4190 /* 4191 * The fast path uses conservative alloc_flags to succeed only until 4192 * kswapd needs to be woken up, and to avoid the cost of setting up 4193 * alloc_flags precisely. So we do that now. 4194 */ 4195 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 4196 4197 /* 4198 * We need to recalculate the starting point for the zonelist iterator 4199 * because we might have used a different nodemask in the fast path, or 4200 * there was a cpuset modification and we are retrying - otherwise we 4201 * could end up iterating over non-eligible zones endlessly. 4202 */ 4203 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4204 ac->highest_zoneidx, ac->nodemask); 4205 if (!zonelist_zone(ac->preferred_zoneref)) 4206 goto nopage; 4207 4208 /* 4209 * Check for insane configurations where the cpuset doesn't contain 4210 * any suitable zone to satisfy the request - e.g. non-movable 4211 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4212 */ 4213 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4214 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4215 ac->highest_zoneidx, 4216 &cpuset_current_mems_allowed); 4217 if (!zonelist_zone(z)) 4218 goto nopage; 4219 } 4220 4221 if (alloc_flags & ALLOC_KSWAPD) 4222 wake_all_kswapds(order, gfp_mask, ac); 4223 4224 /* 4225 * The adjusted alloc_flags might result in immediate success, so try 4226 * that first 4227 */ 4228 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4229 if (page) 4230 goto got_pg; 4231 4232 /* 4233 * For costly allocations, try direct compaction first, as it's likely 4234 * that we have enough base pages and don't need to reclaim. For non- 4235 * movable high-order allocations, do that as well, as compaction will 4236 * try to prevent permanent fragmentation by migrating from blocks of 4237 * the same migratetype. 4238 * Don't try this for allocations that are allowed to ignore 4239 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4240 */ 4241 if (can_direct_reclaim && can_compact && 4242 (costly_order || 4243 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4244 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4245 page = __alloc_pages_direct_compact(gfp_mask, order, 4246 alloc_flags, ac, 4247 INIT_COMPACT_PRIORITY, 4248 &compact_result); 4249 if (page) 4250 goto got_pg; 4251 4252 /* 4253 * Checks for costly allocations with __GFP_NORETRY, which 4254 * includes some THP page fault allocations 4255 */ 4256 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4257 /* 4258 * If allocating entire pageblock(s) and compaction 4259 * failed because all zones are below low watermarks 4260 * or is prohibited because it recently failed at this 4261 * order, fail immediately unless the allocator has 4262 * requested compaction and reclaim retry. 4263 * 4264 * Reclaim is 4265 * - potentially very expensive because zones are far 4266 * below their low watermarks or this is part of very 4267 * bursty high order allocations, 4268 * - not guaranteed to help because isolate_freepages() 4269 * may not iterate over freed pages as part of its 4270 * linear scan, and 4271 * - unlikely to make entire pageblocks free on its 4272 * own. 4273 */ 4274 if (compact_result == COMPACT_SKIPPED || 4275 compact_result == COMPACT_DEFERRED) 4276 goto nopage; 4277 4278 /* 4279 * Looks like reclaim/compaction is worth trying, but 4280 * sync compaction could be very expensive, so keep 4281 * using async compaction. 4282 */ 4283 compact_priority = INIT_COMPACT_PRIORITY; 4284 } 4285 } 4286 4287 retry: 4288 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4289 if (alloc_flags & ALLOC_KSWAPD) 4290 wake_all_kswapds(order, gfp_mask, ac); 4291 4292 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4293 if (reserve_flags) 4294 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4295 (alloc_flags & ALLOC_KSWAPD); 4296 4297 /* 4298 * Reset the nodemask and zonelist iterators if memory policies can be 4299 * ignored. These allocations are high priority and system rather than 4300 * user oriented. 
4301 */ 4302 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4303 ac->nodemask = NULL; 4304 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4305 ac->highest_zoneidx, ac->nodemask); 4306 } 4307 4308 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4309 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4310 if (page) 4311 goto got_pg; 4312 4313 /* Caller is not willing to reclaim, we can't balance anything */ 4314 if (!can_direct_reclaim) 4315 goto nopage; 4316 4317 /* Avoid recursion of direct reclaim */ 4318 if (current->flags & PF_MEMALLOC) 4319 goto nopage; 4320 4321 /* Try direct reclaim and then allocating */ 4322 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4323 &did_some_progress); 4324 if (page) 4325 goto got_pg; 4326 4327 /* Try direct compaction and then allocating */ 4328 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4329 compact_priority, &compact_result); 4330 if (page) 4331 goto got_pg; 4332 4333 /* Do not loop if specifically requested */ 4334 if (gfp_mask & __GFP_NORETRY) 4335 goto nopage; 4336 4337 /* 4338 * Do not retry costly high order allocations unless they are 4339 * __GFP_RETRY_MAYFAIL and we can compact 4340 */ 4341 if (costly_order && (!can_compact || 4342 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4343 goto nopage; 4344 4345 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4346 did_some_progress > 0, &no_progress_loops)) 4347 goto retry; 4348 4349 /* 4350 * It doesn't make any sense to retry compaction if order-0 reclaim is 4351 * not able to make any progress, because the current implementation 4352 * of compaction depends on a sufficient amount of free memory 4353 * (see __compaction_suitable) 4354 */ 4355 if (did_some_progress > 0 && can_compact && 4356 should_compact_retry(ac, order, alloc_flags, 4357 compact_result, &compact_priority, 4358 &compaction_retries)) 4359 goto retry; 4360 4361 4362 /* 4363 * Deal with possible cpuset update races or zonelist updates to avoid 4364 * an unnecessary OOM kill. 4365 */ 4366 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4367 check_retry_zonelist(zonelist_iter_cookie)) 4368 goto restart; 4369 4370 /* Reclaim has failed us, start killing things */ 4371 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4372 if (page) 4373 goto got_pg; 4374 4375 /* Avoid allocations with no watermarks from looping endlessly */ 4376 if (tsk_is_oom_victim(current) && 4377 (alloc_flags & ALLOC_OOM || 4378 (gfp_mask & __GFP_NOMEMALLOC))) 4379 goto nopage; 4380 4381 /* Retry as long as the OOM killer is making progress */ 4382 if (did_some_progress) { 4383 no_progress_loops = 0; 4384 goto retry; 4385 } 4386 4387 nopage: 4388 /* 4389 * Deal with possible cpuset update races or zonelist updates to avoid 4390 * an unnecessary OOM kill.
4391 */ 4392 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4393 check_retry_zonelist(zonelist_iter_cookie)) 4394 goto restart; 4395 4396 /* 4397 * Make sure that a __GFP_NOFAIL request doesn't leak out, and make 4398 * sure we always retry 4399 */ 4400 if (gfp_mask & __GFP_NOFAIL) { 4401 /* 4402 * All existing users of __GFP_NOFAIL are blockable, so warn 4403 * about any new users that actually require GFP_NOWAIT 4404 */ 4405 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask)) 4406 goto fail; 4407 4408 /* 4409 * A PF_MEMALLOC request from this context is rather bizarre 4410 * because we cannot reclaim anything and can only loop waiting 4411 * for somebody else to do the work for us 4412 */ 4413 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); 4414 4415 /* 4416 * Non-failing costly orders are a hard requirement which we 4417 * are not well prepared for, so let's warn about these users 4418 * so that we can identify them and convert them to something 4419 * else. 4420 */ 4421 WARN_ON_ONCE_GFP(costly_order, gfp_mask); 4422 4423 /* 4424 * Help non-failing allocations by giving some access to memory 4425 * reserves normally used for high priority non-blocking 4426 * allocations but do not use ALLOC_NO_WATERMARKS because this 4427 * could deplete whole memory reserves which would just make 4428 * the situation worse. 4429 */ 4430 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4431 if (page) 4432 goto got_pg; 4433 4434 cond_resched(); 4435 goto retry; 4436 } 4437 fail: 4438 warn_alloc(gfp_mask, ac->nodemask, 4439 "page allocation failure: order:%u", order); 4440 got_pg: 4441 return page; 4442 } 4443 4444 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4445 int preferred_nid, nodemask_t *nodemask, 4446 struct alloc_context *ac, gfp_t *alloc_gfp, 4447 unsigned int *alloc_flags) 4448 { 4449 ac->highest_zoneidx = gfp_zone(gfp_mask); 4450 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4451 ac->nodemask = nodemask; 4452 ac->migratetype = gfp_migratetype(gfp_mask); 4453 4454 if (cpusets_enabled()) { 4455 *alloc_gfp |= __GFP_HARDWALL; 4456 /* 4457 * When we are in interrupt context, the cpuset of the current 4458 * task is irrelevant. It means that any node is OK. 4459 */ 4460 if (in_task() && !ac->nodemask) 4461 ac->nodemask = &cpuset_current_mems_allowed; 4462 else 4463 *alloc_flags |= ALLOC_CPUSET; 4464 } 4465 4466 might_alloc(gfp_mask); 4467 4468 if (should_fail_alloc_page(gfp_mask, order)) 4469 return false; 4470 4471 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 4472 4473 /* Dirty zone balancing only done in the fast path */ 4474 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4475 4476 /* 4477 * The preferred zone is used for statistics but crucially it is 4478 * also used as the starting point for the zonelist iterator. It 4479 * may get reset for allocations that ignore memory policies.
4480 */ 4481 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4482 ac->highest_zoneidx, ac->nodemask); 4483 4484 return true; 4485 } 4486 4487 /* 4488 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 4489 * @gfp: GFP flags for the allocation 4490 * @preferred_nid: The preferred NUMA node ID to allocate from 4491 * @nodemask: Set of nodes to allocate from, may be NULL 4492 * @nr_pages: The number of pages desired on the list or array 4493 * @page_list: Optional list to store the allocated pages 4494 * @page_array: Optional array to store the pages 4495 * 4496 * This is a batched version of the page allocator that attempts to 4497 * allocate nr_pages quickly. Pages are added to page_list if page_list 4498 * is not NULL, otherwise it is assumed that the page_array is valid. 4499 * 4500 * For lists, nr_pages is the number of pages that should be allocated. 4501 * 4502 * For arrays, only NULL elements are populated with pages and nr_pages 4503 * is the maximum number of pages that will be stored in the array. 4504 * 4505 * Returns the number of pages on the list or array. 4506 */ 4507 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, 4508 nodemask_t *nodemask, int nr_pages, 4509 struct list_head *page_list, 4510 struct page **page_array) 4511 { 4512 struct page *page; 4513 unsigned long __maybe_unused UP_flags; 4514 struct zone *zone; 4515 struct zoneref *z; 4516 struct per_cpu_pages *pcp; 4517 struct list_head *pcp_list; 4518 struct alloc_context ac; 4519 gfp_t alloc_gfp; 4520 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4521 int nr_populated = 0, nr_account = 0; 4522 4523 /* 4524 * Skip populated array elements to determine if any pages need 4525 * to be allocated before disabling IRQs. 4526 */ 4527 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 4528 nr_populated++; 4529 4530 /* No pages requested? */ 4531 if (unlikely(nr_pages <= 0)) 4532 goto out; 4533 4534 /* Already populated array? */ 4535 if (unlikely(page_array && nr_pages - nr_populated == 0)) 4536 goto out; 4537 4538 /* Bulk allocator does not support memcg accounting. */ 4539 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4540 goto failed; 4541 4542 /* Use the single page allocator for one page. */ 4543 if (nr_pages - nr_populated == 1) 4544 goto failed; 4545 4546 #ifdef CONFIG_PAGE_OWNER 4547 /* 4548 * PAGE_OWNER may recurse into the allocator to allocate space to 4549 * save the stack with pagesets.lock held. Releasing/reacquiring 4550 * removes much of the performance benefit of bulk allocation so 4551 * force the caller to allocate one page at a time as it'll have 4552 * similar performance to added complexity to the bulk allocator. 4553 */ 4554 if (static_branch_unlikely(&page_owner_inited)) 4555 goto failed; 4556 #endif 4557 4558 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4559 gfp &= gfp_allowed_mask; 4560 alloc_gfp = gfp; 4561 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4562 goto out; 4563 gfp = alloc_gfp; 4564 4565 /* Find an allowed local zone that meets the low watermark. 
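 * The check below adds nr_pages on top of the selected watermark, so
 * that taking the whole batch from the pcp lists cannot push the zone
 * below that watermark.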
*/ 4566 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 4567 unsigned long mark; 4568 4569 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 4570 !__cpuset_zone_allowed(zone, gfp)) { 4571 continue; 4572 } 4573 4574 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && 4575 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { 4576 goto failed; 4577 } 4578 4579 cond_accept_memory(zone, 0); 4580 retry_this_zone: 4581 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4582 if (zone_watermark_fast(zone, 0, mark, 4583 zonelist_zone_idx(ac.preferred_zoneref), 4584 alloc_flags, gfp)) { 4585 break; 4586 } 4587 4588 if (cond_accept_memory(zone, 0)) 4589 goto retry_this_zone; 4590 4591 /* Try again if zone has deferred pages */ 4592 if (deferred_pages_enabled()) { 4593 if (_deferred_grow_zone(zone, 0)) 4594 goto retry_this_zone; 4595 } 4596 } 4597 4598 /* 4599 * If there are no allowed local zones that meets the watermarks then 4600 * try to allocate a single page and reclaim if necessary. 4601 */ 4602 if (unlikely(!zone)) 4603 goto failed; 4604 4605 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 4606 pcp_trylock_prepare(UP_flags); 4607 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4608 if (!pcp) 4609 goto failed_irq; 4610 4611 /* Attempt the batch allocation */ 4612 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4613 while (nr_populated < nr_pages) { 4614 4615 /* Skip existing pages */ 4616 if (page_array && page_array[nr_populated]) { 4617 nr_populated++; 4618 continue; 4619 } 4620 4621 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4622 pcp, pcp_list); 4623 if (unlikely(!page)) { 4624 /* Try and allocate at least one page */ 4625 if (!nr_account) { 4626 pcp_spin_unlock(pcp); 4627 goto failed_irq; 4628 } 4629 break; 4630 } 4631 nr_account++; 4632 4633 prep_new_page(page, 0, gfp, 0); 4634 if (page_list) 4635 list_add(&page->lru, page_list); 4636 else 4637 page_array[nr_populated] = page; 4638 nr_populated++; 4639 } 4640 4641 pcp_spin_unlock(pcp); 4642 pcp_trylock_finish(UP_flags); 4643 4644 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4645 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); 4646 4647 out: 4648 return nr_populated; 4649 4650 failed_irq: 4651 pcp_trylock_finish(UP_flags); 4652 4653 failed: 4654 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); 4655 if (page) { 4656 if (page_list) 4657 list_add(&page->lru, page_list); 4658 else 4659 page_array[nr_populated] = page; 4660 nr_populated++; 4661 } 4662 4663 goto out; 4664 } 4665 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); 4666 4667 /* 4668 * This is the 'heart' of the zoned buddy allocator. 4669 */ 4670 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, 4671 int preferred_nid, nodemask_t *nodemask) 4672 { 4673 struct page *page; 4674 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4675 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4676 struct alloc_context ac = { }; 4677 4678 /* 4679 * There are several places where we assume that the order value is sane 4680 * so bail out early if the request is out of bound. 4681 */ 4682 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) 4683 return NULL; 4684 4685 gfp &= gfp_allowed_mask; 4686 /* 4687 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4688 * resp. 
GFP_NOIO which has to be inherited for all allocation requests 4689 * from a particular context which has been marked by 4690 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4691 * movable zones are not used during allocation. 4692 */ 4693 gfp = current_gfp_context(gfp); 4694 alloc_gfp = gfp; 4695 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4696 &alloc_gfp, &alloc_flags)) 4697 return NULL; 4698 4699 /* 4700 * Forbid the first pass from falling back to types that fragment 4701 * memory until all local zones are considered. 4702 */ 4703 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp); 4704 4705 /* First allocation attempt */ 4706 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4707 if (likely(page)) 4708 goto out; 4709 4710 alloc_gfp = gfp; 4711 ac.spread_dirty_pages = false; 4712 4713 /* 4714 * Restore the original nodemask if it was potentially replaced with 4715 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 4716 */ 4717 ac.nodemask = nodemask; 4718 4719 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4720 4721 out: 4722 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4723 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4724 __free_pages(page, order); 4725 page = NULL; 4726 } 4727 4728 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4729 kmsan_alloc_page(page, order, alloc_gfp); 4730 4731 return page; 4732 } 4733 EXPORT_SYMBOL(__alloc_pages_noprof); 4734 4735 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 4736 nodemask_t *nodemask) 4737 { 4738 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, 4739 preferred_nid, nodemask); 4740 return page_rmappable_folio(page); 4741 } 4742 EXPORT_SYMBOL(__folio_alloc_noprof); 4743 4744 /* 4745 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4746 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4747 * you need to access high mem. 4748 */ 4749 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) 4750 { 4751 struct page *page; 4752 4753 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); 4754 if (!page) 4755 return 0; 4756 return (unsigned long) page_address(page); 4757 } 4758 EXPORT_SYMBOL(get_free_pages_noprof); 4759 4760 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) 4761 { 4762 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); 4763 } 4764 EXPORT_SYMBOL(get_zeroed_page_noprof); 4765 4766 /** 4767 * __free_pages - Free pages allocated with alloc_pages(). 4768 * @page: The page pointer returned from alloc_pages(). 4769 * @order: The order of the allocation. 4770 * 4771 * This function can free multi-page allocations that are not compound 4772 * pages. It does not check that the @order passed in matches that of 4773 * the allocation, so it is easy to leak memory. Freeing more memory 4774 * than was allocated will probably emit a warning. 4775 * 4776 * If the last reference to this page is speculative, it will be released 4777 * by put_page() which only frees the first page of a non-compound 4778 * allocation. To prevent the remaining pages from being leaked, we free 4779 * the subsequent pages here. If you want to use the page's reference 4780 * count to decide when to free the allocation, you should allocate a 4781 * compound page, and use put_page() instead of __free_pages(). 
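 *
 * A minimal usage sketch (illustrative only; the order passed to
 * __free_pages() must match the one used for allocation):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);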
4782 * 4783 * Context: May be called in interrupt context or while holding a normal 4784 * spinlock, but not in NMI context or while holding a raw spinlock. 4785 */ 4786 void __free_pages(struct page *page, unsigned int order) 4787 { 4788 /* get PageHead before we drop reference */ 4789 int head = PageHead(page); 4790 struct alloc_tag *tag = pgalloc_tag_get(page); 4791 4792 if (put_page_testzero(page)) 4793 free_unref_page(page, order); 4794 else if (!head) { 4795 pgalloc_tag_sub_pages(tag, (1 << order) - 1); 4796 while (order-- > 0) 4797 free_unref_page(page + (1 << order), order); 4798 } 4799 } 4800 EXPORT_SYMBOL(__free_pages); 4801 4802 void free_pages(unsigned long addr, unsigned int order) 4803 { 4804 if (addr != 0) { 4805 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4806 __free_pages(virt_to_page((void *)addr), order); 4807 } 4808 } 4809 4810 EXPORT_SYMBOL(free_pages); 4811 4812 /* 4813 * Page Fragment: 4814 * An arbitrary-length arbitrary-offset area of memory which resides 4815 * within a 0 or higher order page. Multiple fragments within that page 4816 * are individually refcounted, in the page's reference counter. 4817 * 4818 * The page_frag functions below provide a simple allocation framework for 4819 * page fragments. This is used by the network stack and network device 4820 * drivers to provide a backing region of memory for use as either an 4821 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 4822 */ 4823 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4824 gfp_t gfp_mask) 4825 { 4826 struct page *page = NULL; 4827 gfp_t gfp = gfp_mask; 4828 4829 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4830 gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | 4831 __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC; 4832 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4833 PAGE_FRAG_CACHE_MAX_ORDER); 4834 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4835 #endif 4836 if (unlikely(!page)) 4837 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4838 4839 nc->va = page ? page_address(page) : NULL; 4840 4841 return page; 4842 } 4843 4844 void page_frag_cache_drain(struct page_frag_cache *nc) 4845 { 4846 if (!nc->va) 4847 return; 4848 4849 __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias); 4850 nc->va = NULL; 4851 } 4852 EXPORT_SYMBOL(page_frag_cache_drain); 4853 4854 void __page_frag_cache_drain(struct page *page, unsigned int count) 4855 { 4856 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4857 4858 if (page_ref_sub_and_test(page, count)) 4859 free_unref_page(page, compound_order(page)); 4860 } 4861 EXPORT_SYMBOL(__page_frag_cache_drain); 4862 4863 void *__page_frag_alloc_align(struct page_frag_cache *nc, 4864 unsigned int fragsz, gfp_t gfp_mask, 4865 unsigned int align_mask) 4866 { 4867 unsigned int size = PAGE_SIZE; 4868 struct page *page; 4869 int offset; 4870 4871 if (unlikely(!nc->va)) { 4872 refill: 4873 page = __page_frag_cache_refill(nc, gfp_mask); 4874 if (!page) 4875 return NULL; 4876 4877 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4878 /* if size can vary use size else just use PAGE_SIZE */ 4879 size = nc->size; 4880 #endif 4881 /* Even if we own the page, we do not use atomic_set(). 4882 * This would break get_page_unless_zero() users. 
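 * A speculative reference taken via get_page_unless_zero() between
 * the refill and this point would be overwritten by a plain store;
 * page_ref_add() preserves such references.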
4883 */ 4884 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 4885 4886 /* reset page count bias and offset to start of new frag */ 4887 nc->pfmemalloc = page_is_pfmemalloc(page); 4888 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4889 nc->offset = size; 4890 } 4891 4892 offset = nc->offset - fragsz; 4893 if (unlikely(offset < 0)) { 4894 page = virt_to_page(nc->va); 4895 4896 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 4897 goto refill; 4898 4899 if (unlikely(nc->pfmemalloc)) { 4900 free_unref_page(page, compound_order(page)); 4901 goto refill; 4902 } 4903 4904 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4905 /* if size can vary use size else just use PAGE_SIZE */ 4906 size = nc->size; 4907 #endif 4908 /* OK, page count is 0, we can safely set it */ 4909 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 4910 4911 /* reset page count bias and offset to start of new frag */ 4912 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4913 offset = size - fragsz; 4914 if (unlikely(offset < 0)) { 4915 /* 4916 * The caller is trying to allocate a fragment 4917 * with fragsz > PAGE_SIZE but the cache isn't big 4918 * enough to satisfy the request, this may 4919 * happen in low memory conditions. 4920 * We don't release the cache page because 4921 * it could make memory pressure worse 4922 * so we simply return NULL here. 4923 */ 4924 return NULL; 4925 } 4926 } 4927 4928 nc->pagecnt_bias--; 4929 offset &= align_mask; 4930 nc->offset = offset; 4931 4932 return nc->va + offset; 4933 } 4934 EXPORT_SYMBOL(__page_frag_alloc_align); 4935 4936 /* 4937 * Frees a page fragment allocated out of either a compound or order 0 page. 4938 */ 4939 void page_frag_free(void *addr) 4940 { 4941 struct page *page = virt_to_head_page(addr); 4942 4943 if (unlikely(put_page_testzero(page))) 4944 free_unref_page(page, compound_order(page)); 4945 } 4946 EXPORT_SYMBOL(page_frag_free); 4947 4948 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4949 size_t size) 4950 { 4951 if (addr) { 4952 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 4953 struct page *page = virt_to_page((void *)addr); 4954 struct page *last = page + nr; 4955 4956 split_page_owner(page, order, 0); 4957 pgalloc_tag_split(page, 1 << order); 4958 split_page_memcg(page, order, 0); 4959 while (page < --last) 4960 set_page_refcounted(last); 4961 4962 last = page + (1UL << order); 4963 for (page += nr; page < last; page++) 4964 __free_pages_ok(page, 0, FPI_TO_TAIL); 4965 } 4966 return (void *)addr; 4967 } 4968 4969 /** 4970 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 4971 * @size: the number of bytes to allocate 4972 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4973 * 4974 * This function is similar to alloc_pages(), except that it allocates the 4975 * minimum number of pages to satisfy the request. alloc_pages() can only 4976 * allocate memory in power-of-two pages. 4977 * 4978 * This function is also limited by MAX_PAGE_ORDER. 4979 * 4980 * Memory allocated by this function must be released by free_pages_exact(). 4981 * 4982 * Return: pointer to the allocated area or %NULL in case of error. 
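 *
 * Usage sketch (illustrative only): a 40KB request allocates an order-4
 * block (on 4KB pages) and immediately frees the six unused tail pages:
 *
 *	buf = alloc_pages_exact(40 << 10, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 40 << 10);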
4983 */ 4984 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) 4985 { 4986 unsigned int order = get_order(size); 4987 unsigned long addr; 4988 4989 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4990 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4991 4992 addr = get_free_pages_noprof(gfp_mask, order); 4993 return make_alloc_exact(addr, order, size); 4994 } 4995 EXPORT_SYMBOL(alloc_pages_exact_noprof); 4996 4997 /** 4998 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4999 * pages on a node. 5000 * @nid: the preferred node ID where memory should be allocated 5001 * @size: the number of bytes to allocate 5002 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5003 * 5004 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5005 * back. 5006 * 5007 * Return: pointer to the allocated area or %NULL in case of error. 5008 */ 5009 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) 5010 { 5011 unsigned int order = get_order(size); 5012 struct page *p; 5013 5014 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5015 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5016 5017 p = alloc_pages_node_noprof(nid, gfp_mask, order); 5018 if (!p) 5019 return NULL; 5020 return make_alloc_exact((unsigned long)page_address(p), order, size); 5021 } 5022 5023 /** 5024 * free_pages_exact - release memory allocated via alloc_pages_exact() 5025 * @virt: the value returned by alloc_pages_exact. 5026 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5027 * 5028 * Release the memory allocated by a previous call to alloc_pages_exact. 5029 */ 5030 void free_pages_exact(void *virt, size_t size) 5031 { 5032 unsigned long addr = (unsigned long)virt; 5033 unsigned long end = addr + PAGE_ALIGN(size); 5034 5035 while (addr < end) { 5036 free_page(addr); 5037 addr += PAGE_SIZE; 5038 } 5039 } 5040 EXPORT_SYMBOL(free_pages_exact); 5041 5042 /** 5043 * nr_free_zone_pages - count number of pages beyond high watermark 5044 * @offset: The zone index of the highest zone 5045 * 5046 * nr_free_zone_pages() counts the number of pages which are beyond the 5047 * high watermark within all zones at or below a given zone index. For each 5048 * zone, the number of pages is calculated as: 5049 * 5050 * nr_free_zone_pages = managed_pages - high_pages 5051 * 5052 * Return: number of pages beyond high watermark. 5053 */ 5054 static unsigned long nr_free_zone_pages(int offset) 5055 { 5056 struct zoneref *z; 5057 struct zone *zone; 5058 5059 /* Just pick one node, since fallback list is circular */ 5060 unsigned long sum = 0; 5061 5062 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5063 5064 for_each_zone_zonelist(zone, z, zonelist, offset) { 5065 unsigned long size = zone_managed_pages(zone); 5066 unsigned long high = high_wmark_pages(zone); 5067 if (size > high) 5068 sum += size - high; 5069 } 5070 5071 return sum; 5072 } 5073 5074 /** 5075 * nr_free_buffer_pages - count number of pages beyond high watermark 5076 * 5077 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5078 * watermark within ZONE_DMA and ZONE_NORMAL. 5079 * 5080 * Return: number of pages beyond high watermark within ZONE_DMA and 5081 * ZONE_NORMAL. 
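 *
 * (Callers should treat the result as an estimate; e.g. networking uses
 * it to derive default memory limits at boot.)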
5082 */ 5083 unsigned long nr_free_buffer_pages(void) 5084 { 5085 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5086 } 5087 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5088 5089 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5090 { 5091 zoneref->zone = zone; 5092 zoneref->zone_idx = zone_idx(zone); 5093 } 5094 5095 /* 5096 * Builds allocation fallback zone lists. 5097 * 5098 * Add all populated zones of a node to the zonelist. 5099 */ 5100 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5101 { 5102 struct zone *zone; 5103 enum zone_type zone_type = MAX_NR_ZONES; 5104 int nr_zones = 0; 5105 5106 do { 5107 zone_type--; 5108 zone = pgdat->node_zones + zone_type; 5109 if (populated_zone(zone)) { 5110 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5111 check_highest_zone(zone_type); 5112 } 5113 } while (zone_type); 5114 5115 return nr_zones; 5116 } 5117 5118 #ifdef CONFIG_NUMA 5119 5120 static int __parse_numa_zonelist_order(char *s) 5121 { 5122 /* 5123 * We used to support different zonelist modes, but they turned 5124 * out to be simply not useful. Let's keep the warning in place 5125 * in case somebody still uses the command line parameter, so 5126 * that we do not fail silently 5127 */ 5128 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5129 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5130 return -EINVAL; 5131 } 5132 return 0; 5133 } 5134 5135 static char numa_zonelist_order[] = "Node"; 5136 #define NUMA_ZONELIST_ORDER_LEN 16 5137 /* 5138 * sysctl handler for numa_zonelist_order 5139 */ 5140 static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5141 void *buffer, size_t *length, loff_t *ppos) 5142 { 5143 if (write) 5144 return __parse_numa_zonelist_order(buffer); 5145 return proc_dostring(table, write, buffer, length, ppos); 5146 } 5147 5148 static int node_load[MAX_NUMNODES]; 5149 5150 /** 5151 * find_next_best_node - find the next node that should appear in a given node's fallback list 5152 * @node: node whose fallback list we're appending 5153 * @used_node_mask: nodemask_t of already used nodes 5154 * 5155 * We use a number of factors to determine which is the next node that should 5156 * appear on a given node's fallback list. The node should not have appeared 5157 * already in @node's fallback list, and it should be the next closest node 5158 * according to the distance array (which contains arbitrary distance values 5159 * from each node to each node in the system); we also prefer nodes with no 5160 * CPUs, since presumably they'll have very little allocation pressure on 5161 * them otherwise. 5162 * 5163 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5164 */ 5165 int find_next_best_node(int node, nodemask_t *used_node_mask) 5166 { 5167 int n, val; 5168 int min_val = INT_MAX; 5169 int best_node = NUMA_NO_NODE; 5170 5171 /* 5172 * Use the local node if we haven't already, but for a memoryless local 5173 * node we should skip it and fall back to other nodes.
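 *
 * For the remaining candidates, the score computed in the loop below is
 * roughly (lower is better):
 *
 *	val = (node_distance(node, n) + (n < node)
 *	       + (n has CPUs ? PENALTY_FOR_NODE_WITH_CPUS : 0))
 *	      * MAX_NUMNODES + node_load[n]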
5174 */ 5175 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) { 5176 node_set(node, *used_node_mask); 5177 return node; 5178 } 5179 5180 for_each_node_state(n, N_MEMORY) { 5181 5182 /* Don't want a node to appear more than once */ 5183 if (node_isset(n, *used_node_mask)) 5184 continue; 5185 5186 /* Use the distance array to find the distance */ 5187 val = node_distance(node, n); 5188 5189 /* Penalize nodes under us ("prefer the next node") */ 5190 val += (n < node); 5191 5192 /* Give preference to headless and unused nodes */ 5193 if (!cpumask_empty(cpumask_of_node(n))) 5194 val += PENALTY_FOR_NODE_WITH_CPUS; 5195 5196 /* Slight preference for less loaded node */ 5197 val *= MAX_NUMNODES; 5198 val += node_load[n]; 5199 5200 if (val < min_val) { 5201 min_val = val; 5202 best_node = n; 5203 } 5204 } 5205 5206 if (best_node >= 0) 5207 node_set(best_node, *used_node_mask); 5208 5209 return best_node; 5210 } 5211 5212 5213 /* 5214 * Build zonelists ordered by node and zones within node. 5215 * This results in maximum locality--normal zone overflows into local 5216 * DMA zone, if any--but risks exhausting DMA zone. 5217 */ 5218 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5219 unsigned nr_nodes) 5220 { 5221 struct zoneref *zonerefs; 5222 int i; 5223 5224 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5225 5226 for (i = 0; i < nr_nodes; i++) { 5227 int nr_zones; 5228 5229 pg_data_t *node = NODE_DATA(node_order[i]); 5230 5231 nr_zones = build_zonerefs_node(node, zonerefs); 5232 zonerefs += nr_zones; 5233 } 5234 zonerefs->zone = NULL; 5235 zonerefs->zone_idx = 0; 5236 } 5237 5238 /* 5239 * Build __GFP_THISNODE zonelists 5240 */ 5241 static void build_thisnode_zonelists(pg_data_t *pgdat) 5242 { 5243 struct zoneref *zonerefs; 5244 int nr_zones; 5245 5246 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5247 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5248 zonerefs += nr_zones; 5249 zonerefs->zone = NULL; 5250 zonerefs->zone_idx = 0; 5251 } 5252 5253 /* 5254 * Build zonelists ordered by zone and nodes within zones. 5255 * This results in conserving DMA zone[s] until all Normal memory is 5256 * exhausted, but results in overflowing to remote node while memory 5257 * may still exist in local DMA zone. 5258 */ 5259 5260 static void build_zonelists(pg_data_t *pgdat) 5261 { 5262 static int node_order[MAX_NUMNODES]; 5263 int node, nr_nodes = 0; 5264 nodemask_t used_mask = NODE_MASK_NONE; 5265 int local_node, prev_node; 5266 5267 /* NUMA-aware ordering of nodes */ 5268 local_node = pgdat->node_id; 5269 prev_node = local_node; 5270 5271 memset(node_order, 0, sizeof(node_order)); 5272 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5273 /* 5274 * We don't want to pressure a particular node. 5275 * So adding penalty to the first node in same 5276 * distance group to make it round-robin. 5277 */ 5278 if (node_distance(local_node, node) != 5279 node_distance(local_node, prev_node)) 5280 node_load[node] += 1; 5281 5282 node_order[nr_nodes++] = node; 5283 prev_node = node; 5284 } 5285 5286 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5287 build_thisnode_zonelists(pgdat); 5288 pr_info("Fallback order for Node %d: ", local_node); 5289 for (node = 0; node < nr_nodes; node++) 5290 pr_cont("%d ", node_order[node]); 5291 pr_cont("\n"); 5292 } 5293 5294 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5295 /* 5296 * Return node id of node used for "local" allocations. 
5297 * I.e., first node id of first zone in arg node's generic zonelist. 5298 * Used for initializing percpu 'numa_mem', which is used primarily 5299 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5300 */ 5301 int local_memory_node(int node) 5302 { 5303 struct zoneref *z; 5304 5305 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5306 gfp_zone(GFP_KERNEL), 5307 NULL); 5308 return zonelist_node_idx(z); 5309 } 5310 #endif 5311 5312 static void setup_min_unmapped_ratio(void); 5313 static void setup_min_slab_ratio(void); 5314 #else /* CONFIG_NUMA */ 5315 5316 static void build_zonelists(pg_data_t *pgdat) 5317 { 5318 struct zoneref *zonerefs; 5319 int nr_zones; 5320 5321 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5322 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5323 zonerefs += nr_zones; 5324 5325 zonerefs->zone = NULL; 5326 zonerefs->zone_idx = 0; 5327 } 5328 5329 #endif /* CONFIG_NUMA */ 5330 5331 /* 5332 * Boot pageset table. One per cpu which is going to be used for all 5333 * zones and all nodes. The parameters will be set in such a way 5334 * that an item put on a list will immediately be handed over to 5335 * the buddy list. This is safe since pageset manipulation is done 5336 * with interrupts disabled. 5337 * 5338 * The boot_pagesets must be kept even after bootup is complete for 5339 * unused processors and/or zones. They do play a role for bootstrapping 5340 * hotplugged processors. 5341 * 5342 * zoneinfo_show() and maybe other functions do 5343 * not check if the processor is online before following the pageset pointer. 5344 * Other parts of the kernel may not check if the zone is available. 5345 */ 5346 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5347 /* These effectively disable the pcplists in the boot pageset completely */ 5348 #define BOOT_PAGESET_HIGH 0 5349 #define BOOT_PAGESET_BATCH 1 5350 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5351 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5352 5353 static void __build_all_zonelists(void *data) 5354 { 5355 int nid; 5356 int __maybe_unused cpu; 5357 pg_data_t *self = data; 5358 unsigned long flags; 5359 5360 /* 5361 * The zonelist_update_seq must be acquired with irqsave because the 5362 * reader can be invoked from IRQ with GFP_ATOMIC. 5363 */ 5364 write_seqlock_irqsave(&zonelist_update_seq, flags); 5365 /* 5366 * Also disable synchronous printk() to prevent any printk() from 5367 * trying to hold port->lock, for 5368 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5369 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5370 */ 5371 printk_deferred_enter(); 5372 5373 #ifdef CONFIG_NUMA 5374 memset(node_load, 0, sizeof(node_load)); 5375 #endif 5376 5377 /* 5378 * This node is hotadded and no memory is yet present. So just 5379 * building zonelists is fine - no need to touch other nodes. 5380 */ 5381 if (self && !node_online(self->node_id)) { 5382 build_zonelists(self); 5383 } else { 5384 /* 5385 * All possible nodes have pgdat preallocated 5386 * in free_area_init 5387 */ 5388 for_each_node(nid) { 5389 pg_data_t *pgdat = NODE_DATA(nid); 5390 5391 build_zonelists(pgdat); 5392 } 5393 5394 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5395 /* 5396 * We now know the "local memory node" for each node-- 5397 * i.e., the node of the first zone in the generic zonelist. 5398 * Set up numa_mem percpu variable for on-line cpus. 
During 5399 * boot, only the boot cpu should be on-line; we'll init the 5400 * secondary cpus' numa_mem as they come on-line. During 5401 * node/memory hotplug, we'll fixup all on-line cpus. 5402 */ 5403 for_each_online_cpu(cpu) 5404 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5405 #endif 5406 } 5407 5408 printk_deferred_exit(); 5409 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5410 } 5411 5412 static noinline void __init 5413 build_all_zonelists_init(void) 5414 { 5415 int cpu; 5416 5417 __build_all_zonelists(NULL); 5418 5419 /* 5420 * Initialize the boot_pagesets that are going to be used 5421 * for bootstrapping processors. The real pagesets for 5422 * each zone will be allocated later when the per cpu 5423 * allocator is available. 5424 * 5425 * boot_pagesets are used also for bootstrapping offline 5426 * cpus if the system is already booted because the pagesets 5427 * are needed to initialize allocators on a specific cpu too. 5428 * F.e. the percpu allocator needs the page allocator which 5429 * needs the percpu allocator in order to allocate its pagesets 5430 * (a chicken-egg dilemma). 5431 */ 5432 for_each_possible_cpu(cpu) 5433 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5434 5435 mminit_verify_zonelist(); 5436 cpuset_init_current_mems_allowed(); 5437 } 5438 5439 /* 5440 * unless system_state == SYSTEM_BOOTING. 5441 * 5442 * __ref due to call of __init annotated helper build_all_zonelists_init 5443 * [protected by SYSTEM_BOOTING]. 5444 */ 5445 void __ref build_all_zonelists(pg_data_t *pgdat) 5446 { 5447 unsigned long vm_total_pages; 5448 5449 if (system_state == SYSTEM_BOOTING) { 5450 build_all_zonelists_init(); 5451 } else { 5452 __build_all_zonelists(pgdat); 5453 /* cpuset refresh routine should be here */ 5454 } 5455 /* Get the number of free pages beyond high watermark in all zones. */ 5456 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5457 /* 5458 * Disable grouping by mobility if the number of pages in the 5459 * system is too low to allow the mechanism to work. It would be 5460 * more accurate, but expensive to check per-zone. This check is 5461 * made on memory-hotadd so a system can start with mobility 5462 * disabled and enable it later 5463 */ 5464 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5465 page_group_by_mobility_disabled = 1; 5466 else 5467 page_group_by_mobility_disabled = 0; 5468 5469 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5470 nr_online_nodes, 5471 page_group_by_mobility_disabled ? "off" : "on", 5472 vm_total_pages); 5473 #ifdef CONFIG_NUMA 5474 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5475 #endif 5476 } 5477 5478 static int zone_batchsize(struct zone *zone) 5479 { 5480 #ifdef CONFIG_MMU 5481 int batch; 5482 5483 /* 5484 * The number of pages to batch allocate is either ~0.1% 5485 * of the zone or 1MB, whichever is smaller. The batch 5486 * size is striking a balance between allocation latency 5487 * and zone lock contention. 5488 */ 5489 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5490 batch /= 4; /* We effectively *= 4 below */ 5491 if (batch < 1) 5492 batch = 1; 5493 5494 /* 5495 * Clamp the batch to a 2^n - 1 value. Having a power 5496 * of 2 value was found to be more likely to have 5497 * suboptimal cache aliasing properties in some cases. 
5498 * 5499 * For example if 2 tasks are alternately allocating 5500 * batches of pages, one task can end up with a lot 5501 * of pages of one half of the possible page colors 5502 * and the other with pages of the other colors. 5503 */ 5504 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5505 5506 return batch; 5507 5508 #else 5509 /* The deferral and batching of frees should be suppressed under NOMMU 5510 * conditions. 5511 * 5512 * The problem is that NOMMU needs to be able to allocate large chunks 5513 * of contiguous memory as there's no hardware page translation to 5514 * assemble apparent contiguous memory from discontiguous pages. 5515 * 5516 * Queueing large contiguous runs of pages for batching, however, 5517 * causes the pages to actually be freed in smaller chunks. As there 5518 * can be a significant delay between the individual batches being 5519 * recycled, this leads to the once large chunks of space being 5520 * fragmented and becoming unavailable for high-order allocations. 5521 */ 5522 return 0; 5523 #endif 5524 } 5525 5526 static int percpu_pagelist_high_fraction; 5527 static int zone_highsize(struct zone *zone, int batch, int cpu_online, 5528 int high_fraction) 5529 { 5530 #ifdef CONFIG_MMU 5531 int high; 5532 int nr_split_cpus; 5533 unsigned long total_pages; 5534 5535 if (!high_fraction) { 5536 /* 5537 * By default, the high value of the pcp is based on the zone 5538 * low watermark so that if they are full then background 5539 * reclaim will not be started prematurely. 5540 */ 5541 total_pages = low_wmark_pages(zone); 5542 } else { 5543 /* 5544 * If percpu_pagelist_high_fraction is configured, the high 5545 * value is based on a fraction of the managed pages in the 5546 * zone. 5547 */ 5548 total_pages = zone_managed_pages(zone) / high_fraction; 5549 } 5550 5551 /* 5552 * Split the high value across all online CPUs local to the zone. Note 5553 * that early in boot that CPUs may not be online yet and that during 5554 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5555 * onlined. For memory nodes that have no CPUs, split the high value 5556 * across all online CPUs to mitigate the risk that reclaim is triggered 5557 * prematurely due to pages stored on pcp lists. 5558 */ 5559 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5560 if (!nr_split_cpus) 5561 nr_split_cpus = num_online_cpus(); 5562 high = total_pages / nr_split_cpus; 5563 5564 /* 5565 * Ensure high is at least batch*4. The multiple is based on the 5566 * historical relationship between high and batch. 5567 */ 5568 high = max(high, batch << 2); 5569 5570 return high; 5571 #else 5572 return 0; 5573 #endif 5574 } 5575 5576 /* 5577 * pcp->high and pcp->batch values are related and generally batch is lower 5578 * than high. They are also related to pcp->count such that count is lower 5579 * than high, and as soon as it reaches high, the pcplist is flushed. 5580 * 5581 * However, guaranteeing these relations at all times would require e.g. write 5582 * barriers here but also careful usage of read barriers at the read side, and 5583 * thus be prone to error and bad for performance. Thus the update only prevents 5584 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max 5585 * should ensure they can cope with those fields changing asynchronously, and 5586 * fully trust only the pcp->count field on the local CPU with interrupts 5587 * disabled. 
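 *
 * For instance, a hypothetical lockless reader (an illustrative sketch,
 * not a pattern quoted from elsewhere in this file) should snapshot the
 * field once with READ_ONCE() and then use only that snapshot, rather
 * than re-reading pcp->batch mid-operation:
 *
 *	int batch = READ_ONCE(pcp->batch);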
5588 * 5589 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5590 * outside of boot time (or some other assurance that no concurrent updaters 5591 * exist). 5592 */ 5593 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min, 5594 unsigned long high_max, unsigned long batch) 5595 { 5596 WRITE_ONCE(pcp->batch, batch); 5597 WRITE_ONCE(pcp->high_min, high_min); 5598 WRITE_ONCE(pcp->high_max, high_max); 5599 } 5600 5601 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5602 { 5603 int pindex; 5604 5605 memset(pcp, 0, sizeof(*pcp)); 5606 memset(pzstats, 0, sizeof(*pzstats)); 5607 5608 spin_lock_init(&pcp->lock); 5609 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5610 INIT_LIST_HEAD(&pcp->lists[pindex]); 5611 5612 /* 5613 * Set batch and high values safe for a boot pageset. A true percpu 5614 * pageset's initialization will update them subsequently. Here we don't 5615 * need to be as careful as pageset_update() as nobody can access the 5616 * pageset yet. 5617 */ 5618 pcp->high_min = BOOT_PAGESET_HIGH; 5619 pcp->high_max = BOOT_PAGESET_HIGH; 5620 pcp->batch = BOOT_PAGESET_BATCH; 5621 pcp->free_count = 0; 5622 } 5623 5624 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, 5625 unsigned long high_max, unsigned long batch) 5626 { 5627 struct per_cpu_pages *pcp; 5628 int cpu; 5629 5630 for_each_possible_cpu(cpu) { 5631 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5632 pageset_update(pcp, high_min, high_max, batch); 5633 } 5634 } 5635 5636 /* 5637 * Calculate and set new high and batch values for all per-cpu pagesets of a 5638 * zone based on the zone's size. 5639 */ 5640 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5641 { 5642 int new_high_min, new_high_max, new_batch; 5643 5644 new_batch = max(1, zone_batchsize(zone)); 5645 if (percpu_pagelist_high_fraction) { 5646 new_high_min = zone_highsize(zone, new_batch, cpu_online, 5647 percpu_pagelist_high_fraction); 5648 /* 5649 * PCP high is tuned manually, disable auto-tuning via 5650 * setting high_min and high_max to the manual value. 
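		 *
		 * Worked example with assumed numbers: with
		 * percpu_pagelist_high_fraction = 8, a zone managing 262144
		 * pages (1GB of 4KB pages) and 4 local CPUs, zone_highsize()
		 * above yields (262144 / 8) / 4 = 8192 pages for both
		 * high_min and high_max, subject to its batch << 2 floor.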
5651 */ 5652 new_high_max = new_high_min; 5653 } else { 5654 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); 5655 new_high_max = zone_highsize(zone, new_batch, cpu_online, 5656 MIN_PERCPU_PAGELIST_HIGH_FRACTION); 5657 } 5658 5659 if (zone->pageset_high_min == new_high_min && 5660 zone->pageset_high_max == new_high_max && 5661 zone->pageset_batch == new_batch) 5662 return; 5663 5664 zone->pageset_high_min = new_high_min; 5665 zone->pageset_high_max = new_high_max; 5666 zone->pageset_batch = new_batch; 5667 5668 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, 5669 new_batch); 5670 } 5671 5672 void __meminit setup_zone_pageset(struct zone *zone) 5673 { 5674 int cpu; 5675 5676 /* Size may be 0 on !SMP && !NUMA */ 5677 if (sizeof(struct per_cpu_zonestat) > 0) 5678 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5679 5680 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5681 for_each_possible_cpu(cpu) { 5682 struct per_cpu_pages *pcp; 5683 struct per_cpu_zonestat *pzstats; 5684 5685 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5686 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5687 per_cpu_pages_init(pcp, pzstats); 5688 } 5689 5690 zone_set_pageset_high_and_batch(zone, 0); 5691 } 5692 5693 /* 5694 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5695 * page high values need to be recalculated. 5696 */ 5697 static void zone_pcp_update(struct zone *zone, int cpu_online) 5698 { 5699 mutex_lock(&pcp_batch_high_lock); 5700 zone_set_pageset_high_and_batch(zone, cpu_online); 5701 mutex_unlock(&pcp_batch_high_lock); 5702 } 5703 5704 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) 5705 { 5706 struct per_cpu_pages *pcp; 5707 struct cpu_cacheinfo *cci; 5708 5709 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5710 cci = get_cpu_cacheinfo(cpu); 5711 /* 5712 * If data cache slice of CPU is large enough, "pcp->batch" 5713 * pages can be preserved in PCP before draining PCP for 5714 * consecutive high-order pages freeing without allocation. 5715 * This can reduce zone lock contention without hurting 5716 * cache-hot pages sharing. 5717 */ 5718 spin_lock(&pcp->lock); 5719 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch) 5720 pcp->flags |= PCPF_FREE_HIGH_BATCH; 5721 else 5722 pcp->flags &= ~PCPF_FREE_HIGH_BATCH; 5723 spin_unlock(&pcp->lock); 5724 } 5725 5726 void setup_pcp_cacheinfo(unsigned int cpu) 5727 { 5728 struct zone *zone; 5729 5730 for_each_populated_zone(zone) 5731 zone_pcp_update_cacheinfo(zone, cpu); 5732 } 5733 5734 /* 5735 * Allocate per cpu pagesets and initialize them. 5736 * Before this call only boot pagesets were available. 5737 */ 5738 void __init setup_per_cpu_pageset(void) 5739 { 5740 struct pglist_data *pgdat; 5741 struct zone *zone; 5742 int __maybe_unused cpu; 5743 5744 for_each_populated_zone(zone) 5745 setup_zone_pageset(zone); 5746 5747 #ifdef CONFIG_NUMA 5748 /* 5749 * Unpopulated zones continue using the boot pagesets. 5750 * The numa stats for these pagesets need to be reset. 5751 * Otherwise, they will end up skewing the stats of 5752 * the nodes these zones are associated with. 
5753 */ 5754 for_each_possible_cpu(cpu) { 5755 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5756 memset(pzstats->vm_numa_event, 0, 5757 sizeof(pzstats->vm_numa_event)); 5758 } 5759 #endif 5760 5761 for_each_online_pgdat(pgdat) 5762 pgdat->per_cpu_nodestats = 5763 alloc_percpu(struct per_cpu_nodestat); 5764 } 5765 5766 __meminit void zone_pcp_init(struct zone *zone) 5767 { 5768 /* 5769 * per cpu subsystem is not up at this point. The following code 5770 * relies on the ability of the linker to provide the 5771 * offset of a (static) per cpu variable into the per cpu area. 5772 */ 5773 zone->per_cpu_pageset = &boot_pageset; 5774 zone->per_cpu_zonestats = &boot_zonestats; 5775 zone->pageset_high_min = BOOT_PAGESET_HIGH; 5776 zone->pageset_high_max = BOOT_PAGESET_HIGH; 5777 zone->pageset_batch = BOOT_PAGESET_BATCH; 5778 5779 if (populated_zone(zone)) 5780 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5781 zone->present_pages, zone_batchsize(zone)); 5782 } 5783 5784 void adjust_managed_page_count(struct page *page, long count) 5785 { 5786 atomic_long_add(count, &page_zone(page)->managed_pages); 5787 totalram_pages_add(count); 5788 } 5789 EXPORT_SYMBOL(adjust_managed_page_count); 5790 5791 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5792 { 5793 void *pos; 5794 unsigned long pages = 0; 5795 5796 start = (void *)PAGE_ALIGN((unsigned long)start); 5797 end = (void *)((unsigned long)end & PAGE_MASK); 5798 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5799 struct page *page = virt_to_page(pos); 5800 void *direct_map_addr; 5801 5802 /* 5803 * 'direct_map_addr' might be different from 'pos' 5804 * because some architectures' virt_to_page() 5805 * work with aliases. Getting the direct map 5806 * address ensures that we get a _writeable_ 5807 * alias for the memset(). 5808 */ 5809 direct_map_addr = page_address(page); 5810 /* 5811 * Perform a kasan-unchecked memset() since this memory 5812 * has not been initialized. 5813 */ 5814 direct_map_addr = kasan_reset_tag(direct_map_addr); 5815 if ((unsigned int)poison <= 0xFF) 5816 memset(direct_map_addr, poison, PAGE_SIZE); 5817 5818 free_reserved_page(page); 5819 } 5820 5821 if (pages && s) 5822 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 5823 5824 return pages; 5825 } 5826 5827 void free_reserved_page(struct page *page) 5828 { 5829 clear_page_tag_ref(page); 5830 ClearPageReserved(page); 5831 init_page_count(page); 5832 __free_page(page); 5833 adjust_managed_page_count(page, 1); 5834 } 5835 EXPORT_SYMBOL(free_reserved_page); 5836 5837 static int page_alloc_cpu_dead(unsigned int cpu) 5838 { 5839 struct zone *zone; 5840 5841 lru_add_drain_cpu(cpu); 5842 mlock_drain_remote(cpu); 5843 drain_pages(cpu); 5844 5845 /* 5846 * Spill the event counters of the dead processor 5847 * into the current processors event counters. 5848 * This artificially elevates the count of the current 5849 * processor. 5850 */ 5851 vm_events_fold_cpu(cpu); 5852 5853 /* 5854 * Zero the differential counters of the dead processor 5855 * so that the vm statistics are consistent. 5856 * 5857 * This is only okay since the processor is dead and cannot 5858 * race with what we are doing. 
	 */
	cpu_vm_stats_fold(cpu);

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 0);

	return 0;
}

static int page_alloc_cpu_online(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 1);
	return 0;
}

void __init page_alloc_init_cpuhp(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
					"mm/page_alloc:pcp",
					page_alloc_cpu_online,
					page_alloc_cpu_dead);
	WARN_ON(ret < 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 * or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {

		pgdat->totalreserve_pages = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;
			unsigned long managed_pages = zone_managed_pages(zone);

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > managed_pages)
				max = managed_pages;

			pgdat->totalreserve_pages += max;

			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 * has a correct amount of reserved pages, so an adequate number of
 * pages are left in the zone after a successful __alloc_pages().
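 *
 * Worked example with assumed numbers: if sysctl_lowmem_reserve_ratio
 * for ZONE_DMA32 is 256 and ZONE_NORMAL on the same node manages
 * 4194304 pages (16GB of 4KB pages), this sets ZONE_DMA32's
 * lowmem_reserve[ZONE_NORMAL] to 4194304 / 256 = 16384 pages; an
 * allocation that could have used ZONE_NORMAL but falls back to
 * ZONE_DMA32 must leave that many pages free in ZONE_DMA32.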
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
			struct zone *zone = &pgdat->node_zones[i];
			int ratio = sysctl_lowmem_reserve_ratio[i];
			bool clear = !ratio || !zone_managed_pages(zone);
			unsigned long managed_pages = 0;

			for (j = i + 1; j < MAX_NR_ZONES; j++) {
				struct zone *upper_zone = &pgdat->node_zones[j];
				bool empty = !zone_managed_pages(upper_zone);

				managed_pages += zone_managed_pages(upper_zone);

				if (clear || empty)
					zone->lowmem_reserve[j] = 0;
				else
					zone->lowmem_reserve[j] = managed_pages / ratio;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
	for_each_zone(zone) {
		if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
			lowmem_pages += zone_managed_pages(zone);
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone_managed_pages(zone);
		tmp = div64_ul(tmp, lowmem_pages);
		if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem and movable zones pages, so cap pages_min
			 * to a small value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem and movable zones.
			 */
			unsigned long min_pages;

			min_pages = zone_managed_pages(zone) / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone_managed_pages(zone),
				      watermark_scale_factor, 10000));

		zone->watermark_boost = 0;
		zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	struct zone *zone;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);

	/*
	 * The watermark sizes have changed, so update the pcpu batch
	 * and high limits or the limits may be inappropriate.
	 */
	for_each_zone(zone)
		zone_pcp_update(zone, 0);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (256MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
void calculate_min_free_kbytes(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes)
		min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
	else
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
			new_min_free_kbytes, user_min_free_kbytes);
}

int __meminit init_per_zone_wmark_min(void)
{
	calculate_min_free_kbytes();
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	khugepaged_min_free_kbytes_update();

	return 0;
}
postcore_initcall(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around
 * proc_dointvec_minmax() so that we can call two helper functions
 * whenever min_free_kbytes changes.
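 *
 * For example, an administrator raising the reserve on a (hypothetical)
 * large machine could write the new value in kbytes directly:
 *
 *	echo 131072 > /proc/sys/vm/min_free_kbytes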
 */
static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}

#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
							 sysctl_min_unmapped_ratio) / 100;
}

static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_unmapped_ratio();

	return 0;
}

static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
						     sysctl_min_slab_ratio) / 100;
}

static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_slab_ratio();

	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it is only
 * meaningful in relation to the boot-time zone sizes.
 */
static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
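 *
 * For example (illustrative numbers), after
 *
 *	echo 8 > /proc/sys/vm/percpu_pagelist_high_fraction
 *
 * a zone managing 2097152 pages (8GB of 4KB pages) caps each local CPU's
 * pcp->high at roughly (2097152 / 8) / nr_local_cpus pages. 8 is also the
 * minimum accepted fraction; smaller non-zero values are rejected with
 * -EINVAL by the handler below, and 0 restores the default auto-tuning.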
6230 */ 6231 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6232 int write, void *buffer, size_t *length, loff_t *ppos) 6233 { 6234 struct zone *zone; 6235 int old_percpu_pagelist_high_fraction; 6236 int ret; 6237 6238 mutex_lock(&pcp_batch_high_lock); 6239 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 6240 6241 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 6242 if (!write || ret < 0) 6243 goto out; 6244 6245 /* Sanity checking to avoid pcp imbalance */ 6246 if (percpu_pagelist_high_fraction && 6247 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 6248 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 6249 ret = -EINVAL; 6250 goto out; 6251 } 6252 6253 /* No change? */ 6254 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 6255 goto out; 6256 6257 for_each_populated_zone(zone) 6258 zone_set_pageset_high_and_batch(zone, 0); 6259 out: 6260 mutex_unlock(&pcp_batch_high_lock); 6261 return ret; 6262 } 6263 6264 static struct ctl_table page_alloc_sysctl_table[] = { 6265 { 6266 .procname = "min_free_kbytes", 6267 .data = &min_free_kbytes, 6268 .maxlen = sizeof(min_free_kbytes), 6269 .mode = 0644, 6270 .proc_handler = min_free_kbytes_sysctl_handler, 6271 .extra1 = SYSCTL_ZERO, 6272 }, 6273 { 6274 .procname = "watermark_boost_factor", 6275 .data = &watermark_boost_factor, 6276 .maxlen = sizeof(watermark_boost_factor), 6277 .mode = 0644, 6278 .proc_handler = proc_dointvec_minmax, 6279 .extra1 = SYSCTL_ZERO, 6280 }, 6281 { 6282 .procname = "watermark_scale_factor", 6283 .data = &watermark_scale_factor, 6284 .maxlen = sizeof(watermark_scale_factor), 6285 .mode = 0644, 6286 .proc_handler = watermark_scale_factor_sysctl_handler, 6287 .extra1 = SYSCTL_ONE, 6288 .extra2 = SYSCTL_THREE_THOUSAND, 6289 }, 6290 { 6291 .procname = "percpu_pagelist_high_fraction", 6292 .data = &percpu_pagelist_high_fraction, 6293 .maxlen = sizeof(percpu_pagelist_high_fraction), 6294 .mode = 0644, 6295 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 6296 .extra1 = SYSCTL_ZERO, 6297 }, 6298 { 6299 .procname = "lowmem_reserve_ratio", 6300 .data = &sysctl_lowmem_reserve_ratio, 6301 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6302 .mode = 0644, 6303 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6304 }, 6305 #ifdef CONFIG_NUMA 6306 { 6307 .procname = "numa_zonelist_order", 6308 .data = &numa_zonelist_order, 6309 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6310 .mode = 0644, 6311 .proc_handler = numa_zonelist_order_handler, 6312 }, 6313 { 6314 .procname = "min_unmapped_ratio", 6315 .data = &sysctl_min_unmapped_ratio, 6316 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6317 .mode = 0644, 6318 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6319 .extra1 = SYSCTL_ZERO, 6320 .extra2 = SYSCTL_ONE_HUNDRED, 6321 }, 6322 { 6323 .procname = "min_slab_ratio", 6324 .data = &sysctl_min_slab_ratio, 6325 .maxlen = sizeof(sysctl_min_slab_ratio), 6326 .mode = 0644, 6327 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6328 .extra1 = SYSCTL_ZERO, 6329 .extra2 = SYSCTL_ONE_HUNDRED, 6330 }, 6331 #endif 6332 }; 6333 6334 void __init page_alloc_sysctl_init(void) 6335 { 6336 register_sysctl_init("vm", page_alloc_sysctl_table); 6337 } 6338 6339 #ifdef CONFIG_CONTIG_ALLOC 6340 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6341 static void alloc_contig_dump_pages(struct list_head *page_list) 6342 { 6343 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6344 6345 if 
(DYNAMIC_DEBUG_BRANCH(descriptor)) {
		struct page *page;

		dump_stack();
		list_for_each_entry(page, page_list, lru)
			dump_page(page, "migration failure");
	}
}

/*
 * [start, end) must belong to a single zone.
 * @migratetype: migratetype used to filter the migration type reported
 * via trace_mm_alloc_contig_migrate_range_info.
 */
int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end,
					int migratetype)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
		.reason = MR_CONTIG_RANGE,
	};
	struct page *page;
	unsigned long total_mapped = 0;
	unsigned long total_migrated = 0;
	unsigned long total_reclaimed = 0;

	lru_cache_disable();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			ret = isolate_migratepages_range(cc, pfn, end);
			if (ret && ret != -EAGAIN)
				break;
			pfn = cc->migrate_pfn;
			tries = 0;
		} else if (++tries == 5) {
			ret = -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		if (trace_mm_alloc_contig_migrate_range_info_enabled()) {
			total_reclaimed += nr_reclaimed;
			list_for_each_entry(page, &cc->migratepages, lru) {
				struct folio *folio = page_folio(page);

				total_mapped += folio_mapped(folio) *
						folio_nr_pages(folio);
			}
		}

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);

		if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret)
			total_migrated += cc->nr_migratepages;

		/*
		 * On -ENOMEM, migrate_pages() bails out right away. It is
		 * pointless to retry over this error, so do the same here.
		 */
		if (ret == -ENOMEM)
			break;
	}

	lru_cache_enable();
	if (ret < 0) {
		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
			alloc_contig_dump_pages(&cc->migratepages);
		putback_movable_pages(&cc->migratepages);
	}

	trace_mm_alloc_contig_migrate_range_info(start, end, migratetype,
						 total_migrated,
						 total_reclaimed,
						 total_mapped);
	return (ret < 0) ? ret : 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock aligned. The PFN range must
 * belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range. Once isolated, the pageblocks should not
 * be modified by others.
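 *
 * Illustrative use only (a sketch with error handling elided; 'pfn' and
 * 'nr' are assumed to denote MIGRATE_MOVABLE pageblocks within one zone):
 *
 *	if (alloc_contig_range(pfn, pfn + nr, MIGRATE_MOVABLE, GFP_KERNEL) == 0)
 *		free_contig_range(pfn, nr);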
 *
 * Return: zero on success or negative error code. On success all
 * pages which PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range_noprof(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE. Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, start_isolate_page_range() has special handling for this.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (i.e. pages that
	 * we are interested in). This will put all the pages in
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system. This way the
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
	if (ret)
		goto done;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
	 * which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated. So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within pageblock_nr_pages
	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages that
	 * the page allocator holds, i.e. they can be part of higher order
	 * pages. Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */
	outer_start = find_large_buddy(start);

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists.
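	 * isolate_freepages_range() returns 0 on failure, which is
	 * treated as -EBUSY below.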
*/ 6546 outer_end = isolate_freepages_range(&cc, outer_start, end); 6547 if (!outer_end) { 6548 ret = -EBUSY; 6549 goto done; 6550 } 6551 6552 /* Free head and tail (if any) */ 6553 if (start != outer_start) 6554 free_contig_range(outer_start, start - outer_start); 6555 if (end != outer_end) 6556 free_contig_range(end, outer_end - end); 6557 6558 done: 6559 undo_isolate_page_range(start, end, migratetype); 6560 return ret; 6561 } 6562 EXPORT_SYMBOL(alloc_contig_range_noprof); 6563 6564 static int __alloc_contig_pages(unsigned long start_pfn, 6565 unsigned long nr_pages, gfp_t gfp_mask) 6566 { 6567 unsigned long end_pfn = start_pfn + nr_pages; 6568 6569 return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE, 6570 gfp_mask); 6571 } 6572 6573 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6574 unsigned long nr_pages) 6575 { 6576 unsigned long i, end_pfn = start_pfn + nr_pages; 6577 struct page *page; 6578 6579 for (i = start_pfn; i < end_pfn; i++) { 6580 page = pfn_to_online_page(i); 6581 if (!page) 6582 return false; 6583 6584 if (page_zone(page) != z) 6585 return false; 6586 6587 if (PageReserved(page)) 6588 return false; 6589 6590 if (PageHuge(page)) 6591 return false; 6592 } 6593 return true; 6594 } 6595 6596 static bool zone_spans_last_pfn(const struct zone *zone, 6597 unsigned long start_pfn, unsigned long nr_pages) 6598 { 6599 unsigned long last_pfn = start_pfn + nr_pages - 1; 6600 6601 return zone_spans_pfn(zone, last_pfn); 6602 } 6603 6604 /** 6605 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6606 * @nr_pages: Number of contiguous pages to allocate 6607 * @gfp_mask: GFP mask to limit search and used during compaction 6608 * @nid: Target node 6609 * @nodemask: Mask for other possible nodes 6610 * 6611 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6612 * on an applicable zonelist to find a contiguous pfn range which can then be 6613 * tried for allocation with alloc_contig_range(). This routine is intended 6614 * for allocation requests which can not be fulfilled with the buddy allocator. 6615 * 6616 * The allocated memory is always aligned to a page boundary. If nr_pages is a 6617 * power of two, then allocated range is also guaranteed to be aligned to same 6618 * nr_pages (e.g. 1GB request would be aligned to 1GB). 6619 * 6620 * Allocated pages can be freed with free_contig_range() or by manually calling 6621 * __free_page() on each allocated page. 6622 * 6623 * Return: pointer to contiguous pages on success, or NULL if not successful. 6624 */ 6625 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, 6626 int nid, nodemask_t *nodemask) 6627 { 6628 unsigned long ret, pfn, flags; 6629 struct zonelist *zonelist; 6630 struct zone *zone; 6631 struct zoneref *z; 6632 6633 zonelist = node_zonelist(nid, gfp_mask); 6634 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6635 gfp_zone(gfp_mask), nodemask) { 6636 spin_lock_irqsave(&zone->lock, flags); 6637 6638 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6639 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6640 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6641 /* 6642 * We release the zone lock here because 6643 * alloc_contig_range() will also lock the zone 6644 * at some point. If there's an allocation 6645 * spinning on this lock, it may win the race 6646 * and cause alloc_contig_range() to fail... 
6647 */ 6648 spin_unlock_irqrestore(&zone->lock, flags); 6649 ret = __alloc_contig_pages(pfn, nr_pages, 6650 gfp_mask); 6651 if (!ret) 6652 return pfn_to_page(pfn); 6653 spin_lock_irqsave(&zone->lock, flags); 6654 } 6655 pfn += nr_pages; 6656 } 6657 spin_unlock_irqrestore(&zone->lock, flags); 6658 } 6659 return NULL; 6660 } 6661 #endif /* CONFIG_CONTIG_ALLOC */ 6662 6663 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6664 { 6665 unsigned long count = 0; 6666 6667 for (; nr_pages--; pfn++) { 6668 struct page *page = pfn_to_page(pfn); 6669 6670 count += page_count(page) != 1; 6671 __free_page(page); 6672 } 6673 WARN(count != 0, "%lu pages are still in use!\n", count); 6674 } 6675 EXPORT_SYMBOL(free_contig_range); 6676 6677 /* 6678 * Effectively disable pcplists for the zone by setting the high limit to 0 6679 * and draining all cpus. A concurrent page freeing on another CPU that's about 6680 * to put the page on pcplist will either finish before the drain and the page 6681 * will be drained, or observe the new high limit and skip the pcplist. 6682 * 6683 * Must be paired with a call to zone_pcp_enable(). 6684 */ 6685 void zone_pcp_disable(struct zone *zone) 6686 { 6687 mutex_lock(&pcp_batch_high_lock); 6688 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); 6689 __drain_all_pages(zone, true); 6690 } 6691 6692 void zone_pcp_enable(struct zone *zone) 6693 { 6694 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, 6695 zone->pageset_high_max, zone->pageset_batch); 6696 mutex_unlock(&pcp_batch_high_lock); 6697 } 6698 6699 void zone_pcp_reset(struct zone *zone) 6700 { 6701 int cpu; 6702 struct per_cpu_zonestat *pzstats; 6703 6704 if (zone->per_cpu_pageset != &boot_pageset) { 6705 for_each_online_cpu(cpu) { 6706 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6707 drain_zonestat(zone, pzstats); 6708 } 6709 free_percpu(zone->per_cpu_pageset); 6710 zone->per_cpu_pageset = &boot_pageset; 6711 if (zone->per_cpu_zonestats != &boot_zonestats) { 6712 free_percpu(zone->per_cpu_zonestats); 6713 zone->per_cpu_zonestats = &boot_zonestats; 6714 } 6715 } 6716 } 6717 6718 #ifdef CONFIG_MEMORY_HOTREMOVE 6719 /* 6720 * All pages in the range must be in a single zone, must not contain holes, 6721 * must span full sections, and must be isolated before calling this function. 6722 * 6723 * Returns the number of managed (non-PageOffline()) pages in the range: the 6724 * number of pages for which memory offlining code must adjust managed page 6725 * counters using adjust_managed_page_count(). 6726 */ 6727 unsigned long __offline_isolated_pages(unsigned long start_pfn, 6728 unsigned long end_pfn) 6729 { 6730 unsigned long already_offline = 0, flags; 6731 unsigned long pfn = start_pfn; 6732 struct page *page; 6733 struct zone *zone; 6734 unsigned int order; 6735 6736 offline_mem_sections(pfn, end_pfn); 6737 zone = page_zone(pfn_to_page(pfn)); 6738 spin_lock_irqsave(&zone->lock, flags); 6739 while (pfn < end_pfn) { 6740 page = pfn_to_page(pfn); 6741 /* 6742 * The HWPoisoned page may be not in buddy system, and 6743 * page_count() is not 0. 6744 */ 6745 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6746 pfn++; 6747 continue; 6748 } 6749 /* 6750 * At this point all remaining PageOffline() pages have a 6751 * reference count of 0 and can simply be skipped. 
6752 */ 6753 if (PageOffline(page)) { 6754 BUG_ON(page_count(page)); 6755 BUG_ON(PageBuddy(page)); 6756 already_offline++; 6757 pfn++; 6758 continue; 6759 } 6760 6761 BUG_ON(page_count(page)); 6762 BUG_ON(!PageBuddy(page)); 6763 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); 6764 order = buddy_order(page); 6765 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); 6766 pfn += (1 << order); 6767 } 6768 spin_unlock_irqrestore(&zone->lock, flags); 6769 6770 return end_pfn - start_pfn - already_offline; 6771 } 6772 #endif 6773 6774 /* 6775 * This function returns a stable result only if called under zone lock. 6776 */ 6777 bool is_free_buddy_page(const struct page *page) 6778 { 6779 unsigned long pfn = page_to_pfn(page); 6780 unsigned int order; 6781 6782 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6783 const struct page *head = page - (pfn & ((1 << order) - 1)); 6784 6785 if (PageBuddy(head) && 6786 buddy_order_unsafe(head) >= order) 6787 break; 6788 } 6789 6790 return order <= MAX_PAGE_ORDER; 6791 } 6792 EXPORT_SYMBOL(is_free_buddy_page); 6793 6794 #ifdef CONFIG_MEMORY_FAILURE 6795 static inline void add_to_free_list(struct page *page, struct zone *zone, 6796 unsigned int order, int migratetype, 6797 bool tail) 6798 { 6799 __add_to_free_list(page, zone, order, migratetype, tail); 6800 account_freepages(zone, 1 << order, migratetype); 6801 } 6802 6803 /* 6804 * Break down a higher-order page in sub-pages, and keep our target out of 6805 * buddy allocator. 6806 */ 6807 static void break_down_buddy_pages(struct zone *zone, struct page *page, 6808 struct page *target, int low, int high, 6809 int migratetype) 6810 { 6811 unsigned long size = 1 << high; 6812 struct page *current_buddy; 6813 6814 while (high > low) { 6815 high--; 6816 size >>= 1; 6817 6818 if (target >= &page[size]) { 6819 current_buddy = page; 6820 page = page + size; 6821 } else { 6822 current_buddy = page + size; 6823 } 6824 6825 if (set_page_guard(zone, current_buddy, high)) 6826 continue; 6827 6828 add_to_free_list(current_buddy, zone, high, migratetype, false); 6829 set_buddy_order(current_buddy, high); 6830 } 6831 } 6832 6833 /* 6834 * Take a page that will be marked as poisoned off the buddy allocator. 6835 */ 6836 bool take_page_off_buddy(struct page *page) 6837 { 6838 struct zone *zone = page_zone(page); 6839 unsigned long pfn = page_to_pfn(page); 6840 unsigned long flags; 6841 unsigned int order; 6842 bool ret = false; 6843 6844 spin_lock_irqsave(&zone->lock, flags); 6845 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6846 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6847 int page_order = buddy_order(page_head); 6848 6849 if (PageBuddy(page_head) && page_order >= order) { 6850 unsigned long pfn_head = page_to_pfn(page_head); 6851 int migratetype = get_pfnblock_migratetype(page_head, 6852 pfn_head); 6853 6854 del_page_from_free_list(page_head, zone, page_order, 6855 migratetype); 6856 break_down_buddy_pages(zone, page_head, page, 0, 6857 page_order, migratetype); 6858 SetPageHWPoisonTakenOff(page); 6859 ret = true; 6860 break; 6861 } 6862 if (page_count(page_head) > 0) 6863 break; 6864 } 6865 spin_unlock_irqrestore(&zone->lock, flags); 6866 return ret; 6867 } 6868 6869 /* 6870 * Cancel takeoff done by take_page_off_buddy(). 
6871 */ 6872 bool put_page_back_buddy(struct page *page) 6873 { 6874 struct zone *zone = page_zone(page); 6875 unsigned long flags; 6876 bool ret = false; 6877 6878 spin_lock_irqsave(&zone->lock, flags); 6879 if (put_page_testzero(page)) { 6880 unsigned long pfn = page_to_pfn(page); 6881 int migratetype = get_pfnblock_migratetype(page, pfn); 6882 6883 ClearPageHWPoisonTakenOff(page); 6884 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 6885 if (TestClearPageHWPoison(page)) { 6886 ret = true; 6887 } 6888 } 6889 spin_unlock_irqrestore(&zone->lock, flags); 6890 6891 return ret; 6892 } 6893 #endif 6894 6895 #ifdef CONFIG_ZONE_DMA 6896 bool has_managed_dma(void) 6897 { 6898 struct pglist_data *pgdat; 6899 6900 for_each_online_pgdat(pgdat) { 6901 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 6902 6903 if (managed_zone(zone)) 6904 return true; 6905 } 6906 return false; 6907 } 6908 #endif /* CONFIG_ZONE_DMA */ 6909 6910 #ifdef CONFIG_UNACCEPTED_MEMORY 6911 6912 /* Counts number of zones with unaccepted pages. */ 6913 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 6914 6915 static bool lazy_accept = true; 6916 6917 static int __init accept_memory_parse(char *p) 6918 { 6919 if (!strcmp(p, "lazy")) { 6920 lazy_accept = true; 6921 return 0; 6922 } else if (!strcmp(p, "eager")) { 6923 lazy_accept = false; 6924 return 0; 6925 } else { 6926 return -EINVAL; 6927 } 6928 } 6929 early_param("accept_memory", accept_memory_parse); 6930 6931 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6932 { 6933 phys_addr_t start = page_to_phys(page); 6934 6935 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); 6936 } 6937 6938 static void __accept_page(struct zone *zone, unsigned long *flags, 6939 struct page *page) 6940 { 6941 bool last; 6942 6943 list_del(&page->lru); 6944 last = list_empty(&zone->unaccepted_pages); 6945 6946 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6947 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 6948 __ClearPageUnaccepted(page); 6949 spin_unlock_irqrestore(&zone->lock, *flags); 6950 6951 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); 6952 6953 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); 6954 6955 if (last) 6956 static_branch_dec(&zones_with_unaccepted_pages); 6957 } 6958 6959 void accept_page(struct page *page) 6960 { 6961 struct zone *zone = page_zone(page); 6962 unsigned long flags; 6963 6964 spin_lock_irqsave(&zone->lock, flags); 6965 if (!PageUnaccepted(page)) { 6966 spin_unlock_irqrestore(&zone->lock, flags); 6967 return; 6968 } 6969 6970 /* Unlocks zone->lock */ 6971 __accept_page(zone, &flags, page); 6972 } 6973 6974 static bool try_to_accept_memory_one(struct zone *zone) 6975 { 6976 unsigned long flags; 6977 struct page *page; 6978 6979 spin_lock_irqsave(&zone->lock, flags); 6980 page = list_first_entry_or_null(&zone->unaccepted_pages, 6981 struct page, lru); 6982 if (!page) { 6983 spin_unlock_irqrestore(&zone->lock, flags); 6984 return false; 6985 } 6986 6987 /* Unlocks zone->lock */ 6988 __accept_page(zone, &flags, page); 6989 6990 return true; 6991 } 6992 6993 static bool cond_accept_memory(struct zone *zone, unsigned int order) 6994 { 6995 long to_accept; 6996 bool ret = false; 6997 6998 if (!has_unaccepted_memory()) 6999 return false; 7000 7001 if (list_empty(&zone->unaccepted_pages)) 7002 return false; 7003 7004 /* How much to accept to get to promo watermark? 
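	 * That is: accept enough MAX_ORDER blocks that the usable accepted
	 * free pages (NR_FREE_PAGES minus what is unusable for this order
	 * and minus still-unaccepted pages) reach the promo watermark.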
*/ 7005 to_accept = promo_wmark_pages(zone) - 7006 (zone_page_state(zone, NR_FREE_PAGES) - 7007 __zone_watermark_unusable_free(zone, order, 0) - 7008 zone_page_state(zone, NR_UNACCEPTED)); 7009 7010 while (to_accept > 0) { 7011 if (!try_to_accept_memory_one(zone)) 7012 break; 7013 ret = true; 7014 to_accept -= MAX_ORDER_NR_PAGES; 7015 } 7016 7017 return ret; 7018 } 7019 7020 static inline bool has_unaccepted_memory(void) 7021 { 7022 return static_branch_unlikely(&zones_with_unaccepted_pages); 7023 } 7024 7025 static bool __free_unaccepted(struct page *page) 7026 { 7027 struct zone *zone = page_zone(page); 7028 unsigned long flags; 7029 bool first = false; 7030 7031 if (!lazy_accept) 7032 return false; 7033 7034 spin_lock_irqsave(&zone->lock, flags); 7035 first = list_empty(&zone->unaccepted_pages); 7036 list_add_tail(&page->lru, &zone->unaccepted_pages); 7037 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7038 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7039 __SetPageUnaccepted(page); 7040 spin_unlock_irqrestore(&zone->lock, flags); 7041 7042 if (first) 7043 static_branch_inc(&zones_with_unaccepted_pages); 7044 7045 return true; 7046 } 7047 7048 #else 7049 7050 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7051 { 7052 return false; 7053 } 7054 7055 static bool cond_accept_memory(struct zone *zone, unsigned int order) 7056 { 7057 return false; 7058 } 7059 7060 static inline bool has_unaccepted_memory(void) 7061 { 7062 return false; 7063 } 7064 7065 static bool __free_unaccepted(struct page *page) 7066 { 7067 BUILD_BUG(); 7068 return false; 7069 } 7070 7071 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7072
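
/*
 * Example: the acceptance policy implemented above is selected on the
 * kernel command line, parsed by accept_memory_parse(); "lazy" is the
 * default:
 *
 *	accept_memory=lazy	(defer; accept memory as the allocator
 *				 needs it, via the unaccepted-page lists)
 *	accept_memory=eager	(accept everything up front; the
 *				 unaccepted-page lists above are bypassed)
 */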