// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
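/*
 * Illustrative sketch (kept under "#if 0", never compiled): how the fpi_t
 * flags above are meant to combine. example_requeue_page() is a
 * hypothetical helper; __free_pages_ok() is declared further below.
 */
#if 0
static void example_requeue_page(struct page *page, unsigned int order)
{
	/*
	 * Hand back a page that was only temporarily pulled off a
	 * freelist: skip the reporting notification and keep the page at
	 * the freelist tail so it is not immediately reused.
	 */
	__free_pages_ok(page, order, FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
}
#endif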
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up and lock a per-cpu variable with an embedded
 * spinlock. The return value should be used with the equivalent unlock
 * helper.
 */
#define pcpu_spin_lock(type, member, ptr)	\
({						\
	type *_ret;				\
	pcpu_task_pin();			\
	_ret = this_cpu_ptr(ptr);		\
	spin_lock(&_ret->member);		\
	_ret;					\
})

#define pcpu_spin_trylock(type, member, ptr)	\
({						\
	type *_ret;				\
	pcpu_task_pin();			\
	_ret = this_cpu_ptr(ptr);		\
	if (!spin_trylock(&_ret->member)) {	\
		pcpu_task_unpin();		\
		_ret = NULL;			\
	}					\
	_ret;					\
})

#define pcpu_spin_unlock(member, ptr)		\
({						\
	spin_unlock(&ptr->member);		\
	pcpu_task_unpin();			\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)			\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)			\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)			\
	pcpu_spin_unlock(lock, ptr)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
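/*
 * Illustrative sketch (kept under "#if 0", never compiled): the calling
 * pattern the pcp lock helpers above expect. example_with_pcp() is a
 * hypothetical caller; real users such as free_unref_page() follow the
 * same trylock-or-fall-back shape.
 */
#if 0
static bool example_with_pcp(struct zone *zone)
{
	struct per_cpu_pages *pcp;
	unsigned long UP_flags;

	/* On UP this disables IRQs; on SMP/PREEMPT_RT it is a no-op. */
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
	if (!pcp) {
		pcp_trylock_finish(UP_flags);
		return false;	/* fall back to the buddy lists */
	}

	/* ... operate on pcp->lists / pcp->count here ... */

	pcp_spin_unlock(pcp);
	pcp_trylock_finish(UP_flags);
	return true;
}
#endif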
/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

static bool page_contains_unaccepted(struct page *page, unsigned int order);
static void accept_page(struct page *page, unsigned int order);
static bool try_to_accept_memory(struct zone *zone, unsigned int order);
static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids the
 * section-mismatch warning and ensures that the function body gets unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}
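/*
 * Illustrative sketch (comment only): the bit arithmetic above, worked
 * through for one assumed configuration: pageblock_order == 9,
 * NR_PAGEBLOCK_BITS == 4, BITS_PER_LONG == 64, !CONFIG_SPARSEMEM and
 * zone_start_pfn == 0. For pfn 0x12345:
 *
 *	block index  = 0x12345 >> 9	= 145
 *	bitidx       = 145 * 4		= 580
 *	word_bitidx  = 580 / 64		= 9
 *	bitidx      &= 63		-> 4
 *
 * so the 4-bit flags for this pageblock live at bits 4..7 of bitmap[9].
 */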
static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		ret = !zone_spans_pfn(zone, pfn);
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (zone != page_zone(page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}
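/*
 * Illustrative sketch (comment only): the layout prep_compound_page()
 * establishes for an assumed order-2 allocation, per the comment above:
 *
 *	page[0]: PG_head set				("head page")
 *	page[1]: PageTail, compound_head -> page[0],
 *		 holds ->compound_order == 2
 *	page[2]: PageTail, compound_head -> page[0]
 *	page[3]: PageTail, compound_head -> page[0]
 */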
void destroy_large_folio(struct folio *folio)
{
	if (folio_test_hugetlb(folio)) {
		free_huge_folio(folio);
		return;
	}

	if (folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

	mem_cgroup_uncharge(folio);
	free_the_page(&folio->page, folio_order(folio));
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, buddy_list);
}
/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}
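/*
 * Illustrative sketch (comment only): the buddy arithmetic used by the
 * merge loop above, with assumed numbers. find_buddy_page_pfn() derives
 * the buddy by flipping bit 'order' of the pfn:
 *
 *	pfn 0x108, order 3:
 *		buddy_pfn    = 0x108 ^ (1UL << 3) = 0x100
 *		combined_pfn = buddy_pfn & pfn    = 0x100
 *
 * i.e. the merged order-4 page always starts at the lower buddy.
 */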
/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free page will be put into
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pfnblock_migratetype(free_page, free_page_pfn);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
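/*
 * Illustrative sketch (comment only): one assumed run of the split loop
 * above, for an order-10 free page at pfn 0x400 with
 * split_pfn_offset == 0x300:
 *
 *	step 1: min(__ffs(0x400) = 10, __fls(0x300) = 9)
 *		-> free order-9 at 0x400, offset becomes 0x100
 *	step 2: min(__ffs(0x600) = 9, __fls(0x100) = 8)
 *		-> free order-8 at 0x600, offset hits 0, reset to 0x100
 *	step 3: free order-8 at 0x700, pfn reaches 0x800 == end
 */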
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static inline bool is_check_pages_enabled(void)
{
	return static_branch_unlikely(&check_pages_enabled);
}

static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
	struct folio *folio = (struct folio *)head_page;
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!is_check_pages_enabled()) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: these may be in place of ->mapping */
		if (unlikely(folio_entire_mapcount(folio))) {
			bad_page(page, "nonzero entire_mapcount");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
			bad_page(page, "nonzero nr_pages_mapped");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_pincount))) {
			bad_page(page, "nonzero pincount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}
/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. For generic KASAN: deferred memory initialization has not yet completed.
 *    Tag-based KASAN modes skip pages freed via deferred memory initialization
 *    using page tags instead (see below).
 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
 *    that error detection is disabled for accesses via the page address.
 *
 * Pages will have match-all tags in the following circumstances:
 *
 * 1. Pages are being initialized for the first time, including during deferred
 *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
 *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
 * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return deferred_pages_enabled();

	return page_kasan_tag(page) == 0xff;
}

static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, fpi_t fpi_flags)
{
	int bad = 0;
	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_online() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			page[1].flags &= ~PAGE_FLAGS_SECOND;
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	unsigned long flags;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure a proper count is passed, which otherwise would get stuck
	 * in the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > NR_PCP_LISTS - 1)
				pindex = 0;
			list = &pcp->lists[pindex];
		} while (list_empty(list));

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order, fpi_flags))
		return;

	/*
	 * get_pfnblock_migratetype() is called here without the zone lock
	 * so that the lookup does not extend the lock hold time. If the
	 * pageblock was isolated meanwhile, the migratetype is re-read
	 * under the lock below.
	 */
	migratetype = get_pfnblock_migratetype(page, pfn);

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	if (page_contains_unaccepted(page, order)) {
		if (order == MAX_ORDER && __free_unaccepted(page))
			return;

		accept_page(page, order);
	}

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 *
 * Note: the function may return non-NULL struct page even for a page block
 * which contains a memory hole (i.e. there is no physical memory for a subset
 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
 * even though the start pfn is online and valid. This should be safe most of
 * the time because struct pages are still initialized via init_unavailable_range()
 * and pfn walkers shouldn't touch any physical memory range for which they do
 * not recognize any specific metadata in struct pages.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), which will allow them to
		 * merge back into the allocator when the buddy is freed.
		 * The corresponding page table entries will not be touched,
		 * so the pages stay not-present in the virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}
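/*
 * Illustrative sketch (comment only): expand() splitting an assumed
 * order-3 block to satisfy an order-0 request:
 *
 *	high = 3, low = 0, size = 8:
 *		iteration 1: page[4] freed as order-2
 *		iteration 2: page[2] freed as order-1
 *		iteration 3: page[1] freed as order-0
 *
 * leaving page[0] for the caller, with the larger remainders queued
 * first - the delivery order the comment above calls critical.
 */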
static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool check_new_pages(struct page *page, unsigned int order)
{
	if (is_check_pages_enabled()) {
		for (int i = 0; i < (1 << order); i++) {
			struct page *p = page + i;

			if (check_new_page(p))
				return true;
		}
	}

	return false;
}

static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN.
	 */
	return flags & __GFP_SKIP_KASAN;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * If memory tags should be zeroed
	 * (which happens only when memory should be initialized as well).
	 */
	if (zero_tags) {
		/* Initialize both memory and memory tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Take note that memory was initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags) &&
	    kasan_unpoison_pages(page, order, init)) {
		/* Take note that memory was initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/*
		 * If memory tags have not been set by KASAN, reset the page
		 * tags to ensure page_address() dereferencing does not fault.
		 */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, initialize it now. */
	if (init)
		kernel_init_pages(page, 1 << order);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which the free lists are fallen back
 * to when the lists for the desired migratetype are depleted.
 *
 * The other migratetypes do not have fallbacks.
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
};

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif
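/*
 * Illustrative sketch (kept under "#if 0", never compiled): how the
 * fallbacks[] table above is walked. example_first_nonempty_fallback()
 * is hypothetical; find_suitable_fallback() below is the real consumer.
 */
#if 0
static int example_first_nonempty_fallback(struct free_area *area,
					   int start_migratetype)
{
	int i;

	for (i = 0; i < MIGRATE_PCPTYPES - 1; i++) {
		/* e.g. MIGRATE_UNMOVABLE tries RECLAIMABLE, then MOVABLE */
		int fallback_mt = fallbacks[start_migratetype][i];

		if (!free_area_empty(area, fallback_mt))
			return fallback_mt;
	}

	return -1;
}
#endif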
/*
 * Move the free pages in a range to the freelist tail of the requested type.
 * Note that start_pfn and end_pfn are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block().
 */
static int move_freepages(struct zone *zone,
			  unsigned long start_pfn, unsigned long end_pfn,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned long pfn;
	unsigned int order;
	int pages_moved = 0;

	for (pfn = start_pfn; pfn <= end_pfn;) {
		page = pfn_to_page(pfn);
		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (num_movable &&
					(PageLRU(page) || __PageMovable(page)))
				(*num_movable)++;
			pfn++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);
		move_to_free_list(page, zone, order, migratetype);
		pfn += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable)
{
	unsigned long start_pfn, end_pfn, pfn;

	if (num_movable)
		*num_movable = 0;

	pfn = page_to_pfn(page);
	start_pfn = pageblock_start_pfn(pfn);
	end_pfn = pageblock_end_pfn(pfn) - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_pfn = pfn;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_pfn, end_pfn, migratetype,
								num_movable);
}
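/*
 * Illustrative sketch (comment only): the pfn clamping done by
 * move_freepages_block() above, for an assumed pageblock_order of 9
 * (512 pages) and a page at pfn 0x12345:
 *
 *	start_pfn = pageblock_start_pfn(0x12345)   = 0x12200
 *	end_pfn   = pageblock_end_pfn(0x12345) - 1 = 0x123ff
 *
 * If the zone does not span start_pfn, scanning starts at the page
 * itself; if it does not span end_pfn, nothing is moved at all.
 */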
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * This order check is kept intentionally, even though the check
	 * below is more relaxed. If this condition is met we can steal
	 * the whole pageblock, whereas the check below does not guarantee
	 * that and is merely a heuristic that could be changed anytime.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

static inline bool boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	if (!watermark_boost_factor)
		return false;
	/*
	 * Don't bother in zones that are unlikely to produce results.
	 * On small machines, including kdump capture kernels running
	 * in a small area, boosting the watermark can cause an out of
	 * memory situation immediately.
	 */
	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
		return false;

	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);

	/*
	 * high watermark may be uninitialised if fragmentation occurs
	 * very early in boot so do not boost. We do not fall
	 * through and boost by pageblock_nr_pages as failing
	 * allocations that early means that reclaim is not going
	 * to help and it may even be impossible to reclaim the
	 * boosted watermark resulting in a hang.
	 */
	if (!max_boost)
		return false;

	max_boost = max(pageblock_nr_pages, max_boost);

	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);

	return true;
}
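/*
 * Illustrative sketch (comment only): the arithmetic in boost_watermark()
 * above, with the default watermark_boost_factor of 15000 and an assumed
 * high watermark of 10000 pages:
 *
 *	max_boost = mult_frac(10000, 15000, 10000) = 15000 pages
 *
 * Each fallback event then adds pageblock_nr_pages (e.g. 512) to
 * zone->watermark_boost, clamped to max_boost, so roughly 30 events
 * saturate the boost.
 */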
For other types it's a bit more tricky. 1811 */ 1812 if (start_type == MIGRATE_MOVABLE) { 1813 alike_pages = movable_pages; 1814 } else { 1815 /* 1816 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 1817 * to a MOVABLE pageblock, consider all non-movable pages as 1818 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 1819 * vice versa, be conservative since we can't distinguish the 1820 * exact migratetype of non-movable pages. 1821 */ 1822 if (old_block_type == MIGRATE_MOVABLE) 1823 alike_pages = pageblock_nr_pages 1824 - (free_pages + movable_pages); 1825 else 1826 alike_pages = 0; 1827 } 1828 /* 1829 * If a sufficient number of pages in the block are either free or have 1830 * a migratetype compatible with our allocation, claim the whole block. 1831 */ 1832 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 1833 page_group_by_mobility_disabled) 1834 set_pageblock_migratetype(page, start_type); 1835 1836 return; 1837 1838 single_page: 1839 move_to_free_list(page, zone, current_order, start_type); 1840 } 1841 1842 /* 1843 * Check whether there is a suitable fallback freepage with the requested order. 1844 * If only_stealable is true, this function returns fallback_mt only if 1845 * we can steal the other freepages altogether. This helps to reduce 1846 * fragmentation due to mixed migratetype pages in one pageblock. 1847 */ 1848 int find_suitable_fallback(struct free_area *area, unsigned int order, 1849 int migratetype, bool only_stealable, bool *can_steal) 1850 { 1851 int i; 1852 int fallback_mt; 1853 1854 if (area->nr_free == 0) 1855 return -1; 1856 1857 *can_steal = false; 1858 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 1859 fallback_mt = fallbacks[migratetype][i]; 1860 if (free_area_empty(area, fallback_mt)) 1861 continue; 1862 1863 if (can_steal_fallback(order, migratetype)) 1864 *can_steal = true; 1865 1866 if (!only_stealable) 1867 return fallback_mt; 1868 1869 if (*can_steal) 1870 return fallback_mt; 1871 } 1872 1873 return -1; 1874 } 1875 1876 /* 1877 * Reserve a pageblock for the exclusive use of high-order atomic allocations 1878 * if there are no empty pageblocks that contain a page with a suitable order. 1879 */ 1880 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) 1881 { 1882 int mt; 1883 unsigned long max_managed, flags; 1884 1885 /* 1886 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 1887 * The check is race-prone but harmless. 1888 */ 1889 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 1890 if (zone->nr_reserved_highatomic >= max_managed) 1891 return; 1892 1893 spin_lock_irqsave(&zone->lock, flags); 1894 1895 /* Recheck the nr_reserved_highatomic limit under the lock */ 1896 if (zone->nr_reserved_highatomic >= max_managed) 1897 goto out_unlock; 1898 1899 /* Yoink! */ 1900 mt = get_pageblock_migratetype(page); 1901 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 1902 if (migratetype_is_mergeable(mt)) { 1903 zone->nr_reserved_highatomic += pageblock_nr_pages; 1904 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 1905 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 1906 } 1907 1908 out_unlock: 1909 spin_unlock_irqrestore(&zone->lock, flags); 1910 } 1911 1912 /* 1913 * Used when an allocation is about to fail under memory pressure. This 1914 * potentially hurts the reliability of high-order allocations when under 1915 * intense memory pressure, but failed atomic allocations should be easier 1916 * to recover from than an OOM.
1917 * 1918 * If @force is true, try to unreserve a pageblock even though the highatomic 1919 * reserve is exhausted. 1920 */ 1921 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 1922 bool force) 1923 { 1924 struct zonelist *zonelist = ac->zonelist; 1925 unsigned long flags; 1926 struct zoneref *z; 1927 struct zone *zone; 1928 struct page *page; 1929 int order; 1930 bool ret; 1931 1932 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 1933 ac->nodemask) { 1934 /* 1935 * Preserve at least one pageblock unless memory pressure 1936 * is really high. 1937 */ 1938 if (!force && zone->nr_reserved_highatomic <= 1939 pageblock_nr_pages) 1940 continue; 1941 1942 spin_lock_irqsave(&zone->lock, flags); 1943 for (order = 0; order <= MAX_ORDER; order++) { 1944 struct free_area *area = &(zone->free_area[order]); 1945 1946 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 1947 if (!page) 1948 continue; 1949 1950 /* 1951 * In the page freeing path, migratetype changes are racy, so 1952 * we can encounter several free pages in a pageblock 1953 * in this loop even though we changed the pageblock type 1954 * from highatomic to ac->migratetype. So we should 1955 * adjust the count only once. 1956 */ 1957 if (is_migrate_highatomic_page(page)) { 1958 /* 1959 * It should never happen but changes to 1960 * locking could inadvertently allow a per-cpu 1961 * drain to add pages to MIGRATE_HIGHATOMIC 1962 * while unreserving so be safe and watch for 1963 * underflows. 1964 */ 1965 zone->nr_reserved_highatomic -= min( 1966 pageblock_nr_pages, 1967 zone->nr_reserved_highatomic); 1968 } 1969 1970 /* 1971 * Convert to ac->migratetype and avoid the normal 1972 * pageblock stealing heuristics. Minimally, the caller 1973 * is doing the work and needs the pages. More 1974 * importantly, if the block were always converted to 1975 * MIGRATE_UNMOVABLE or another type, then the number 1976 * of pageblocks that cannot be completely freed 1977 * may increase. 1978 */ 1979 set_pageblock_migratetype(page, ac->migratetype); 1980 ret = move_freepages_block(zone, page, ac->migratetype, 1981 NULL); 1982 if (ret) { 1983 spin_unlock_irqrestore(&zone->lock, flags); 1984 return ret; 1985 } 1986 } 1987 spin_unlock_irqrestore(&zone->lock, flags); 1988 } 1989 1990 return false; 1991 } 1992 1993 /* 1994 * Try finding a free buddy page on the fallback list and put it on the free 1995 * list of the requested migratetype, possibly along with other pages from the same 1996 * block, depending on fragmentation avoidance heuristics. Returns true if 1997 * fallback was found so that __rmqueue_smallest() can grab it. 1998 * 1999 * The use of signed ints for order and current_order is a deliberate 2000 * deviation from the rest of this file, to make the for loop 2001 * condition simpler. 2002 */ 2003 static __always_inline bool 2004 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2005 unsigned int alloc_flags) 2006 { 2007 struct free_area *area; 2008 int current_order; 2009 int min_order = order; 2010 struct page *page; 2011 int fallback_mt; 2012 bool can_steal; 2013 2014 /* 2015 * Do not steal pages from freelists belonging to other pageblocks, 2016 * i.e. orders < pageblock_order. If there are no local zones free, 2017 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2018 */ 2019 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2020 min_order = pageblock_order; 2021 2022 /* 2023 * Find the largest available free page in the other list.
This roughly 2024 * approximates finding the pageblock with the most free pages, which 2025 * would be too costly to do exactly. 2026 */ 2027 for (current_order = MAX_ORDER; current_order >= min_order; 2028 --current_order) { 2029 area = &(zone->free_area[current_order]); 2030 fallback_mt = find_suitable_fallback(area, current_order, 2031 start_migratetype, false, &can_steal); 2032 if (fallback_mt == -1) 2033 continue; 2034 2035 /* 2036 * If we cannot steal all free pages from the pageblock and the 2037 * requested migratetype is movable, it's better to 2038 * steal and split the smallest available page instead of the 2039 * largest available page, because even if the next movable 2040 * allocation falls back into a different pageblock than this 2041 * one, it won't cause permanent fragmentation. 2042 */ 2043 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2044 && current_order > order) 2045 goto find_smallest; 2046 2047 goto do_steal; 2048 } 2049 2050 return false; 2051 2052 find_smallest: 2053 for (current_order = order; current_order <= MAX_ORDER; 2054 current_order++) { 2055 area = &(zone->free_area[current_order]); 2056 fallback_mt = find_suitable_fallback(area, current_order, 2057 start_migratetype, false, &can_steal); 2058 if (fallback_mt != -1) 2059 break; 2060 } 2061 2062 /* 2063 * This should not happen - we already found a suitable fallback 2064 * when looking for the largest page. 2065 */ 2066 VM_BUG_ON(current_order > MAX_ORDER); 2067 2068 do_steal: 2069 page = get_page_from_free_area(area, fallback_mt); 2070 2071 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2072 can_steal); 2073 2074 trace_mm_page_alloc_extfrag(page, order, current_order, 2075 start_migratetype, fallback_mt); 2076 2077 return true; 2078 2079 } 2080 2081 /* 2082 * Do the hard work of removing an element from the buddy allocator. 2083 * Call me with the zone->lock already held. 2084 */ 2085 static __always_inline struct page * 2086 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2087 unsigned int alloc_flags) 2088 { 2089 struct page *page; 2090 2091 if (IS_ENABLED(CONFIG_CMA)) { 2092 /* 2093 * Balance movable allocations between regular and CMA areas by 2094 * allocating from CMA when over half of the zone's free memory 2095 * is in the CMA area. 2096 */ 2097 if (alloc_flags & ALLOC_CMA && 2098 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2099 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2100 page = __rmqueue_cma_fallback(zone, order); 2101 if (page) 2102 return page; 2103 } 2104 } 2105 retry: 2106 page = __rmqueue_smallest(zone, order, migratetype); 2107 if (unlikely(!page)) { 2108 if (alloc_flags & ALLOC_CMA) 2109 page = __rmqueue_cma_fallback(zone, order); 2110 2111 if (!page && __rmqueue_fallback(zone, order, migratetype, 2112 alloc_flags)) 2113 goto retry; 2114 } 2115 return page; 2116 } 2117 2118 /* 2119 * Obtain a specified number of elements from the buddy allocator, all under 2120 * a single hold of the lock, for efficiency. Add them to the supplied list. 2121 * Returns the number of new pages which were placed at *list.
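 *
 * Illustrative example (numbers assumed): a request for count == 32
 * order-0 pages from a zone with only 10 free pages left stops early
 * and returns 10, so callers must tolerate a partial refill.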
2122 */ 2123 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2124 unsigned long count, struct list_head *list, 2125 int migratetype, unsigned int alloc_flags) 2126 { 2127 unsigned long flags; 2128 int i; 2129 2130 spin_lock_irqsave(&zone->lock, flags); 2131 for (i = 0; i < count; ++i) { 2132 struct page *page = __rmqueue(zone, order, migratetype, 2133 alloc_flags); 2134 if (unlikely(page == NULL)) 2135 break; 2136 2137 /* 2138 * Split buddy pages returned by expand() are received here in 2139 * physical page order. Each page is added to the tail of the 2140 * caller's list, so from the caller's perspective the linked list 2141 * is ordered by page number under some conditions. This is 2142 * useful for IO devices that process the list from the head, 2143 * and thus in physical page order, and it lets such 2144 * devices merge IO requests when the physical 2145 * pages are ordered properly. 2146 */ 2147 list_add_tail(&page->pcp_list, list); 2148 if (is_migrate_cma(get_pcppage_migratetype(page))) 2149 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2150 -(1 << order)); 2151 } 2152 2153 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2154 spin_unlock_irqrestore(&zone->lock, flags); 2155 2156 return i; 2157 } 2158 2159 #ifdef CONFIG_NUMA 2160 /* 2161 * Called from the vmstat counter updater to drain this processor's 2162 * pagesets for zones on remote nodes once they have 2163 * expired. 2164 */ 2165 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2166 { 2167 int to_drain, batch; 2168 2169 batch = READ_ONCE(pcp->batch); 2170 to_drain = min(pcp->count, batch); 2171 if (to_drain > 0) { 2172 spin_lock(&pcp->lock); 2173 free_pcppages_bulk(zone, to_drain, pcp, 0); 2174 spin_unlock(&pcp->lock); 2175 } 2176 } 2177 #endif 2178 2179 /* 2180 * Drain pcplists of the indicated processor and zone. 2181 */ 2182 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2183 { 2184 struct per_cpu_pages *pcp; 2185 2186 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2187 if (pcp->count) { 2188 spin_lock(&pcp->lock); 2189 free_pcppages_bulk(zone, pcp->count, pcp, 0); 2190 spin_unlock(&pcp->lock); 2191 } 2192 } 2193 2194 /* 2195 * Drain pcplists of all zones on the indicated processor. 2196 */ 2197 static void drain_pages(unsigned int cpu) 2198 { 2199 struct zone *zone; 2200 2201 for_each_populated_zone(zone) { 2202 drain_pages_zone(cpu, zone); 2203 } 2204 } 2205 2206 /* 2207 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2208 */ 2209 void drain_local_pages(struct zone *zone) 2210 { 2211 int cpu = smp_processor_id(); 2212 2213 if (zone) 2214 drain_pages_zone(cpu, zone); 2215 else 2216 drain_pages(cpu); 2217 } 2218 2219 /* 2220 * The implementation of drain_all_pages(), exposing an extra parameter to 2221 * drain on all cpus. 2222 * 2223 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2224 * not empty. The check for non-emptiness can however race with a free to 2225 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2226 * that need the guarantee that every CPU has drained can disable the 2227 * optimizing racy check.
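 *
 * Illustrative note (caller assumed, defined elsewhere): pcp-disabling
 * paths such as zone_pcp_disable() are the kind of caller that wants
 * force_all_cpus == true, trading extra work for that guarantee.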
2228 */ 2229 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2230 { 2231 int cpu; 2232 2233 /* 2234 * Allocate in the BSS so we won't require allocation in 2235 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2236 */ 2237 static cpumask_t cpus_with_pcps; 2238 2239 /* 2240 * Do not drain if one is already in progress unless it's specific to 2241 * a zone. Such callers are primarily CMA and memory hotplug and need 2242 * the drain to be complete when the call returns. 2243 */ 2244 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2245 if (!zone) 2246 return; 2247 mutex_lock(&pcpu_drain_mutex); 2248 } 2249 2250 /* 2251 * We don't care about racing with CPU hotplug events, 2252 * as an offline notification will cause the notified 2253 * cpu to drain that CPU's pcps, and on_each_cpu_mask 2254 * disables preemption as part of its processing. 2255 */ 2256 for_each_online_cpu(cpu) { 2257 struct per_cpu_pages *pcp; 2258 struct zone *z; 2259 bool has_pcps = false; 2260 2261 if (force_all_cpus) { 2262 /* 2263 * The pcp.count check is racy, some callers need a 2264 * guarantee that no cpu is missed. 2265 */ 2266 has_pcps = true; 2267 } else if (zone) { 2268 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2269 if (pcp->count) 2270 has_pcps = true; 2271 } else { 2272 for_each_populated_zone(z) { 2273 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2274 if (pcp->count) { 2275 has_pcps = true; 2276 break; 2277 } 2278 } 2279 } 2280 2281 if (has_pcps) 2282 cpumask_set_cpu(cpu, &cpus_with_pcps); 2283 else 2284 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2285 } 2286 2287 for_each_cpu(cpu, &cpus_with_pcps) { 2288 if (zone) 2289 drain_pages_zone(cpu, zone); 2290 else 2291 drain_pages(cpu); 2292 } 2293 2294 mutex_unlock(&pcpu_drain_mutex); 2295 } 2296 2297 /* 2298 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2299 * 2300 * When the zone parameter is non-NULL, spill just the single zone's pages. 2301 */ 2302 void drain_all_pages(struct zone *zone) 2303 { 2304 __drain_all_pages(zone, false); 2305 } 2306 2307 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 2308 unsigned int order) 2309 { 2310 int migratetype; 2311 2312 if (!free_pages_prepare(page, order, FPI_NONE)) 2313 return false; 2314 2315 migratetype = get_pfnblock_migratetype(page, pfn); 2316 set_pcppage_migratetype(page, migratetype); 2317 return true; 2318 } 2319 2320 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high) 2321 { 2322 int min_nr_free, max_nr_free; 2323 int batch = READ_ONCE(pcp->batch); 2324 2325 /* Free everything if batch freeing high-order pages. */ 2326 if (unlikely(free_high)) 2327 return pcp->count; 2328 2329 /* Check for PCP disabled or boot pageset */ 2330 if (unlikely(high < batch)) 2331 return 1; 2332 2333 /* Leave at least pcp->batch pages on the list */ 2334 min_nr_free = batch; 2335 max_nr_free = high - batch; 2336 2337 /* 2338 * Double the number of pages freed each time there is subsequent 2339 * freeing of pages without any allocation.
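 *
 * Illustrative example (values assumed): with batch == 31,
 * free_factor == 2 and high == 512, batch scales to 124, free_factor
 * is bumped to 3, and the result is clamped to [31, 481], so 124
 * pages are freed.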
2340 */ 2341 batch <<= pcp->free_factor; 2342 if (batch < max_nr_free) 2343 pcp->free_factor++; 2344 batch = clamp(batch, min_nr_free, max_nr_free); 2345 2346 return batch; 2347 } 2348 2349 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2350 bool free_high) 2351 { 2352 int high = READ_ONCE(pcp->high); 2353 2354 if (unlikely(!high || free_high)) 2355 return 0; 2356 2357 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 2358 return high; 2359 2360 /* 2361 * If reclaim is active, limit the number of pages that can be 2362 * stored on pcp lists. 2363 */ 2364 return min(READ_ONCE(pcp->batch) << 2, high); 2365 } 2366 2367 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2368 struct page *page, int migratetype, 2369 unsigned int order) 2370 { 2371 int high; 2372 int pindex; 2373 bool free_high; 2374 2375 __count_vm_events(PGFREE, 1 << order); 2376 pindex = order_to_pindex(migratetype, order); 2377 list_add(&page->pcp_list, &pcp->lists[pindex]); 2378 pcp->count += 1 << order; 2379 2380 /* 2381 * As high-order pages other than THPs stored on PCP can contribute 2382 * to fragmentation, limit the number stored when PCP is heavily 2383 * freeing without allocation. The remainder after bulk freeing 2384 * stops will be drained from vmstat refresh context. 2385 */ 2386 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 2387 2388 high = nr_pcp_high(pcp, zone, free_high); 2389 if (pcp->count >= high) { 2390 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex); 2391 } 2392 } 2393 2394 /* 2395 * Free a pcp page 2396 */ 2397 void free_unref_page(struct page *page, unsigned int order) 2398 { 2399 unsigned long __maybe_unused UP_flags; 2400 struct per_cpu_pages *pcp; 2401 struct zone *zone; 2402 unsigned long pfn = page_to_pfn(page); 2403 int migratetype; 2404 2405 if (!free_unref_page_prepare(page, pfn, order)) 2406 return; 2407 2408 /* 2409 * We only track unmovable, reclaimable and movable on pcp lists. 2410 * Place ISOLATE pages on the isolated list because they are being 2411 * offlined but treat HIGHATOMIC as movable pages so we can get those 2412 * areas back if necessary.
Otherwise, we may have to free 2413 * excessively into the page allocator. 2414 */ 2415 migratetype = get_pcppage_migratetype(page); 2416 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2417 if (unlikely(is_migrate_isolate(migratetype))) { 2418 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 2419 return; 2420 } 2421 migratetype = MIGRATE_MOVABLE; 2422 } 2423 2424 zone = page_zone(page); 2425 pcp_trylock_prepare(UP_flags); 2426 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2427 if (pcp) { 2428 free_unref_page_commit(zone, pcp, page, migratetype, order); 2429 pcp_spin_unlock(pcp); 2430 } else { 2431 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); 2432 } 2433 pcp_trylock_finish(UP_flags); 2434 } 2435 2436 /* 2437 * Free a list of 0-order pages 2438 */ 2439 void free_unref_page_list(struct list_head *list) 2440 { 2441 unsigned long __maybe_unused UP_flags; 2442 struct page *page, *next; 2443 struct per_cpu_pages *pcp = NULL; 2444 struct zone *locked_zone = NULL; 2445 int batch_count = 0; 2446 int migratetype; 2447 2448 /* Prepare pages for freeing */ 2449 list_for_each_entry_safe(page, next, list, lru) { 2450 unsigned long pfn = page_to_pfn(page); 2451 if (!free_unref_page_prepare(page, pfn, 0)) { 2452 list_del(&page->lru); 2453 continue; 2454 } 2455 2456 /* 2457 * Free isolated pages directly to the allocator, see 2458 * comment in free_unref_page. 2459 */ 2460 migratetype = get_pcppage_migratetype(page); 2461 if (unlikely(is_migrate_isolate(migratetype))) { 2462 list_del(&page->lru); 2463 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 2464 continue; 2465 } 2466 } 2467 2468 list_for_each_entry_safe(page, next, list, lru) { 2469 struct zone *zone = page_zone(page); 2470 2471 list_del(&page->lru); 2472 migratetype = get_pcppage_migratetype(page); 2473 2474 /* 2475 * Either a different zone requires a different pcp lock, or 2476 * freeing a large list of pages could cause excessive lock 2477 * hold times. 2478 */ 2479 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) { 2480 if (pcp) { 2481 pcp_spin_unlock(pcp); 2482 pcp_trylock_finish(UP_flags); 2483 } 2484 2485 batch_count = 0; 2486 2487 /* 2488 * trylock is necessary as pages may be getting freed 2489 * from IRQ or SoftIRQ context after an IO completion. 2490 */ 2491 pcp_trylock_prepare(UP_flags); 2492 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2493 if (unlikely(!pcp)) { 2494 pcp_trylock_finish(UP_flags); 2495 free_one_page(zone, page, page_to_pfn(page), 2496 0, migratetype, FPI_NONE); 2497 locked_zone = NULL; 2498 continue; 2499 } 2500 locked_zone = zone; 2501 } 2502 2503 /* 2504 * Non-isolated types over MIGRATE_PCPTYPES get added 2505 * to the MIGRATE_MOVABLE pcp list. 2506 */ 2507 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2508 migratetype = MIGRATE_MOVABLE; 2509 2510 trace_mm_page_free_batched(page); 2511 free_unref_page_commit(zone, pcp, page, migratetype, 0); 2512 batch_count++; 2513 } 2514 2515 if (pcp) { 2516 pcp_spin_unlock(pcp); 2517 pcp_trylock_finish(UP_flags); 2518 } 2519 } 2520 2521 /* 2522 * split_page takes a non-compound higher-order page, and splits it into 2523 * n (1<<order) sub-pages: page[0..n-1]. 2524 * Each sub-page must be freed individually. 2525 * 2526 * Note: this is probably too low level an operation for use in drivers. 2527 * Please consult with lkml before using this in your driver.
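 *
 * Illustrative usage sketch (caller code assumed, not from this file):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		... use the four independent order-0 pages, then
 *		free each one with __free_page() ...
 *	}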
2528 */ 2529 void split_page(struct page *page, unsigned int order) 2530 { 2531 int i; 2532 2533 VM_BUG_ON_PAGE(PageCompound(page), page); 2534 VM_BUG_ON_PAGE(!page_count(page), page); 2535 2536 for (i = 1; i < (1 << order); i++) 2537 set_page_refcounted(page + i); 2538 split_page_owner(page, 1 << order); 2539 split_page_memcg(page, 1 << order); 2540 } 2541 EXPORT_SYMBOL_GPL(split_page); 2542 2543 int __isolate_free_page(struct page *page, unsigned int order) 2544 { 2545 struct zone *zone = page_zone(page); 2546 int mt = get_pageblock_migratetype(page); 2547 2548 if (!is_migrate_isolate(mt)) { 2549 unsigned long watermark; 2550 /* 2551 * Obey watermarks as if the page was being allocated. We can 2552 * emulate a high-order watermark check with a raised order-0 2553 * watermark, because we already know our high-order page 2554 * exists. 2555 */ 2556 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2557 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2558 return 0; 2559 2560 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2561 } 2562 2563 del_page_from_free_list(page, zone, order); 2564 2565 /* 2566 * Set the pageblock's migratetype if the isolated page covers at 2567 * least half of a pageblock. 2568 */ 2569 if (order >= pageblock_order - 1) { 2570 struct page *endpage = page + (1 << order) - 1; 2571 for (; page < endpage; page += pageblock_nr_pages) { 2572 int mt = get_pageblock_migratetype(page); 2573 /* 2574 * Only change normal pageblocks (i.e., they can merge 2575 * with others) 2576 */ 2577 if (migratetype_is_mergeable(mt)) 2578 set_pageblock_migratetype(page, 2579 MIGRATE_MOVABLE); 2580 } 2581 } 2582 2583 return 1UL << order; 2584 } 2585 2586 /** 2587 * __putback_isolated_page - Return a now-isolated page back where we got it 2588 * @page: Page that was isolated 2589 * @order: Order of the isolated page 2590 * @mt: The page's pageblock's migratetype 2591 * 2592 * This function is meant to return a page pulled from the free lists via 2593 * __isolate_free_page back to the free list it was pulled from. 2594 */ 2595 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2596 { 2597 struct zone *zone = page_zone(page); 2598 2599 /* zone lock should be held when this function is called */ 2600 lockdep_assert_held(&zone->lock); 2601 2602 /* Return the isolated page to the tail of the freelist.
*/ 2603 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2604 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2605 } 2606 2607 /* 2608 * Update NUMA hit/miss statistics 2609 */ 2610 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2611 long nr_account) 2612 { 2613 #ifdef CONFIG_NUMA 2614 enum numa_stat_item local_stat = NUMA_LOCAL; 2615 2616 /* skip numa counters update if numa stats is disabled */ 2617 if (!static_branch_likely(&vm_numa_stat_key)) 2618 return; 2619 2620 if (zone_to_nid(z) != numa_node_id()) 2621 local_stat = NUMA_OTHER; 2622 2623 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2624 __count_numa_events(z, NUMA_HIT, nr_account); 2625 else { 2626 __count_numa_events(z, NUMA_MISS, nr_account); 2627 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2628 } 2629 __count_numa_events(z, local_stat, nr_account); 2630 #endif 2631 } 2632 2633 static __always_inline 2634 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2635 unsigned int order, unsigned int alloc_flags, 2636 int migratetype) 2637 { 2638 struct page *page; 2639 unsigned long flags; 2640 2641 do { 2642 page = NULL; 2643 spin_lock_irqsave(&zone->lock, flags); 2644 if (alloc_flags & ALLOC_HIGHATOMIC) 2645 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2646 if (!page) { 2647 page = __rmqueue(zone, order, migratetype, alloc_flags); 2648 2649 /* 2650 * If the allocation fails, allow OOM handling access 2651 * to HIGHATOMIC reserves as failing now is worse than 2652 * failing a high-order atomic allocation in the 2653 * future. 2654 */ 2655 if (!page && (alloc_flags & ALLOC_OOM)) 2656 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2657 2658 if (!page) { 2659 spin_unlock_irqrestore(&zone->lock, flags); 2660 return NULL; 2661 } 2662 } 2663 __mod_zone_freepage_state(zone, -(1 << order), 2664 get_pcppage_migratetype(page)); 2665 spin_unlock_irqrestore(&zone->lock, flags); 2666 } while (check_new_pages(page, order)); 2667 2668 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2669 zone_statistics(preferred_zone, zone, 1); 2670 2671 return page; 2672 } 2673 2674 /* Remove page from the per-cpu list, caller must protect the list */ 2675 static inline 2676 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 2677 int migratetype, 2678 unsigned int alloc_flags, 2679 struct per_cpu_pages *pcp, 2680 struct list_head *list) 2681 { 2682 struct page *page; 2683 2684 do { 2685 if (list_empty(list)) { 2686 int batch = READ_ONCE(pcp->batch); 2687 int alloced; 2688 2689 /* 2690 * Scale batch relative to order if batch implies 2691 * free pages can be stored on the PCP. Batch can 2692 * be 1 for small zones or for boot pagesets which 2693 * should never store free pages as the pages may 2694 * belong to arbitrary zones. 
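 *
 * Illustrative example (values assumed): batch == 63 and order == 2
 * scale to max(63 >> 2, 2) == 15 bulk allocations, i.e. 60 base
 * pages pulled under a single zone->lock hold.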
2695 */ 2696 if (batch > 1) 2697 batch = max(batch >> order, 2); 2698 alloced = rmqueue_bulk(zone, order, 2699 batch, list, 2700 migratetype, alloc_flags); 2701 2702 pcp->count += alloced << order; 2703 if (unlikely(list_empty(list))) 2704 return NULL; 2705 } 2706 2707 page = list_first_entry(list, struct page, pcp_list); 2708 list_del(&page->pcp_list); 2709 pcp->count -= 1 << order; 2710 } while (check_new_pages(page, order)); 2711 2712 return page; 2713 } 2714 2715 /* Lock and remove page from the per-cpu list */ 2716 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2717 struct zone *zone, unsigned int order, 2718 int migratetype, unsigned int alloc_flags) 2719 { 2720 struct per_cpu_pages *pcp; 2721 struct list_head *list; 2722 struct page *page; 2723 unsigned long __maybe_unused UP_flags; 2724 2725 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 2726 pcp_trylock_prepare(UP_flags); 2727 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2728 if (!pcp) { 2729 pcp_trylock_finish(UP_flags); 2730 return NULL; 2731 } 2732 2733 /* 2734 * On allocation, reduce the number of pages that are batch freed. 2735 * See nr_pcp_free() where free_factor is increased for subsequent 2736 * frees. 2737 */ 2738 pcp->free_factor >>= 1; 2739 list = &pcp->lists[order_to_pindex(migratetype, order)]; 2740 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 2741 pcp_spin_unlock(pcp); 2742 pcp_trylock_finish(UP_flags); 2743 if (page) { 2744 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2745 zone_statistics(preferred_zone, zone, 1); 2746 } 2747 return page; 2748 } 2749 2750 /* 2751 * Allocate a page from the given zone. 2752 * Use pcplists for THP or "cheap" high-order allocations. 2753 */ 2754 2755 /* 2756 * Do not instrument rmqueue() with KMSAN. This function may call 2757 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 2758 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 2759 * may call rmqueue() again, which will result in a deadlock. 2760 */ 2761 __no_sanitize_memory 2762 static inline 2763 struct page *rmqueue(struct zone *preferred_zone, 2764 struct zone *zone, unsigned int order, 2765 gfp_t gfp_flags, unsigned int alloc_flags, 2766 int migratetype) 2767 { 2768 struct page *page; 2769 2770 /* 2771 * We most definitely don't want callers attempting to 2772 * allocate greater than order-1 page units with __GFP_NOFAIL. 
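 *
 * An order-2 or larger __GFP_NOFAIL request could loop indefinitely
 * waiting for contiguity that may never materialise, hence the
 * order > 1 warning below.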
2773 */ 2774 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 2775 2776 if (likely(pcp_allowed_order(order))) { 2777 page = rmqueue_pcplist(preferred_zone, zone, order, 2778 migratetype, alloc_flags); 2779 if (likely(page)) 2780 goto out; 2781 } 2782 2783 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 2784 migratetype); 2785 2786 out: 2787 /* Separate test+clear to avoid unnecessary atomics */ 2788 if ((alloc_flags & ALLOC_KSWAPD) && 2789 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 2790 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2791 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 2792 } 2793 2794 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 2795 return page; 2796 } 2797 2798 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2799 { 2800 return __should_fail_alloc_page(gfp_mask, order); 2801 } 2802 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 2803 2804 static inline long __zone_watermark_unusable_free(struct zone *z, 2805 unsigned int order, unsigned int alloc_flags) 2806 { 2807 long unusable_free = (1 << order) - 1; 2808 2809 /* 2810 * If the caller does not have rights to reserves below the min 2811 * watermark then subtract the high-atomic reserves. This will 2812 * over-estimate the size of the atomic reserve but it avoids a search. 2813 */ 2814 if (likely(!(alloc_flags & ALLOC_RESERVES))) 2815 unusable_free += z->nr_reserved_highatomic; 2816 2817 #ifdef CONFIG_CMA 2818 /* If allocation can't use CMA areas don't use free CMA pages */ 2819 if (!(alloc_flags & ALLOC_CMA)) 2820 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 2821 #endif 2822 #ifdef CONFIG_UNACCEPTED_MEMORY 2823 unusable_free += zone_page_state(z, NR_UNACCEPTED); 2824 #endif 2825 2826 return unusable_free; 2827 } 2828 2829 /* 2830 * Return true if free base pages are above 'mark'. For high-order checks it 2831 * will return true if the order-0 watermark is reached and there is at least 2832 * one free page of a suitable size. Checking now avoids taking the zone lock 2833 * to check in the allocation paths if no pages are free. 2834 */ 2835 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2836 int highest_zoneidx, unsigned int alloc_flags, 2837 long free_pages) 2838 { 2839 long min = mark; 2840 int o; 2841 2842 /* free_pages may go negative - that's OK */ 2843 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 2844 2845 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 2846 /* 2847 * __GFP_HIGH allows access to 50% of the min reserve as well 2848 * as OOM. 2849 */ 2850 if (alloc_flags & ALLOC_MIN_RESERVE) { 2851 min -= min / 2; 2852 2853 /* 2854 * Non-blocking allocations (e.g. GFP_ATOMIC) can 2855 * access more reserves than just __GFP_HIGH. Other 2856 * non-blocking allocation requests such as GFP_NOWAIT 2857 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 2858 * access to the min reserve. 2859 */ 2860 if (alloc_flags & ALLOC_NON_BLOCK) 2861 min -= min / 4; 2862 } 2863 2864 /* 2865 * OOM victims can try even harder than the normal reserve 2866 * users on the grounds that it's definitely going to be in 2867 * the exit path shortly and free memory. Any allocation it 2868 * makes during the free path will be small and short-lived. 2869 */ 2870 if (alloc_flags & ALLOC_OOM) 2871 min -= min / 2; 2872 } 2873 2874 /* 2875 * Check watermarks for an order-0 allocation request.
If these 2876 * are not met, then a high-order request also cannot go ahead 2877 * even if a suitable page happened to be free. 2878 */ 2879 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 2880 return false; 2881 2882 /* If this is an order-0 request then the watermark is fine */ 2883 if (!order) 2884 return true; 2885 2886 /* For a high-order request, check at least one suitable page is free */ 2887 for (o = order; o <= MAX_ORDER; o++) { 2888 struct free_area *area = &z->free_area[o]; 2889 int mt; 2890 2891 if (!area->nr_free) 2892 continue; 2893 2894 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2895 if (!free_area_empty(area, mt)) 2896 return true; 2897 } 2898 2899 #ifdef CONFIG_CMA 2900 if ((alloc_flags & ALLOC_CMA) && 2901 !free_area_empty(area, MIGRATE_CMA)) { 2902 return true; 2903 } 2904 #endif 2905 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 2906 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 2907 return true; 2908 } 2909 } 2910 return false; 2911 } 2912 2913 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2914 int highest_zoneidx, unsigned int alloc_flags) 2915 { 2916 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2917 zone_page_state(z, NR_FREE_PAGES)); 2918 } 2919 2920 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 2921 unsigned long mark, int highest_zoneidx, 2922 unsigned int alloc_flags, gfp_t gfp_mask) 2923 { 2924 long free_pages; 2925 2926 free_pages = zone_page_state(z, NR_FREE_PAGES); 2927 2928 /* 2929 * Fast check for order-0 only. If this fails then the reserves 2930 * need to be calculated. 2931 */ 2932 if (!order) { 2933 long usable_free; 2934 long reserved; 2935 2936 usable_free = free_pages; 2937 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 2938 2939 /* reserved may overestimate the high-atomic reserves. */ 2940 usable_free -= min(usable_free, reserved); 2941 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 2942 return true; 2943 } 2944 2945 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2946 free_pages)) 2947 return true; 2948 2949 /* 2950 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 2951 * when checking the min watermark. The min watermark is the 2952 * point where boosting is ignored so that kswapd is woken up 2953 * when below the low watermark.
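 *
 * Illustrative example (values assumed): with _watermark[WMARK_MIN] ==
 * 1024 and watermark_boost == 512, a failing GFP_ATOMIC order-0 check
 * is retried against the unboosted 1024-page mark before giving up.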
2954 */ 2955 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 2956 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 2957 mark = z->_watermark[WMARK_MIN]; 2958 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 2959 alloc_flags, free_pages); 2960 } 2961 2962 return false; 2963 } 2964 2965 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2966 unsigned long mark, int highest_zoneidx) 2967 { 2968 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2969 2970 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2971 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2972 2973 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 2974 free_pages); 2975 } 2976 2977 #ifdef CONFIG_NUMA 2978 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 2979 2980 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2981 { 2982 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 2983 node_reclaim_distance; 2984 } 2985 #else /* CONFIG_NUMA */ 2986 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2987 { 2988 return true; 2989 } 2990 #endif /* CONFIG_NUMA */ 2991 2992 /* 2993 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 2994 * fragmentation is subtle. If the preferred zone was HIGHMEM then 2995 * premature use of a lower zone may cause lowmem pressure problems that 2996 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 2997 * probably too small. It only makes sense to spread allocations to avoid 2998 * fragmentation between the Normal and DMA32 zones. 2999 */ 3000 static inline unsigned int 3001 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3002 { 3003 unsigned int alloc_flags; 3004 3005 /* 3006 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3007 * to save a branch. 3008 */ 3009 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3010 3011 #ifdef CONFIG_ZONE_DMA32 3012 if (!zone) 3013 return alloc_flags; 3014 3015 if (zone_idx(zone) != ZONE_NORMAL) 3016 return alloc_flags; 3017 3018 /* 3019 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3020 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3021 * on UMA that if Normal is populated then so is DMA32. 3022 */ 3023 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3024 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3025 return alloc_flags; 3026 3027 alloc_flags |= ALLOC_NOFRAGMENT; 3028 #endif /* CONFIG_ZONE_DMA32 */ 3029 return alloc_flags; 3030 } 3031 3032 /* Must be called after current_gfp_context() which can change gfp_mask */ 3033 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3034 unsigned int alloc_flags) 3035 { 3036 #ifdef CONFIG_CMA 3037 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3038 alloc_flags |= ALLOC_CMA; 3039 #endif 3040 return alloc_flags; 3041 } 3042 3043 /* 3044 * get_page_from_freelist goes through the zonelist trying to allocate 3045 * a page. 3046 */ 3047 static struct page * 3048 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3049 const struct alloc_context *ac) 3050 { 3051 struct zoneref *z; 3052 struct zone *zone; 3053 struct pglist_data *last_pgdat = NULL; 3054 bool last_pgdat_dirty_ok = false; 3055 bool no_fallback; 3056 3057 retry: 3058 /* 3059 * Scan zonelist, looking for a zone with enough free. 3060 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 
3061 */ 3062 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3063 z = ac->preferred_zoneref; 3064 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3065 ac->nodemask) { 3066 struct page *page; 3067 unsigned long mark; 3068 3069 if (cpusets_enabled() && 3070 (alloc_flags & ALLOC_CPUSET) && 3071 !__cpuset_zone_allowed(zone, gfp_mask)) 3072 continue; 3073 /* 3074 * When allocating a page cache page for writing, we 3075 * want to get it from a node that is within its dirty 3076 * limit, such that no single node holds more than its 3077 * proportional share of globally allowed dirty pages. 3078 * The dirty limits take into account the node's 3079 * lowmem reserves and high watermark so that kswapd 3080 * should be able to balance it without having to 3081 * write pages from its LRU list. 3082 * 3083 * XXX: For now, allow allocations to potentially 3084 * exceed the per-node dirty limit in the slowpath 3085 * (spread_dirty_pages unset) before going into reclaim, 3086 * which is important when on a NUMA setup the allowed 3087 * nodes are together not big enough to reach the 3088 * global limit. The proper fix for these situations 3089 * will require awareness of nodes in the 3090 * dirty-throttling and the flusher threads. 3091 */ 3092 if (ac->spread_dirty_pages) { 3093 if (last_pgdat != zone->zone_pgdat) { 3094 last_pgdat = zone->zone_pgdat; 3095 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3096 } 3097 3098 if (!last_pgdat_dirty_ok) 3099 continue; 3100 } 3101 3102 if (no_fallback && nr_online_nodes > 1 && 3103 zone != ac->preferred_zoneref->zone) { 3104 int local_nid; 3105 3106 /* 3107 * If moving to a remote node, retry but allow 3108 * fragmenting fallbacks. Locality is more important 3109 * than fragmentation avoidance. 3110 */ 3111 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 3112 if (zone_to_nid(zone) != local_nid) { 3113 alloc_flags &= ~ALLOC_NOFRAGMENT; 3114 goto retry; 3115 } 3116 } 3117 3118 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3119 if (!zone_watermark_fast(zone, order, mark, 3120 ac->highest_zoneidx, alloc_flags, 3121 gfp_mask)) { 3122 int ret; 3123 3124 if (has_unaccepted_memory()) { 3125 if (try_to_accept_memory(zone, order)) 3126 goto try_this_zone; 3127 } 3128 3129 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3130 /* 3131 * Watermark failed for this zone, but see if we can 3132 * grow this zone if it contains deferred pages. 
3133 */ 3134 if (deferred_pages_enabled()) { 3135 if (_deferred_grow_zone(zone, order)) 3136 goto try_this_zone; 3137 } 3138 #endif 3139 /* Checked here to keep the fast path fast */ 3140 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3141 if (alloc_flags & ALLOC_NO_WATERMARKS) 3142 goto try_this_zone; 3143 3144 if (!node_reclaim_enabled() || 3145 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3146 continue; 3147 3148 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3149 switch (ret) { 3150 case NODE_RECLAIM_NOSCAN: 3151 /* did not scan */ 3152 continue; 3153 case NODE_RECLAIM_FULL: 3154 /* scanned but unreclaimable */ 3155 continue; 3156 default: 3157 /* did we reclaim enough */ 3158 if (zone_watermark_ok(zone, order, mark, 3159 ac->highest_zoneidx, alloc_flags)) 3160 goto try_this_zone; 3161 3162 continue; 3163 } 3164 } 3165 3166 try_this_zone: 3167 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3168 gfp_mask, alloc_flags, ac->migratetype); 3169 if (page) { 3170 prep_new_page(page, order, gfp_mask, alloc_flags); 3171 3172 /* 3173 * If this is a high-order atomic allocation then check 3174 * if the pageblock should be reserved for the future 3175 */ 3176 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3177 reserve_highatomic_pageblock(page, zone); 3178 3179 return page; 3180 } else { 3181 if (has_unaccepted_memory()) { 3182 if (try_to_accept_memory(zone, order)) 3183 goto try_this_zone; 3184 } 3185 3186 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3187 /* Try again if zone has deferred pages */ 3188 if (deferred_pages_enabled()) { 3189 if (_deferred_grow_zone(zone, order)) 3190 goto try_this_zone; 3191 } 3192 #endif 3193 } 3194 } 3195 3196 /* 3197 * It's possible on a UMA machine to get through all zones that are 3198 * fragmented. If avoiding fragmentation, reset and try again. 3199 */ 3200 if (no_fallback) { 3201 alloc_flags &= ~ALLOC_NOFRAGMENT; 3202 goto retry; 3203 } 3204 3205 return NULL; 3206 } 3207 3208 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3209 { 3210 unsigned int filter = SHOW_MEM_FILTER_NODES; 3211 3212 /* 3213 * This documents exceptions given to allocations in certain 3214 * contexts that are allowed to allocate outside current's set 3215 * of allowed nodes. 3216 */ 3217 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3218 if (tsk_is_oom_victim(current) || 3219 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3220 filter &= ~SHOW_MEM_FILTER_NODES; 3221 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3222 filter &= ~SHOW_MEM_FILTER_NODES; 3223 3224 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3225 } 3226 3227 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3228 { 3229 struct va_format vaf; 3230 va_list args; 3231 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3232 3233 if ((gfp_mask & __GFP_NOWARN) || 3234 !__ratelimit(&nopage_rs) || 3235 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3236 return; 3237 3238 va_start(args, fmt); 3239 vaf.fmt = fmt; 3240 vaf.va = &args; 3241 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3242 current->comm, &vaf, gfp_mask, &gfp_mask, 3243 nodemask_pr_args(nodemask)); 3244 va_end(args); 3245 3246 cpuset_print_current_mems_allowed(); 3247 pr_cont("\n"); 3248 dump_stack(); 3249 warn_alloc_show_mem(gfp_mask, nodemask); 3250 } 3251 3252 static inline struct page * 3253 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3254 unsigned int alloc_flags, 3255 const struct alloc_context *ac) 3256 { 3257 struct page *page; 3258 3259 page = get_page_from_freelist(gfp_mask, order, 3260 alloc_flags|ALLOC_CPUSET, ac); 3261 /* 3262 * fallback to ignore cpuset restriction if our nodes 3263 * are depleted 3264 */ 3265 if (!page) 3266 page = get_page_from_freelist(gfp_mask, order, 3267 alloc_flags, ac); 3268 3269 return page; 3270 } 3271 3272 static inline struct page * 3273 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3274 const struct alloc_context *ac, unsigned long *did_some_progress) 3275 { 3276 struct oom_control oc = { 3277 .zonelist = ac->zonelist, 3278 .nodemask = ac->nodemask, 3279 .memcg = NULL, 3280 .gfp_mask = gfp_mask, 3281 .order = order, 3282 }; 3283 struct page *page; 3284 3285 *did_some_progress = 0; 3286 3287 /* 3288 * Acquire the oom lock. If that fails, somebody else is 3289 * making progress for us. 3290 */ 3291 if (!mutex_trylock(&oom_lock)) { 3292 *did_some_progress = 1; 3293 schedule_timeout_uninterruptible(1); 3294 return NULL; 3295 } 3296 3297 /* 3298 * Go through the zonelist yet one more time, keeping a very high watermark 3299 * here; this is only to catch a parallel oom killing, and we must fail if 3300 * we're still under heavy pressure. But make sure that this reclaim 3301 * attempt does not depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3302 * allocation, which would never fail while the oom_lock is already held. 3303 */ 3304 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3305 ~__GFP_DIRECT_RECLAIM, order, 3306 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3307 if (page) 3308 goto out; 3309 3310 /* Coredumps can quickly deplete all memory reserves */ 3311 if (current->flags & PF_DUMPCORE) 3312 goto out; 3313 /* The OOM killer will not help higher-order allocs */ 3314 if (order > PAGE_ALLOC_COSTLY_ORDER) 3315 goto out; 3316 /* 3317 * We have already exhausted all our reclaim opportunities without any 3318 * success, so it is time to admit defeat. We will skip the OOM killer 3319 * because it is very likely that the caller has a more reasonable 3320 * fallback than shooting a random task. 3321 * 3322 * The OOM killer may not free memory on a specific node. 3323 */ 3324 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3325 goto out; 3326 /* The OOM killer does not needlessly kill tasks for lowmem */ 3327 if (ac->highest_zoneidx < ZONE_NORMAL) 3328 goto out; 3329 if (pm_suspended_storage()) 3330 goto out; 3331 /* 3332 * XXX: GFP_NOFS allocations should rather fail than rely on 3333 * other requests to make forward progress. 3334 * We are in an unfortunate situation where out_of_memory cannot 3335 * do much for this context, but let's try it to at least get 3336 * access to memory reserves if the current task is killed (see 3337 * out_of_memory).
Once filesystems are ready to handle allocation 3338 * failures more gracefully, we should just bail out here. 3339 */ 3340 3341 /* Exhausted what can be done so it's blame time */ 3342 if (out_of_memory(&oc) || 3343 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 3344 *did_some_progress = 1; 3345 3346 /* 3347 * Help non-failing allocations by giving them access to memory 3348 * reserves 3349 */ 3350 if (gfp_mask & __GFP_NOFAIL) 3351 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3352 ALLOC_NO_WATERMARKS, ac); 3353 } 3354 out: 3355 mutex_unlock(&oom_lock); 3356 return page; 3357 } 3358 3359 /* 3360 * Maximum number of compaction retries with progress before the OOM 3361 * killer is considered the only way to move forward. 3362 */ 3363 #define MAX_COMPACT_RETRIES 16 3364 3365 #ifdef CONFIG_COMPACTION 3366 /* Try memory compaction for high-order allocations before reclaim */ 3367 static struct page * 3368 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3369 unsigned int alloc_flags, const struct alloc_context *ac, 3370 enum compact_priority prio, enum compact_result *compact_result) 3371 { 3372 struct page *page = NULL; 3373 unsigned long pflags; 3374 unsigned int noreclaim_flag; 3375 3376 if (!order) 3377 return NULL; 3378 3379 psi_memstall_enter(&pflags); 3380 delayacct_compact_start(); 3381 noreclaim_flag = memalloc_noreclaim_save(); 3382 3383 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3384 prio, &page); 3385 3386 memalloc_noreclaim_restore(noreclaim_flag); 3387 psi_memstall_leave(&pflags); 3388 delayacct_compact_end(); 3389 3390 if (*compact_result == COMPACT_SKIPPED) 3391 return NULL; 3392 /* 3393 * In at least one zone, compaction wasn't deferred or skipped, so let's 3394 * count a compaction stall 3395 */ 3396 count_vm_event(COMPACTSTALL); 3397 3398 /* Prep a captured page if available */ 3399 if (page) 3400 prep_new_page(page, order, gfp_mask, alloc_flags); 3401 3402 /* Try to get a page from the freelist if available */ 3403 if (!page) 3404 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3405 3406 if (page) { 3407 struct zone *zone = page_zone(page); 3408 3409 zone->compact_blockskip_flush = false; 3410 compaction_defer_reset(zone, order, true); 3411 count_vm_event(COMPACTSUCCESS); 3412 return page; 3413 } 3414 3415 /* 3416 * It's bad if a compaction run occurs and fails. The most likely reason 3417 * is that pages exist, but not enough to satisfy watermarks. 3418 */ 3419 count_vm_event(COMPACTFAIL); 3420 3421 cond_resched(); 3422 3423 return NULL; 3424 } 3425 3426 static inline bool 3427 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3428 enum compact_result compact_result, 3429 enum compact_priority *compact_priority, 3430 int *compaction_retries) 3431 { 3432 int max_retries = MAX_COMPACT_RETRIES; 3433 int min_priority; 3434 bool ret = false; 3435 int retries = *compaction_retries; 3436 enum compact_priority priority = *compact_priority; 3437 3438 if (!order) 3439 return false; 3440 3441 if (fatal_signal_pending(current)) 3442 return false; 3443 3444 /* 3445 * Compaction was skipped due to a lack of free order-0 3446 * migration targets. Continue if reclaim can help. 3447 */ 3448 if (compact_result == COMPACT_SKIPPED) { 3449 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3450 goto out; 3451 } 3452 3453 /* 3454 * Compaction managed to coalesce some page blocks, but the 3455 * allocation failed, presumably due to a race. Retry a few times.
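 *
 * Illustrative example: a costly request (order >
 * PAGE_ALLOC_COSTLY_ORDER) is limited to MAX_COMPACT_RETRIES / 4 == 4
 * retries below, while a !costly request gets the full 16.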
3456 */ 3457 if (compact_result == COMPACT_SUCCESS) { 3458 /* 3459 * !costly requests are much more important than 3460 * __GFP_RETRY_MAYFAIL costly ones because they are de 3461 * facto nofail and invoke the OOM killer to move on, while 3462 * costly requests can fail and their users are ready to cope 3463 * with that. 1/4 of the retries is rather arbitrary, but we would 3464 * need much more detailed feedback from compaction to 3465 * make a better decision. 3466 */ 3467 if (order > PAGE_ALLOC_COSTLY_ORDER) 3468 max_retries /= 4; 3469 3470 if (++(*compaction_retries) <= max_retries) { 3471 ret = true; 3472 goto out; 3473 } 3474 } 3475 3476 /* 3477 * Compaction failed. Retry with increasing priority. 3478 */ 3479 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3480 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3481 3482 if (*compact_priority > min_priority) { 3483 (*compact_priority)--; 3484 *compaction_retries = 0; 3485 ret = true; 3486 } 3487 out: 3488 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3489 return ret; 3490 } 3491 #else 3492 static inline struct page * 3493 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3494 unsigned int alloc_flags, const struct alloc_context *ac, 3495 enum compact_priority prio, enum compact_result *compact_result) 3496 { 3497 *compact_result = COMPACT_SKIPPED; 3498 return NULL; 3499 } 3500 3501 static inline bool 3502 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3503 enum compact_result compact_result, 3504 enum compact_priority *compact_priority, 3505 int *compaction_retries) 3506 { 3507 struct zone *zone; 3508 struct zoneref *z; 3509 3510 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3511 return false; 3512 3513 /* 3514 * There are setups with compaction disabled which would prefer to loop 3515 * inside the allocator rather than hit the oom killer prematurely. 3516 * Let's give them some hope and keep retrying while the order-0 3517 * watermarks are OK.
3518 */ 3519 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3520 ac->highest_zoneidx, ac->nodemask) { 3521 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3522 ac->highest_zoneidx, alloc_flags)) 3523 return true; 3524 } 3525 return false; 3526 } 3527 #endif /* CONFIG_COMPACTION */ 3528 3529 #ifdef CONFIG_LOCKDEP 3530 static struct lockdep_map __fs_reclaim_map = 3531 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3532 3533 static bool __need_reclaim(gfp_t gfp_mask) 3534 { 3535 /* no reclaim without waiting on it */ 3536 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3537 return false; 3538 3539 /* this guy won't enter reclaim */ 3540 if (current->flags & PF_MEMALLOC) 3541 return false; 3542 3543 if (gfp_mask & __GFP_NOLOCKDEP) 3544 return false; 3545 3546 return true; 3547 } 3548 3549 void __fs_reclaim_acquire(unsigned long ip) 3550 { 3551 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3552 } 3553 3554 void __fs_reclaim_release(unsigned long ip) 3555 { 3556 lock_release(&__fs_reclaim_map, ip); 3557 } 3558 3559 void fs_reclaim_acquire(gfp_t gfp_mask) 3560 { 3561 gfp_mask = current_gfp_context(gfp_mask); 3562 3563 if (__need_reclaim(gfp_mask)) { 3564 if (gfp_mask & __GFP_FS) 3565 __fs_reclaim_acquire(_RET_IP_); 3566 3567 #ifdef CONFIG_MMU_NOTIFIER 3568 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3569 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3570 #endif 3571 3572 } 3573 } 3574 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3575 3576 void fs_reclaim_release(gfp_t gfp_mask) 3577 { 3578 gfp_mask = current_gfp_context(gfp_mask); 3579 3580 if (__need_reclaim(gfp_mask)) { 3581 if (gfp_mask & __GFP_FS) 3582 __fs_reclaim_release(_RET_IP_); 3583 } 3584 } 3585 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3586 #endif 3587 3588 /* 3589 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3590 * have been rebuilt so the allocation can be retried. The reader side does not 3591 * lock and simply retries the allocation if the zonelist changes. The writer 3592 * side is protected by the embedded spin_lock.
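 *
 * Illustrative usage (the allocator slowpath is assumed to follow this
 * pattern): record zonelist_iter_begin() before scanning, and after a
 * failure use check_retry_zonelist() to decide whether the zonelists
 * were rebuilt and the scan is worth repeating.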
3593 */ 3594 static DEFINE_SEQLOCK(zonelist_update_seq); 3595 3596 static unsigned int zonelist_iter_begin(void) 3597 { 3598 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3599 return read_seqbegin(&zonelist_update_seq); 3600 3601 return 0; 3602 } 3603 3604 static unsigned int check_retry_zonelist(unsigned int seq) 3605 { 3606 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3607 return read_seqretry(&zonelist_update_seq, seq); 3608 3609 return seq; 3610 } 3611 3612 /* Perform direct synchronous page reclaim */ 3613 static unsigned long 3614 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3615 const struct alloc_context *ac) 3616 { 3617 unsigned int noreclaim_flag; 3618 unsigned long progress; 3619 3620 cond_resched(); 3621 3622 /* We now go into synchronous reclaim */ 3623 cpuset_memory_pressure_bump(); 3624 fs_reclaim_acquire(gfp_mask); 3625 noreclaim_flag = memalloc_noreclaim_save(); 3626 3627 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3628 ac->nodemask); 3629 3630 memalloc_noreclaim_restore(noreclaim_flag); 3631 fs_reclaim_release(gfp_mask); 3632 3633 cond_resched(); 3634 3635 return progress; 3636 } 3637 3638 /* The really slow allocator path where we enter direct reclaim */ 3639 static inline struct page * 3640 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3641 unsigned int alloc_flags, const struct alloc_context *ac, 3642 unsigned long *did_some_progress) 3643 { 3644 struct page *page = NULL; 3645 unsigned long pflags; 3646 bool drained = false; 3647 3648 psi_memstall_enter(&pflags); 3649 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3650 if (unlikely(!(*did_some_progress))) 3651 goto out; 3652 3653 retry: 3654 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3655 3656 /* 3657 * If an allocation failed after direct reclaim, it could be because 3658 * pages are pinned on the per-cpu lists or in high alloc reserves. 3659 * Shrink them and try again 3660 */ 3661 if (!page && !drained) { 3662 unreserve_highatomic_pageblock(ac, false); 3663 drain_all_pages(NULL); 3664 drained = true; 3665 goto retry; 3666 } 3667 out: 3668 psi_memstall_leave(&pflags); 3669 3670 return page; 3671 } 3672 3673 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 3674 const struct alloc_context *ac) 3675 { 3676 struct zoneref *z; 3677 struct zone *zone; 3678 pg_data_t *last_pgdat = NULL; 3679 enum zone_type highest_zoneidx = ac->highest_zoneidx; 3680 3681 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 3682 ac->nodemask) { 3683 if (!managed_zone(zone)) 3684 continue; 3685 if (last_pgdat != zone->zone_pgdat) { 3686 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 3687 last_pgdat = zone->zone_pgdat; 3688 } 3689 } 3690 } 3691 3692 static inline unsigned int 3693 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 3694 { 3695 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3696 3697 /* 3698 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 3699 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3700 * to save two branches. 3701 */ 3702 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 3703 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 3704 3705 /* 3706 * The caller may dip into page reserves a bit more if the caller 3707 * cannot run direct reclaim, or if the caller has realtime scheduling 3708 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3709 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
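 *
 * Illustrative example (assuming GFP_ATOMIC is defined as __GFP_HIGH |
 * __GFP_KSWAPD_RECLAIM): the mask below maps it straight to
 * ALLOC_MIN_RESERVE | ALLOC_KSWAPD without extra branches.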
3710 */
3711 alloc_flags |= (__force int)
3712 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3713
3714 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3715 /*
3716 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3717 * if it can't schedule.
3718 */
3719 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3720 alloc_flags |= ALLOC_NON_BLOCK;
3721
3722 if (order > 0)
3723 alloc_flags |= ALLOC_HIGHATOMIC;
3724 }
3725
3726 /*
3727 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
3728 * GFP_ATOMIC) rather than fail, see the comment for
3729 * cpuset_node_allowed().
3730 */
3731 if (alloc_flags & ALLOC_MIN_RESERVE)
3732 alloc_flags &= ~ALLOC_CPUSET;
3733 } else if (unlikely(rt_task(current)) && in_task())
3734 alloc_flags |= ALLOC_MIN_RESERVE;
3735
3736 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
3737
3738 return alloc_flags;
3739 }
3740
3741 static bool oom_reserves_allowed(struct task_struct *tsk)
3742 {
3743 if (!tsk_is_oom_victim(tsk))
3744 return false;
3745
3746 /*
3747 * !MMU doesn't have oom reaper so give access to memory reserves
3748 * only to the thread with TIF_MEMDIE set
3749 */
3750 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
3751 return false;
3752
3753 return true;
3754 }
3755
3756 /*
3757 * Distinguish requests which really need access to full memory
3758 * reserves from oom victims which can live with a portion of it
3759 */
3760 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3761 {
3762 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3763 return 0;
3764 if (gfp_mask & __GFP_MEMALLOC)
3765 return ALLOC_NO_WATERMARKS;
3766 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3767 return ALLOC_NO_WATERMARKS;
3768 if (!in_interrupt()) {
3769 if (current->flags & PF_MEMALLOC)
3770 return ALLOC_NO_WATERMARKS;
3771 else if (oom_reserves_allowed(current))
3772 return ALLOC_OOM;
3773 }
3774
3775 return 0;
3776 }
3777
3778 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3779 {
3780 return !!__gfp_pfmemalloc_flags(gfp_mask);
3781 }
3782
3783 /*
3784 * Checks whether it makes sense to retry the reclaim to make forward progress
3785 * for the given allocation request.
3786 *
3787 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3788 * without success, or when we couldn't even meet the watermark if we
3789 * reclaimed all remaining pages on the LRU lists.
3790 *
3791 * Returns true if a retry is viable or false to enter the oom path.
3792 */
3793 static inline bool
3794 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3795 struct alloc_context *ac, int alloc_flags,
3796 bool did_some_progress, int *no_progress_loops)
3797 {
3798 struct zone *zone;
3799 struct zoneref *z;
3800 bool ret = false;
3801
3802 /*
3803 * Costly allocations might have made some progress, but this doesn't
3804 * mean their order will become available due to high fragmentation, so
3805 * always increment the no-progress counter for them
3806 */
3807 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3808 *no_progress_loops = 0;
3809 else
3810 (*no_progress_loops)++;
3811
3812 /*
3813 * Make sure we converge to OOM if we cannot make any progress
3814 * several times in a row.
3815 */
3816 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3817 /* Before OOM, exhaust highatomic_reserve */
3818 return unreserve_highatomic_pageblock(ac, true);
3819 }
3820
3821 /*
3822 * Keep reclaiming pages while there is a chance this will lead
3823 * somewhere.
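* The per-zone test below asks, roughly: would the min watermark still
* hold if every reclaimable page in the zone were freed, i.e.
*
*	free pages (snapshot) + zone_reclaimable_pages(zone)
*		>= min_wmark_pages(zone) + lowmem_reserve
*
* (a paraphrase of the __zone_watermark_ok() call with "available").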
If none of the target zones can satisfy our allocation 3824 * request even if all reclaimable pages are considered then we are 3825 * screwed and have to go OOM. 3826 */ 3827 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3828 ac->highest_zoneidx, ac->nodemask) { 3829 unsigned long available; 3830 unsigned long reclaimable; 3831 unsigned long min_wmark = min_wmark_pages(zone); 3832 bool wmark; 3833 3834 available = reclaimable = zone_reclaimable_pages(zone); 3835 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 3836 3837 /* 3838 * Would the allocation succeed if we reclaimed all 3839 * reclaimable pages? 3840 */ 3841 wmark = __zone_watermark_ok(zone, order, min_wmark, 3842 ac->highest_zoneidx, alloc_flags, available); 3843 trace_reclaim_retry_zone(z, order, reclaimable, 3844 available, min_wmark, *no_progress_loops, wmark); 3845 if (wmark) { 3846 ret = true; 3847 break; 3848 } 3849 } 3850 3851 /* 3852 * Memory allocation/reclaim might be called from a WQ context and the 3853 * current implementation of the WQ concurrency control doesn't 3854 * recognize that a particular WQ is congested if the worker thread is 3855 * looping without ever sleeping. Therefore we have to do a short sleep 3856 * here rather than calling cond_resched(). 3857 */ 3858 if (current->flags & PF_WQ_WORKER) 3859 schedule_timeout_uninterruptible(1); 3860 else 3861 cond_resched(); 3862 return ret; 3863 } 3864 3865 static inline bool 3866 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 3867 { 3868 /* 3869 * It's possible that cpuset's mems_allowed and the nodemask from 3870 * mempolicy don't intersect. This should be normally dealt with by 3871 * policy_nodemask(), but it's possible to race with cpuset update in 3872 * such a way the check therein was true, and then it became false 3873 * before we got our cpuset_mems_cookie here. 3874 * This assumes that for all allocations, ac->nodemask can come only 3875 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 3876 * when it does not intersect with the cpuset restrictions) or the 3877 * caller can deal with a violated nodemask. 3878 */ 3879 if (cpusets_enabled() && ac->nodemask && 3880 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 3881 ac->nodemask = NULL; 3882 return true; 3883 } 3884 3885 /* 3886 * When updating a task's mems_allowed or mempolicy nodemask, it is 3887 * possible to race with parallel threads in such a way that our 3888 * allocation can fail while the mask is being updated. If we are about 3889 * to fail, check if the cpuset changed during allocation and if so, 3890 * retry. 
3891 */
3892 if (read_mems_allowed_retry(cpuset_mems_cookie))
3893 return true;
3894
3895 return false;
3896 }
3897
3898 static inline struct page *
3899 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3900 struct alloc_context *ac)
3901 {
3902 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3903 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3904 struct page *page = NULL;
3905 unsigned int alloc_flags;
3906 unsigned long did_some_progress;
3907 enum compact_priority compact_priority;
3908 enum compact_result compact_result;
3909 int compaction_retries;
3910 int no_progress_loops;
3911 unsigned int cpuset_mems_cookie;
3912 unsigned int zonelist_iter_cookie;
3913 int reserve_flags;
3914
3915 restart:
3916 compaction_retries = 0;
3917 no_progress_loops = 0;
3918 compact_priority = DEF_COMPACT_PRIORITY;
3919 cpuset_mems_cookie = read_mems_allowed_begin();
3920 zonelist_iter_cookie = zonelist_iter_begin();
3921
3922 /*
3923 * The fast path uses conservative alloc_flags to succeed only until
3924 * kswapd needs to be woken up, and to avoid the cost of setting up
3925 * alloc_flags precisely. So we do that now.
3926 */
3927 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3928
3929 /*
3930 * We need to recalculate the starting point for the zonelist iterator
3931 * because we might have used a different nodemask in the fast path, or
3932 * there was a cpuset modification and we are retrying - otherwise we
3933 * could end up iterating over non-eligible zones endlessly.
3934 */
3935 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3936 ac->highest_zoneidx, ac->nodemask);
3937 if (!ac->preferred_zoneref->zone)
3938 goto nopage;
3939
3940 /*
3941 * Check for insane configurations where the cpuset doesn't contain
3942 * any suitable zone to satisfy the request - e.g. non-movable
3943 * GFP_HIGHUSER allocations from MOVABLE nodes only.
3944 */
3945 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
3946 struct zoneref *z = first_zones_zonelist(ac->zonelist,
3947 ac->highest_zoneidx,
3948 &cpuset_current_mems_allowed);
3949 if (!z->zone)
3950 goto nopage;
3951 }
3952
3953 if (alloc_flags & ALLOC_KSWAPD)
3954 wake_all_kswapds(order, gfp_mask, ac);
3955
3956 /*
3957 * The adjusted alloc_flags might result in immediate success, so try
3958 * that first
3959 */
3960 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3961 if (page)
3962 goto got_pg;
3963
3964 /*
3965 * For costly allocations, try direct compaction first, as it's likely
3966 * that we have enough base pages and don't need to reclaim. For non-
3967 * movable high-order allocations, do that as well, as compaction will
3968 * try to prevent permanent fragmentation by migrating from blocks of
3969 * the same migratetype.
3970 * Don't try this for allocations that are allowed to ignore
3971 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
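*
* For example, an order-9 THP fault (costly and MOVABLE) takes this
* path, as does a non-movable order-3 request; order-0 requests go
* straight to the reclaim/retry loop below.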
3972 */ 3973 if (can_direct_reclaim && 3974 (costly_order || 3975 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 3976 && !gfp_pfmemalloc_allowed(gfp_mask)) { 3977 page = __alloc_pages_direct_compact(gfp_mask, order, 3978 alloc_flags, ac, 3979 INIT_COMPACT_PRIORITY, 3980 &compact_result); 3981 if (page) 3982 goto got_pg; 3983 3984 /* 3985 * Checks for costly allocations with __GFP_NORETRY, which 3986 * includes some THP page fault allocations 3987 */ 3988 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 3989 /* 3990 * If allocating entire pageblock(s) and compaction 3991 * failed because all zones are below low watermarks 3992 * or is prohibited because it recently failed at this 3993 * order, fail immediately unless the allocator has 3994 * requested compaction and reclaim retry. 3995 * 3996 * Reclaim is 3997 * - potentially very expensive because zones are far 3998 * below their low watermarks or this is part of very 3999 * bursty high order allocations, 4000 * - not guaranteed to help because isolate_freepages() 4001 * may not iterate over freed pages as part of its 4002 * linear scan, and 4003 * - unlikely to make entire pageblocks free on its 4004 * own. 4005 */ 4006 if (compact_result == COMPACT_SKIPPED || 4007 compact_result == COMPACT_DEFERRED) 4008 goto nopage; 4009 4010 /* 4011 * Looks like reclaim/compaction is worth trying, but 4012 * sync compaction could be very expensive, so keep 4013 * using async compaction. 4014 */ 4015 compact_priority = INIT_COMPACT_PRIORITY; 4016 } 4017 } 4018 4019 retry: 4020 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4021 if (alloc_flags & ALLOC_KSWAPD) 4022 wake_all_kswapds(order, gfp_mask, ac); 4023 4024 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4025 if (reserve_flags) 4026 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4027 (alloc_flags & ALLOC_KSWAPD); 4028 4029 /* 4030 * Reset the nodemask and zonelist iterators if memory policies can be 4031 * ignored. These allocations are high priority and system rather than 4032 * user oriented. 
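* This covers reserve-backed requests (__GFP_MEMALLOC and oom victims,
* see __gfp_pfmemalloc_flags() above) as well as non-blocking
* __GFP_HIGH requests that had ALLOC_CPUSET cleared in
* gfp_to_alloc_flags().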
4033 */
4034 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4035 ac->nodemask = NULL;
4036 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4037 ac->highest_zoneidx, ac->nodemask);
4038 }
4039
4040 /* Attempt with potentially adjusted zonelist and alloc_flags */
4041 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4042 if (page)
4043 goto got_pg;
4044
4045 /* Caller is not willing to reclaim, we can't balance anything */
4046 if (!can_direct_reclaim)
4047 goto nopage;
4048
4049 /* Avoid recursion of direct reclaim */
4050 if (current->flags & PF_MEMALLOC)
4051 goto nopage;
4052
4053 /* Try direct reclaim and then allocating */
4054 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4055 &did_some_progress);
4056 if (page)
4057 goto got_pg;
4058
4059 /* Try direct compaction and then allocating */
4060 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4061 compact_priority, &compact_result);
4062 if (page)
4063 goto got_pg;
4064
4065 /* Do not loop if specifically requested */
4066 if (gfp_mask & __GFP_NORETRY)
4067 goto nopage;
4068
4069 /*
4070 * Do not retry costly high order allocations unless they are
4071 * __GFP_RETRY_MAYFAIL
4072 */
4073 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4074 goto nopage;
4075
4076 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4077 did_some_progress > 0, &no_progress_loops))
4078 goto retry;
4079
4080 /*
4081 * It doesn't make any sense to retry compaction if the order-0
4082 * reclaim is not able to make any progress because the current
4083 * implementation of compaction depends on a sufficient amount
4084 * of free memory (see __compaction_suitable)
4085 */
4086 if (did_some_progress > 0 &&
4087 should_compact_retry(ac, order, alloc_flags,
4088 compact_result, &compact_priority,
4089 &compaction_retries))
4090 goto retry;
4091
4092
4093 /*
4094 * Deal with possible cpuset update races or zonelist updates to avoid
4095 * an unnecessary OOM kill.
4096 */
4097 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4098 check_retry_zonelist(zonelist_iter_cookie))
4099 goto restart;
4100
4101 /* Reclaim has failed us, start killing things */
4102 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4103 if (page)
4104 goto got_pg;
4105
4106 /* Avoid allocations with no watermarks from looping endlessly */
4107 if (tsk_is_oom_victim(current) &&
4108 (alloc_flags & ALLOC_OOM ||
4109 (gfp_mask & __GFP_NOMEMALLOC)))
4110 goto nopage;
4111
4112 /* Retry as long as the OOM killer is making progress */
4113 if (did_some_progress) {
4114 no_progress_loops = 0;
4115 goto retry;
4116 }
4117
4118 nopage:
4119 /*
4120 * Deal with possible cpuset update races or zonelist updates to avoid
4121 * an unnecessary OOM kill.
4122 */
4123 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4124 check_retry_zonelist(zonelist_iter_cookie))
4125 goto restart;
4126
4127 /*
4128 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4129 * we always retry
4130 */
4131 if (gfp_mask & __GFP_NOFAIL) {
4132 /*
4133 * All existing users of __GFP_NOFAIL are blockable, so warn
4134 * about any new users that actually require GFP_NOWAIT
4135 */
4136 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4137 goto fail;
4138
4139 /*
4140 * A PF_MEMALLOC request from this context is rather bizarre
4141 * because we cannot reclaim anything and can only loop waiting
4142 * for somebody else to do the work for us
4143 */
4144 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4145
4146 /*
4147 * Non-failing costly orders are a hard requirement which we
4148 * are not well prepared for, so let's warn about these users
4149 * so that we can identify them and convert them to something
4150 * else.
4151 */
4152 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4153
4154 /*
4155 * Help non-failing allocations by giving some access to memory
4156 * reserves normally used for high priority non-blocking
4157 * allocations but do not use ALLOC_NO_WATERMARKS because this
4158 * could deplete whole memory reserves which would just make
4159 * the situation worse.
4160 */
4161 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4162 if (page)
4163 goto got_pg;
4164
4165 cond_resched();
4166 goto retry;
4167 }
4168 fail:
4169 warn_alloc(gfp_mask, ac->nodemask,
4170 "page allocation failure: order:%u", order);
4171 got_pg:
4172 return page;
4173 }
4174
4175 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4176 int preferred_nid, nodemask_t *nodemask,
4177 struct alloc_context *ac, gfp_t *alloc_gfp,
4178 unsigned int *alloc_flags)
4179 {
4180 ac->highest_zoneidx = gfp_zone(gfp_mask);
4181 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4182 ac->nodemask = nodemask;
4183 ac->migratetype = gfp_migratetype(gfp_mask);
4184
4185 if (cpusets_enabled()) {
4186 *alloc_gfp |= __GFP_HARDWALL;
4187 /*
4188 * When we are in interrupt context, the cpuset of the current
4189 * task is irrelevant, meaning that any node is OK.
4190 */
4191 if (in_task() && !ac->nodemask)
4192 ac->nodemask = &cpuset_current_mems_allowed;
4193 else
4194 *alloc_flags |= ALLOC_CPUSET;
4195 }
4196
4197 might_alloc(gfp_mask);
4198
4199 if (should_fail_alloc_page(gfp_mask, order))
4200 return false;
4201
4202 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4203
4204 /* Dirty zone balancing only done in the fast path */
4205 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4206
4207 /*
4208 * The preferred zone is used for statistics but crucially it is
4209 * also used as the starting point for the zonelist iterator. It
4210 * may get reset for allocations that ignore memory policies.
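*
* For example, on a typical node with DMA, DMA32 and NORMAL zones, a
* GFP_KERNEL request gets a preferred zoneref pointing at NORMAL, and
* the iterator only falls back to DMA32 and then DMA when NORMAL fails
* its watermark checks.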
4211 */ 4212 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4213 ac->highest_zoneidx, ac->nodemask); 4214 4215 return true; 4216 } 4217 4218 /* 4219 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 4220 * @gfp: GFP flags for the allocation 4221 * @preferred_nid: The preferred NUMA node ID to allocate from 4222 * @nodemask: Set of nodes to allocate from, may be NULL 4223 * @nr_pages: The number of pages desired on the list or array 4224 * @page_list: Optional list to store the allocated pages 4225 * @page_array: Optional array to store the pages 4226 * 4227 * This is a batched version of the page allocator that attempts to 4228 * allocate nr_pages quickly. Pages are added to page_list if page_list 4229 * is not NULL, otherwise it is assumed that the page_array is valid. 4230 * 4231 * For lists, nr_pages is the number of pages that should be allocated. 4232 * 4233 * For arrays, only NULL elements are populated with pages and nr_pages 4234 * is the maximum number of pages that will be stored in the array. 4235 * 4236 * Returns the number of pages on the list or array. 4237 */ 4238 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 4239 nodemask_t *nodemask, int nr_pages, 4240 struct list_head *page_list, 4241 struct page **page_array) 4242 { 4243 struct page *page; 4244 unsigned long __maybe_unused UP_flags; 4245 struct zone *zone; 4246 struct zoneref *z; 4247 struct per_cpu_pages *pcp; 4248 struct list_head *pcp_list; 4249 struct alloc_context ac; 4250 gfp_t alloc_gfp; 4251 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4252 int nr_populated = 0, nr_account = 0; 4253 4254 /* 4255 * Skip populated array elements to determine if any pages need 4256 * to be allocated before disabling IRQs. 4257 */ 4258 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 4259 nr_populated++; 4260 4261 /* No pages requested? */ 4262 if (unlikely(nr_pages <= 0)) 4263 goto out; 4264 4265 /* Already populated array? */ 4266 if (unlikely(page_array && nr_pages - nr_populated == 0)) 4267 goto out; 4268 4269 /* Bulk allocator does not support memcg accounting. */ 4270 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4271 goto failed; 4272 4273 /* Use the single page allocator for one page. */ 4274 if (nr_pages - nr_populated == 1) 4275 goto failed; 4276 4277 #ifdef CONFIG_PAGE_OWNER 4278 /* 4279 * PAGE_OWNER may recurse into the allocator to allocate space to 4280 * save the stack with pagesets.lock held. Releasing/reacquiring 4281 * removes much of the performance benefit of bulk allocation so 4282 * force the caller to allocate one page at a time as it'll have 4283 * similar performance to added complexity to the bulk allocator. 4284 */ 4285 if (static_branch_unlikely(&page_owner_inited)) 4286 goto failed; 4287 #endif 4288 4289 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4290 gfp &= gfp_allowed_mask; 4291 alloc_gfp = gfp; 4292 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4293 goto out; 4294 gfp = alloc_gfp; 4295 4296 /* Find an allowed local zone that meets the low watermark. 
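* The check below adds nr_pages on top of the selected watermark, so
* e.g. a 64-page bulk request only proceeds on a zone that could lose
* all 64 pages and still stay above its low watermark; otherwise we
* fall back to the single-page allocator.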
*/
4297 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
4298 unsigned long mark;
4299
4300 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4301 !__cpuset_zone_allowed(zone, gfp)) {
4302 continue;
4303 }
4304
4305 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
4306 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
4307 goto failed;
4308 }
4309
4310 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4311 if (zone_watermark_fast(zone, 0, mark,
4312 zonelist_zone_idx(ac.preferred_zoneref),
4313 alloc_flags, gfp)) {
4314 break;
4315 }
4316 }
4317
4318 /*
4319 * If there are no allowed local zones that meet the watermarks, then
4320 * try to allocate a single page and reclaim if necessary.
4321 */
4322 if (unlikely(!zone))
4323 goto failed;
4324
4325 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4326 pcp_trylock_prepare(UP_flags);
4327 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4328 if (!pcp)
4329 goto failed_irq;
4330
4331 /* Attempt the batch allocation */
4332 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4333 while (nr_populated < nr_pages) {
4334
4335 /* Skip existing pages */
4336 if (page_array && page_array[nr_populated]) {
4337 nr_populated++;
4338 continue;
4339 }
4340
4341 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
4342 pcp, pcp_list);
4343 if (unlikely(!page)) {
4344 /* Try and allocate at least one page */
4345 if (!nr_account) {
4346 pcp_spin_unlock(pcp);
4347 goto failed_irq;
4348 }
4349 break;
4350 }
4351 nr_account++;
4352
4353 prep_new_page(page, 0, gfp, 0);
4354 if (page_list)
4355 list_add(&page->lru, page_list);
4356 else
4357 page_array[nr_populated] = page;
4358 nr_populated++;
4359 }
4360
4361 pcp_spin_unlock(pcp);
4362 pcp_trylock_finish(UP_flags);
4363
4364 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
4365 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
4366
4367 out:
4368 return nr_populated;
4369
4370 failed_irq:
4371 pcp_trylock_finish(UP_flags);
4372
4373 failed:
4374 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
4375 if (page) {
4376 if (page_list)
4377 list_add(&page->lru, page_list);
4378 else
4379 page_array[nr_populated] = page;
4380 nr_populated++;
4381 }
4382
4383 goto out;
4384 }
4385 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
4386
4387 /*
4388 * This is the 'heart' of the zoned buddy allocator.
4389 */
4390 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
4391 nodemask_t *nodemask)
4392 {
4393 struct page *page;
4394 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4395 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
4396 struct alloc_context ac = { };
4397
4398 /*
4399 * There are several places where we assume that the order value is sane
4400 * so bail out early if the request is out of bounds.
4401 */
4402 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
4403 return NULL;
4404
4405 gfp &= gfp_allowed_mask;
4406 /*
4407 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4408 * resp. GFP_NOIO which has to be inherited for all allocation requests
4409 * from a particular context which has been marked by
4410 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
4411 * movable zones are not used during allocation.
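*
* A minimal sketch of the scoped-constraint pattern honoured here
* (memalloc_nofs_save() and friends live in <linux/sched/mm.h>):
*
*	unsigned int flags = memalloc_nofs_save();
*	page = alloc_page(GFP_KERNEL);	(behaves like GFP_NOFS here)
*	memalloc_nofs_restore(flags);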
4412 */ 4413 gfp = current_gfp_context(gfp); 4414 alloc_gfp = gfp; 4415 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4416 &alloc_gfp, &alloc_flags)) 4417 return NULL; 4418 4419 /* 4420 * Forbid the first pass from falling back to types that fragment 4421 * memory until all local zones are considered. 4422 */ 4423 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 4424 4425 /* First allocation attempt */ 4426 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4427 if (likely(page)) 4428 goto out; 4429 4430 alloc_gfp = gfp; 4431 ac.spread_dirty_pages = false; 4432 4433 /* 4434 * Restore the original nodemask if it was potentially replaced with 4435 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 4436 */ 4437 ac.nodemask = nodemask; 4438 4439 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4440 4441 out: 4442 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4443 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4444 __free_pages(page, order); 4445 page = NULL; 4446 } 4447 4448 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4449 kmsan_alloc_page(page, order, alloc_gfp); 4450 4451 return page; 4452 } 4453 EXPORT_SYMBOL(__alloc_pages); 4454 4455 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 4456 nodemask_t *nodemask) 4457 { 4458 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 4459 preferred_nid, nodemask); 4460 struct folio *folio = (struct folio *)page; 4461 4462 if (folio && order > 1) 4463 folio_prep_large_rmappable(folio); 4464 return folio; 4465 } 4466 EXPORT_SYMBOL(__folio_alloc); 4467 4468 /* 4469 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4470 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4471 * you need to access high mem. 4472 */ 4473 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 4474 { 4475 struct page *page; 4476 4477 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 4478 if (!page) 4479 return 0; 4480 return (unsigned long) page_address(page); 4481 } 4482 EXPORT_SYMBOL(__get_free_pages); 4483 4484 unsigned long get_zeroed_page(gfp_t gfp_mask) 4485 { 4486 return __get_free_page(gfp_mask | __GFP_ZERO); 4487 } 4488 EXPORT_SYMBOL(get_zeroed_page); 4489 4490 /** 4491 * __free_pages - Free pages allocated with alloc_pages(). 4492 * @page: The page pointer returned from alloc_pages(). 4493 * @order: The order of the allocation. 4494 * 4495 * This function can free multi-page allocations that are not compound 4496 * pages. It does not check that the @order passed in matches that of 4497 * the allocation, so it is easy to leak memory. Freeing more memory 4498 * than was allocated will probably emit a warning. 4499 * 4500 * If the last reference to this page is speculative, it will be released 4501 * by put_page() which only frees the first page of a non-compound 4502 * allocation. To prevent the remaining pages from being leaked, we free 4503 * the subsequent pages here. If you want to use the page's reference 4504 * count to decide when to free the allocation, you should allocate a 4505 * compound page, and use put_page() instead of __free_pages(). 4506 * 4507 * Context: May be called in interrupt context or while holding a normal 4508 * spinlock, but not in NMI context or while holding a raw spinlock. 
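*
* A minimal sketch of the usual pairing:
*
*	page = alloc_pages(GFP_KERNEL, 2);	(four non-compound pages)
*	if (!page)
*		return -ENOMEM;
*	...
*	__free_pages(page, 2);			(same order as allocated)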
4509 */ 4510 void __free_pages(struct page *page, unsigned int order) 4511 { 4512 /* get PageHead before we drop reference */ 4513 int head = PageHead(page); 4514 4515 if (put_page_testzero(page)) 4516 free_the_page(page, order); 4517 else if (!head) 4518 while (order-- > 0) 4519 free_the_page(page + (1 << order), order); 4520 } 4521 EXPORT_SYMBOL(__free_pages); 4522 4523 void free_pages(unsigned long addr, unsigned int order) 4524 { 4525 if (addr != 0) { 4526 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4527 __free_pages(virt_to_page((void *)addr), order); 4528 } 4529 } 4530 4531 EXPORT_SYMBOL(free_pages); 4532 4533 /* 4534 * Page Fragment: 4535 * An arbitrary-length arbitrary-offset area of memory which resides 4536 * within a 0 or higher order page. Multiple fragments within that page 4537 * are individually refcounted, in the page's reference counter. 4538 * 4539 * The page_frag functions below provide a simple allocation framework for 4540 * page fragments. This is used by the network stack and network device 4541 * drivers to provide a backing region of memory for use as either an 4542 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 4543 */ 4544 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4545 gfp_t gfp_mask) 4546 { 4547 struct page *page = NULL; 4548 gfp_t gfp = gfp_mask; 4549 4550 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4551 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 4552 __GFP_NOMEMALLOC; 4553 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4554 PAGE_FRAG_CACHE_MAX_ORDER); 4555 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4556 #endif 4557 if (unlikely(!page)) 4558 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4559 4560 nc->va = page ? page_address(page) : NULL; 4561 4562 return page; 4563 } 4564 4565 void __page_frag_cache_drain(struct page *page, unsigned int count) 4566 { 4567 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4568 4569 if (page_ref_sub_and_test(page, count)) 4570 free_the_page(page, compound_order(page)); 4571 } 4572 EXPORT_SYMBOL(__page_frag_cache_drain); 4573 4574 void *page_frag_alloc_align(struct page_frag_cache *nc, 4575 unsigned int fragsz, gfp_t gfp_mask, 4576 unsigned int align_mask) 4577 { 4578 unsigned int size = PAGE_SIZE; 4579 struct page *page; 4580 int offset; 4581 4582 if (unlikely(!nc->va)) { 4583 refill: 4584 page = __page_frag_cache_refill(nc, gfp_mask); 4585 if (!page) 4586 return NULL; 4587 4588 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4589 /* if size can vary use size else just use PAGE_SIZE */ 4590 size = nc->size; 4591 #endif 4592 /* Even if we own the page, we do not use atomic_set(). 4593 * This would break get_page_unless_zero() users. 
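* The arithmetic: the fresh page carries one reference, and the
* page_ref_add() below raises that to PAGE_FRAG_CACHE_MAX_SIZE + 1,
* matching the pagecnt_bias it is initialised with, so up to that many
* fragments can be handed out before the refcount is touched again.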
4594 */
4595 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4596
4597 /* reset page count bias and offset to start of new frag */
4598 nc->pfmemalloc = page_is_pfmemalloc(page);
4599 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4600 nc->offset = size;
4601 }
4602
4603 offset = nc->offset - fragsz;
4604 if (unlikely(offset < 0)) {
4605 page = virt_to_page(nc->va);
4606
4607 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4608 goto refill;
4609
4610 if (unlikely(nc->pfmemalloc)) {
4611 free_the_page(page, compound_order(page));
4612 goto refill;
4613 }
4614
4615 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4616 /* if size can vary use size else just use PAGE_SIZE */
4617 size = nc->size;
4618 #endif
4619 /* OK, page count is 0, we can safely set it */
4620 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4621
4622 /* reset page count bias and offset to start of new frag */
4623 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4624 offset = size - fragsz;
4625 if (unlikely(offset < 0)) {
4626 /*
4627 * The caller is trying to allocate a fragment
4628 * with fragsz > PAGE_SIZE but the cache isn't big
4629 * enough to satisfy the request; this may
4630 * happen in low memory conditions.
4631 * We don't release the cache page because
4632 * it could make memory pressure worse
4633 * so we simply return NULL here.
4634 */
4635 return NULL;
4636 }
4637 }
4638
4639 nc->pagecnt_bias--;
4640 offset &= align_mask;
4641 nc->offset = offset;
4642
4643 return nc->va + offset;
4644 }
4645 EXPORT_SYMBOL(page_frag_alloc_align);
4646
4647 /*
4648 * Frees a page fragment allocated out of either a compound or order-0 page.
4649 */
4650 void page_frag_free(void *addr)
4651 {
4652 struct page *page = virt_to_head_page(addr);
4653
4654 if (unlikely(put_page_testzero(page)))
4655 free_the_page(page, compound_order(page));
4656 }
4657 EXPORT_SYMBOL(page_frag_free);
4658
4659 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4660 size_t size)
4661 {
4662 if (addr) {
4663 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
4664 struct page *page = virt_to_page((void *)addr);
4665 struct page *last = page + nr;
4666
4667 split_page_owner(page, 1 << order);
4668 split_page_memcg(page, 1 << order);
4669 while (page < --last)
4670 set_page_refcounted(last);
4671
4672 last = page + (1UL << order);
4673 for (page += nr; page < last; page++)
4674 __free_pages_ok(page, 0, FPI_TO_TAIL);
4675 }
4676 return (void *)addr;
4677 }
4678
4679 /**
4680 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4681 * @size: the number of bytes to allocate
4682 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4683 *
4684 * This function is similar to alloc_pages(), except that it allocates the
4685 * minimum number of pages to satisfy the request. alloc_pages() can only
4686 * allocate memory in power-of-two pages.
4687 *
4688 * This function is also limited by MAX_ORDER.
4689 *
4690 * Memory allocated by this function must be released by free_pages_exact().
4691 *
4692 * Return: pointer to the allocated area or %NULL in case of error.
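*
* A worked example, assuming 4K pages: a 10KB request rounds up to
* three pages, allocates an order-2 (four page) block and returns the
* unused fourth page to the buddy allocator:
*
*	buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
*	...
*	free_pages_exact(buf, 10 * 1024);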
4693 */ 4694 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 4695 { 4696 unsigned int order = get_order(size); 4697 unsigned long addr; 4698 4699 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4700 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4701 4702 addr = __get_free_pages(gfp_mask, order); 4703 return make_alloc_exact(addr, order, size); 4704 } 4705 EXPORT_SYMBOL(alloc_pages_exact); 4706 4707 /** 4708 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4709 * pages on a node. 4710 * @nid: the preferred node ID where memory should be allocated 4711 * @size: the number of bytes to allocate 4712 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4713 * 4714 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4715 * back. 4716 * 4717 * Return: pointer to the allocated area or %NULL in case of error. 4718 */ 4719 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 4720 { 4721 unsigned int order = get_order(size); 4722 struct page *p; 4723 4724 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4725 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4726 4727 p = alloc_pages_node(nid, gfp_mask, order); 4728 if (!p) 4729 return NULL; 4730 return make_alloc_exact((unsigned long)page_address(p), order, size); 4731 } 4732 4733 /** 4734 * free_pages_exact - release memory allocated via alloc_pages_exact() 4735 * @virt: the value returned by alloc_pages_exact. 4736 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4737 * 4738 * Release the memory allocated by a previous call to alloc_pages_exact. 4739 */ 4740 void free_pages_exact(void *virt, size_t size) 4741 { 4742 unsigned long addr = (unsigned long)virt; 4743 unsigned long end = addr + PAGE_ALIGN(size); 4744 4745 while (addr < end) { 4746 free_page(addr); 4747 addr += PAGE_SIZE; 4748 } 4749 } 4750 EXPORT_SYMBOL(free_pages_exact); 4751 4752 /** 4753 * nr_free_zone_pages - count number of pages beyond high watermark 4754 * @offset: The zone index of the highest zone 4755 * 4756 * nr_free_zone_pages() counts the number of pages which are beyond the 4757 * high watermark within all zones at or below a given zone index. For each 4758 * zone, the number of pages is calculated as: 4759 * 4760 * nr_free_zone_pages = managed_pages - high_pages 4761 * 4762 * Return: number of pages beyond high watermark. 4763 */ 4764 static unsigned long nr_free_zone_pages(int offset) 4765 { 4766 struct zoneref *z; 4767 struct zone *zone; 4768 4769 /* Just pick one node, since fallback list is circular */ 4770 unsigned long sum = 0; 4771 4772 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4773 4774 for_each_zone_zonelist(zone, z, zonelist, offset) { 4775 unsigned long size = zone_managed_pages(zone); 4776 unsigned long high = high_wmark_pages(zone); 4777 if (size > high) 4778 sum += size - high; 4779 } 4780 4781 return sum; 4782 } 4783 4784 /** 4785 * nr_free_buffer_pages - count number of pages beyond high watermark 4786 * 4787 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4788 * watermark within ZONE_DMA and ZONE_NORMAL. 4789 * 4790 * Return: number of pages beyond high watermark within ZONE_DMA and 4791 * ZONE_NORMAL. 
4792 */
4793 unsigned long nr_free_buffer_pages(void)
4794 {
4795 return nr_free_zone_pages(gfp_zone(GFP_USER));
4796 }
4797 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4798
4799 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4800 {
4801 zoneref->zone = zone;
4802 zoneref->zone_idx = zone_idx(zone);
4803 }
4804
4805 /*
4806 * Builds allocation fallback zone lists.
4807 *
4808 * Add all populated zones of a node to the zonelist.
4809 */
4810 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
4811 {
4812 struct zone *zone;
4813 enum zone_type zone_type = MAX_NR_ZONES;
4814 int nr_zones = 0;
4815
4816 do {
4817 zone_type--;
4818 zone = pgdat->node_zones + zone_type;
4819 if (populated_zone(zone)) {
4820 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
4821 check_highest_zone(zone_type);
4822 }
4823 } while (zone_type);
4824
4825 return nr_zones;
4826 }
4827
4828 #ifdef CONFIG_NUMA
4829
4830 static int __parse_numa_zonelist_order(char *s)
4831 {
4832 /*
4833 * We used to support different zonelist modes but they turned
4834 * out to be just not useful. Let's keep the warning in place
4835 * if somebody still uses the cmd line parameter so that we do
4836 * not fail it silently
4837 */
4838 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
4839 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
4840 return -EINVAL;
4841 }
4842 return 0;
4843 }
4844
4845 static char numa_zonelist_order[] = "Node";
4846 #define NUMA_ZONELIST_ORDER_LEN 16
4847 /*
4848 * sysctl handler for numa_zonelist_order
4849 */
4850 static int numa_zonelist_order_handler(struct ctl_table *table, int write,
4851 void *buffer, size_t *length, loff_t *ppos)
4852 {
4853 if (write)
4854 return __parse_numa_zonelist_order(buffer);
4855 return proc_dostring(table, write, buffer, length, ppos);
4856 }
4857
4858 static int node_load[MAX_NUMNODES];
4859
4860 /**
4861 * find_next_best_node - find the next node that should appear in a given node's fallback list
4862 * @node: node whose fallback list we're appending
4863 * @used_node_mask: nodemask_t of already used nodes
4864 *
4865 * We use a number of factors to determine which is the next node that should
4866 * appear on a given node's fallback list. The node should not have appeared
4867 * already in @node's fallback list, and it should be the next closest node
4868 * according to the distance array (which contains arbitrary distance values
4869 * from each node to each node in the system), and should also prefer nodes
4870 * with no CPUs, since presumably they'll have very little allocation pressure
4871 * on them otherwise.
4872 *
4873 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
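*
* A sketch of the score each candidate n receives (lowest wins):
*
*	val = node_distance(node, n) + (n < node);
*	if n has CPUs:
*		val += PENALTY_FOR_NODE_WITH_CPUS;
*	val = val * MAX_NUMNODES + node_load[n];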
4874 */ 4875 int find_next_best_node(int node, nodemask_t *used_node_mask) 4876 { 4877 int n, val; 4878 int min_val = INT_MAX; 4879 int best_node = NUMA_NO_NODE; 4880 4881 /* Use the local node if we haven't already */ 4882 if (!node_isset(node, *used_node_mask)) { 4883 node_set(node, *used_node_mask); 4884 return node; 4885 } 4886 4887 for_each_node_state(n, N_MEMORY) { 4888 4889 /* Don't want a node to appear more than once */ 4890 if (node_isset(n, *used_node_mask)) 4891 continue; 4892 4893 /* Use the distance array to find the distance */ 4894 val = node_distance(node, n); 4895 4896 /* Penalize nodes under us ("prefer the next node") */ 4897 val += (n < node); 4898 4899 /* Give preference to headless and unused nodes */ 4900 if (!cpumask_empty(cpumask_of_node(n))) 4901 val += PENALTY_FOR_NODE_WITH_CPUS; 4902 4903 /* Slight preference for less loaded node */ 4904 val *= MAX_NUMNODES; 4905 val += node_load[n]; 4906 4907 if (val < min_val) { 4908 min_val = val; 4909 best_node = n; 4910 } 4911 } 4912 4913 if (best_node >= 0) 4914 node_set(best_node, *used_node_mask); 4915 4916 return best_node; 4917 } 4918 4919 4920 /* 4921 * Build zonelists ordered by node and zones within node. 4922 * This results in maximum locality--normal zone overflows into local 4923 * DMA zone, if any--but risks exhausting DMA zone. 4924 */ 4925 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 4926 unsigned nr_nodes) 4927 { 4928 struct zoneref *zonerefs; 4929 int i; 4930 4931 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 4932 4933 for (i = 0; i < nr_nodes; i++) { 4934 int nr_zones; 4935 4936 pg_data_t *node = NODE_DATA(node_order[i]); 4937 4938 nr_zones = build_zonerefs_node(node, zonerefs); 4939 zonerefs += nr_zones; 4940 } 4941 zonerefs->zone = NULL; 4942 zonerefs->zone_idx = 0; 4943 } 4944 4945 /* 4946 * Build gfp_thisnode zonelists 4947 */ 4948 static void build_thisnode_zonelists(pg_data_t *pgdat) 4949 { 4950 struct zoneref *zonerefs; 4951 int nr_zones; 4952 4953 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 4954 nr_zones = build_zonerefs_node(pgdat, zonerefs); 4955 zonerefs += nr_zones; 4956 zonerefs->zone = NULL; 4957 zonerefs->zone_idx = 0; 4958 } 4959 4960 /* 4961 * Build zonelists ordered by zone and nodes within zones. 4962 * This results in conserving DMA zone[s] until all Normal memory is 4963 * exhausted, but results in overflowing to remote node while memory 4964 * may still exist in local DMA zone. 4965 */ 4966 4967 static void build_zonelists(pg_data_t *pgdat) 4968 { 4969 static int node_order[MAX_NUMNODES]; 4970 int node, nr_nodes = 0; 4971 nodemask_t used_mask = NODE_MASK_NONE; 4972 int local_node, prev_node; 4973 4974 /* NUMA-aware ordering of nodes */ 4975 local_node = pgdat->node_id; 4976 prev_node = local_node; 4977 4978 memset(node_order, 0, sizeof(node_order)); 4979 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 4980 /* 4981 * We don't want to pressure a particular node. 4982 * So adding penalty to the first node in same 4983 * distance group to make it round-robin. 
4984 */ 4985 if (node_distance(local_node, node) != 4986 node_distance(local_node, prev_node)) 4987 node_load[node] += 1; 4988 4989 node_order[nr_nodes++] = node; 4990 prev_node = node; 4991 } 4992 4993 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 4994 build_thisnode_zonelists(pgdat); 4995 pr_info("Fallback order for Node %d: ", local_node); 4996 for (node = 0; node < nr_nodes; node++) 4997 pr_cont("%d ", node_order[node]); 4998 pr_cont("\n"); 4999 } 5000 5001 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5002 /* 5003 * Return node id of node used for "local" allocations. 5004 * I.e., first node id of first zone in arg node's generic zonelist. 5005 * Used for initializing percpu 'numa_mem', which is used primarily 5006 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5007 */ 5008 int local_memory_node(int node) 5009 { 5010 struct zoneref *z; 5011 5012 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5013 gfp_zone(GFP_KERNEL), 5014 NULL); 5015 return zone_to_nid(z->zone); 5016 } 5017 #endif 5018 5019 static void setup_min_unmapped_ratio(void); 5020 static void setup_min_slab_ratio(void); 5021 #else /* CONFIG_NUMA */ 5022 5023 static void build_zonelists(pg_data_t *pgdat) 5024 { 5025 int node, local_node; 5026 struct zoneref *zonerefs; 5027 int nr_zones; 5028 5029 local_node = pgdat->node_id; 5030 5031 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5032 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5033 zonerefs += nr_zones; 5034 5035 /* 5036 * Now we build the zonelist so that it contains the zones 5037 * of all the other nodes. 5038 * We don't want to pressure a particular node, so when 5039 * building the zones for node N, we make sure that the 5040 * zones coming right after the local ones are those from 5041 * node N+1 (modulo N) 5042 */ 5043 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 5044 if (!node_online(node)) 5045 continue; 5046 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5047 zonerefs += nr_zones; 5048 } 5049 for (node = 0; node < local_node; node++) { 5050 if (!node_online(node)) 5051 continue; 5052 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5053 zonerefs += nr_zones; 5054 } 5055 5056 zonerefs->zone = NULL; 5057 zonerefs->zone_idx = 0; 5058 } 5059 5060 #endif /* CONFIG_NUMA */ 5061 5062 /* 5063 * Boot pageset table. One per cpu which is going to be used for all 5064 * zones and all nodes. The parameters will be set in such a way 5065 * that an item put on a list will immediately be handed over to 5066 * the buddy list. This is safe since pageset manipulation is done 5067 * with interrupts disabled. 5068 * 5069 * The boot_pagesets must be kept even after bootup is complete for 5070 * unused processors and/or zones. They do play a role for bootstrapping 5071 * hotplugged processors. 5072 * 5073 * zoneinfo_show() and maybe other functions do 5074 * not check if the processor is online before following the pageset pointer. 5075 * Other parts of the kernel may not check if the zone is available. 
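*
* Note the values below: with high = 0 and batch = 1, any page freed
* to a boot pageset immediately exceeds ->high and is handed straight
* to the buddy lists, which is what "effectively disable" means here.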
5076 */ 5077 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5078 /* These effectively disable the pcplists in the boot pageset completely */ 5079 #define BOOT_PAGESET_HIGH 0 5080 #define BOOT_PAGESET_BATCH 1 5081 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5082 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5083 5084 static void __build_all_zonelists(void *data) 5085 { 5086 int nid; 5087 int __maybe_unused cpu; 5088 pg_data_t *self = data; 5089 unsigned long flags; 5090 5091 /* 5092 * The zonelist_update_seq must be acquired with irqsave because the 5093 * reader can be invoked from IRQ with GFP_ATOMIC. 5094 */ 5095 write_seqlock_irqsave(&zonelist_update_seq, flags); 5096 /* 5097 * Also disable synchronous printk() to prevent any printk() from 5098 * trying to hold port->lock, for 5099 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5100 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5101 */ 5102 printk_deferred_enter(); 5103 5104 #ifdef CONFIG_NUMA 5105 memset(node_load, 0, sizeof(node_load)); 5106 #endif 5107 5108 /* 5109 * This node is hotadded and no memory is yet present. So just 5110 * building zonelists is fine - no need to touch other nodes. 5111 */ 5112 if (self && !node_online(self->node_id)) { 5113 build_zonelists(self); 5114 } else { 5115 /* 5116 * All possible nodes have pgdat preallocated 5117 * in free_area_init 5118 */ 5119 for_each_node(nid) { 5120 pg_data_t *pgdat = NODE_DATA(nid); 5121 5122 build_zonelists(pgdat); 5123 } 5124 5125 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5126 /* 5127 * We now know the "local memory node" for each node-- 5128 * i.e., the node of the first zone in the generic zonelist. 5129 * Set up numa_mem percpu variable for on-line cpus. During 5130 * boot, only the boot cpu should be on-line; we'll init the 5131 * secondary cpus' numa_mem as they come on-line. During 5132 * node/memory hotplug, we'll fixup all on-line cpus. 5133 */ 5134 for_each_online_cpu(cpu) 5135 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5136 #endif 5137 } 5138 5139 printk_deferred_exit(); 5140 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5141 } 5142 5143 static noinline void __init 5144 build_all_zonelists_init(void) 5145 { 5146 int cpu; 5147 5148 __build_all_zonelists(NULL); 5149 5150 /* 5151 * Initialize the boot_pagesets that are going to be used 5152 * for bootstrapping processors. The real pagesets for 5153 * each zone will be allocated later when the per cpu 5154 * allocator is available. 5155 * 5156 * boot_pagesets are used also for bootstrapping offline 5157 * cpus if the system is already booted because the pagesets 5158 * are needed to initialize allocators on a specific cpu too. 5159 * F.e. the percpu allocator needs the page allocator which 5160 * needs the percpu allocator in order to allocate its pagesets 5161 * (a chicken-egg dilemma). 5162 */ 5163 for_each_possible_cpu(cpu) 5164 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5165 5166 mminit_verify_zonelist(); 5167 cpuset_init_current_mems_allowed(); 5168 } 5169 5170 /* 5171 * unless system_state == SYSTEM_BOOTING. 5172 * 5173 * __ref due to call of __init annotated helper build_all_zonelists_init 5174 * [protected by SYSTEM_BOOTING]. 
5175 */ 5176 void __ref build_all_zonelists(pg_data_t *pgdat) 5177 { 5178 unsigned long vm_total_pages; 5179 5180 if (system_state == SYSTEM_BOOTING) { 5181 build_all_zonelists_init(); 5182 } else { 5183 __build_all_zonelists(pgdat); 5184 /* cpuset refresh routine should be here */ 5185 } 5186 /* Get the number of free pages beyond high watermark in all zones. */ 5187 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5188 /* 5189 * Disable grouping by mobility if the number of pages in the 5190 * system is too low to allow the mechanism to work. It would be 5191 * more accurate, but expensive to check per-zone. This check is 5192 * made on memory-hotadd so a system can start with mobility 5193 * disabled and enable it later 5194 */ 5195 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5196 page_group_by_mobility_disabled = 1; 5197 else 5198 page_group_by_mobility_disabled = 0; 5199 5200 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5201 nr_online_nodes, 5202 page_group_by_mobility_disabled ? "off" : "on", 5203 vm_total_pages); 5204 #ifdef CONFIG_NUMA 5205 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5206 #endif 5207 } 5208 5209 static int zone_batchsize(struct zone *zone) 5210 { 5211 #ifdef CONFIG_MMU 5212 int batch; 5213 5214 /* 5215 * The number of pages to batch allocate is either ~0.1% 5216 * of the zone or 1MB, whichever is smaller. The batch 5217 * size is striking a balance between allocation latency 5218 * and zone lock contention. 5219 */ 5220 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5221 batch /= 4; /* We effectively *= 4 below */ 5222 if (batch < 1) 5223 batch = 1; 5224 5225 /* 5226 * Clamp the batch to a 2^n - 1 value. Having a power 5227 * of 2 value was found to be more likely to have 5228 * suboptimal cache aliasing properties in some cases. 5229 * 5230 * For example if 2 tasks are alternately allocating 5231 * batches of pages, one task can end up with a lot 5232 * of pages of one half of the possible page colors 5233 * and the other with pages of the other colors. 5234 */ 5235 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5236 5237 return batch; 5238 5239 #else 5240 /* The deferral and batching of frees should be suppressed under NOMMU 5241 * conditions. 5242 * 5243 * The problem is that NOMMU needs to be able to allocate large chunks 5244 * of contiguous memory as there's no hardware page translation to 5245 * assemble apparent contiguous memory from discontiguous pages. 5246 * 5247 * Queueing large contiguous runs of pages for batching, however, 5248 * causes the pages to actually be freed in smaller chunks. As there 5249 * can be a significant delay between the individual batches being 5250 * recycled, this leads to the once large chunks of space being 5251 * fragmented and becoming unavailable for high-order allocations. 5252 */ 5253 return 0; 5254 #endif 5255 } 5256 5257 static int percpu_pagelist_high_fraction; 5258 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 5259 { 5260 #ifdef CONFIG_MMU 5261 int high; 5262 int nr_split_cpus; 5263 unsigned long total_pages; 5264 5265 if (!percpu_pagelist_high_fraction) { 5266 /* 5267 * By default, the high value of the pcp is based on the zone 5268 * low watermark so that if they are full then background 5269 * reclaim will not be started prematurely. 
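* As a worked example: with a zone low watermark of 16384 pages and 8
* local CPUs, the default pcp->high comes out at 16384 / 8 = 2048
* pages, subject to the batch * 4 floor applied at the end.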
5270 */ 5271 total_pages = low_wmark_pages(zone); 5272 } else { 5273 /* 5274 * If percpu_pagelist_high_fraction is configured, the high 5275 * value is based on a fraction of the managed pages in the 5276 * zone. 5277 */ 5278 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 5279 } 5280 5281 /* 5282 * Split the high value across all online CPUs local to the zone. Note 5283 * that early in boot that CPUs may not be online yet and that during 5284 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5285 * onlined. For memory nodes that have no CPUs, split pcp->high across 5286 * all online CPUs to mitigate the risk that reclaim is triggered 5287 * prematurely due to pages stored on pcp lists. 5288 */ 5289 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5290 if (!nr_split_cpus) 5291 nr_split_cpus = num_online_cpus(); 5292 high = total_pages / nr_split_cpus; 5293 5294 /* 5295 * Ensure high is at least batch*4. The multiple is based on the 5296 * historical relationship between high and batch. 5297 */ 5298 high = max(high, batch << 2); 5299 5300 return high; 5301 #else 5302 return 0; 5303 #endif 5304 } 5305 5306 /* 5307 * pcp->high and pcp->batch values are related and generally batch is lower 5308 * than high. They are also related to pcp->count such that count is lower 5309 * than high, and as soon as it reaches high, the pcplist is flushed. 5310 * 5311 * However, guaranteeing these relations at all times would require e.g. write 5312 * barriers here but also careful usage of read barriers at the read side, and 5313 * thus be prone to error and bad for performance. Thus the update only prevents 5314 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 5315 * can cope with those fields changing asynchronously, and fully trust only the 5316 * pcp->count field on the local CPU with interrupts disabled. 5317 * 5318 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5319 * outside of boot time (or some other assurance that no concurrent updaters 5320 * exist). 5321 */ 5322 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 5323 unsigned long batch) 5324 { 5325 WRITE_ONCE(pcp->batch, batch); 5326 WRITE_ONCE(pcp->high, high); 5327 } 5328 5329 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5330 { 5331 int pindex; 5332 5333 memset(pcp, 0, sizeof(*pcp)); 5334 memset(pzstats, 0, sizeof(*pzstats)); 5335 5336 spin_lock_init(&pcp->lock); 5337 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5338 INIT_LIST_HEAD(&pcp->lists[pindex]); 5339 5340 /* 5341 * Set batch and high values safe for a boot pageset. A true percpu 5342 * pageset's initialization will update them subsequently. Here we don't 5343 * need to be as careful as pageset_update() as nobody can access the 5344 * pageset yet. 5345 */ 5346 pcp->high = BOOT_PAGESET_HIGH; 5347 pcp->batch = BOOT_PAGESET_BATCH; 5348 pcp->free_factor = 0; 5349 } 5350 5351 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 5352 unsigned long batch) 5353 { 5354 struct per_cpu_pages *pcp; 5355 int cpu; 5356 5357 for_each_possible_cpu(cpu) { 5358 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5359 pageset_update(pcp, high, batch); 5360 } 5361 } 5362 5363 /* 5364 * Calculate and set new high and batch values for all per-cpu pagesets of a 5365 * zone based on the zone's size. 
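*
* A worked example of the batch side, assuming 4K pages: a 1GiB zone
* has 262144 managed pages, so zone_batchsize() computes
* min(262144 >> 10, 256) / 4 = 64 and then clamps it to
* rounddown_pow_of_two(64 + 32) - 1 = 63.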
5366 */ 5367 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5368 { 5369 int new_high, new_batch; 5370 5371 new_batch = max(1, zone_batchsize(zone)); 5372 new_high = zone_highsize(zone, new_batch, cpu_online); 5373 5374 if (zone->pageset_high == new_high && 5375 zone->pageset_batch == new_batch) 5376 return; 5377 5378 zone->pageset_high = new_high; 5379 zone->pageset_batch = new_batch; 5380 5381 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 5382 } 5383 5384 void __meminit setup_zone_pageset(struct zone *zone) 5385 { 5386 int cpu; 5387 5388 /* Size may be 0 on !SMP && !NUMA */ 5389 if (sizeof(struct per_cpu_zonestat) > 0) 5390 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5391 5392 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5393 for_each_possible_cpu(cpu) { 5394 struct per_cpu_pages *pcp; 5395 struct per_cpu_zonestat *pzstats; 5396 5397 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5398 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5399 per_cpu_pages_init(pcp, pzstats); 5400 } 5401 5402 zone_set_pageset_high_and_batch(zone, 0); 5403 } 5404 5405 /* 5406 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5407 * page high values need to be recalculated. 5408 */ 5409 static void zone_pcp_update(struct zone *zone, int cpu_online) 5410 { 5411 mutex_lock(&pcp_batch_high_lock); 5412 zone_set_pageset_high_and_batch(zone, cpu_online); 5413 mutex_unlock(&pcp_batch_high_lock); 5414 } 5415 5416 /* 5417 * Allocate per cpu pagesets and initialize them. 5418 * Before this call only boot pagesets were available. 5419 */ 5420 void __init setup_per_cpu_pageset(void) 5421 { 5422 struct pglist_data *pgdat; 5423 struct zone *zone; 5424 int __maybe_unused cpu; 5425 5426 for_each_populated_zone(zone) 5427 setup_zone_pageset(zone); 5428 5429 #ifdef CONFIG_NUMA 5430 /* 5431 * Unpopulated zones continue using the boot pagesets. 5432 * The numa stats for these pagesets need to be reset. 5433 * Otherwise, they will end up skewing the stats of 5434 * the nodes these zones are associated with. 5435 */ 5436 for_each_possible_cpu(cpu) { 5437 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5438 memset(pzstats->vm_numa_event, 0, 5439 sizeof(pzstats->vm_numa_event)); 5440 } 5441 #endif 5442 5443 for_each_online_pgdat(pgdat) 5444 pgdat->per_cpu_nodestats = 5445 alloc_percpu(struct per_cpu_nodestat); 5446 } 5447 5448 __meminit void zone_pcp_init(struct zone *zone) 5449 { 5450 /* 5451 * per cpu subsystem is not up at this point. The following code 5452 * relies on the ability of the linker to provide the 5453 * offset of a (static) per cpu variable into the per cpu area. 
	zone->per_cpu_pageset = &boot_pageset;
	zone->per_cpu_zonestats = &boot_zonestats;
	zone->pageset_high = BOOT_PAGESET_HIGH;
	zone->pageset_batch = BOOT_PAGESET_BATCH;

	if (populated_zone(zone))
		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
			 zone->present_pages, zone_batchsize(zone));
}

void adjust_managed_page_count(struct page *page, long count)
{
	atomic_long_add(count, &page_zone(page)->managed_pages);
	totalram_pages_add(count);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(count);
#endif
}
EXPORT_SYMBOL(adjust_managed_page_count);

unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		struct page *page = virt_to_page(pos);
		void *direct_map_addr;

		/*
		 * 'direct_map_addr' might be different from 'pos'
		 * because some architectures' virt_to_page()
		 * works with aliases. Getting the direct map
		 * address ensures that we get a _writeable_
		 * alias for the memset().
		 */
		direct_map_addr = page_address(page);
		/*
		 * Perform a kasan-unchecked memset() since this memory
		 * has not been initialized.
		 */
		direct_map_addr = kasan_reset_tag(direct_map_addr);
		if ((unsigned int)poison <= 0xFF)
			memset(direct_map_addr, poison, PAGE_SIZE);

		free_reserved_page(page);
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK\n", s, K(pages));

	return pages;
}

static int page_alloc_cpu_dead(unsigned int cpu)
{
	struct zone *zone;

	lru_add_drain_cpu(cpu);
	mlock_drain_remote(cpu);
	drain_pages(cpu);

	/*
	 * Spill the event counters of the dead processor
	 * into the current processor's event counters.
	 * This artificially elevates the count of the current
	 * processor.
	 */
	vm_events_fold_cpu(cpu);

	/*
	 * Zero the differential counters of the dead processor
	 * so that the vm statistics are consistent.
	 *
	 * This is only okay since the processor is dead and cannot
	 * race with what we are doing.
	 */
	cpu_vm_stats_fold(cpu);

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 0);

	return 0;
}

static int page_alloc_cpu_online(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 1);
	return 0;
}

void __init page_alloc_init_cpuhp(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
					"mm/page_alloc:pcp",
					page_alloc_cpu_online,
					page_alloc_cpu_dead);
	WARN_ON(ret < 0);
}
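
/*
 * Illustrative sketch of the reserve accounting implemented by
 * calculate_totalreserve_pages() below: for each zone, the largest
 * lowmem_reserve[] entry plus the high watermark is treated as reserved,
 * clamped to the zone's managed pages. Numbers are hypothetical; not built.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long lowmem_reserve[3] = { 0, 0, 3072 };	/* hypothetical zone */
	long high_wmark = 2048, managed_pages = 262144;
	long max = 0;
	int j;

	for (j = 0; j < 3; j++)
		if (lowmem_reserve[j] > max)
			max = lowmem_reserve[j];
	max += high_wmark;				/* 5120 pages */
	if (max > managed_pages)
		max = managed_pages;
	printf("reserve contribution: %ld pages\n", max);
	return 0;
}
#endif
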
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 * or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {

		pgdat->totalreserve_pages = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;
			unsigned long managed_pages = zone_managed_pages(zone);

			/* Find the valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* We treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > managed_pages)
				max = managed_pages;

			pgdat->totalreserve_pages += max;

			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 *	has a correct pages-reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
			struct zone *zone = &pgdat->node_zones[i];
			int ratio = sysctl_lowmem_reserve_ratio[i];
			bool clear = !ratio || !zone_managed_pages(zone);
			unsigned long managed_pages = 0;

			for (j = i + 1; j < MAX_NR_ZONES; j++) {
				struct zone *upper_zone = &pgdat->node_zones[j];

				managed_pages += zone_managed_pages(upper_zone);

				if (clear)
					zone->lowmem_reserve[j] = 0;
				else
					zone->lowmem_reserve[j] = managed_pages / ratio;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate the total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
	for_each_zone(zone) {
		if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
			lowmem_pages += zone_managed_pages(zone);
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone_managed_pages(zone);
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need pages from highmem and movable zones, so cap
			 * pages_min to a small value here.
			 *
			 * The WMARK_HIGH - WMARK_LOW and WMARK_LOW - WMARK_MIN
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem and movable zones.
			 */
			unsigned long min_pages;

			min_pages = zone_managed_pages(zone) / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
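		/*
		 * Worked example (illustrative numbers only): for a zone with
		 * 1,048,576 managed pages (4GiB of 4KiB pages), a per-zone
		 * min share of 4096 pages and watermark_scale_factor = 10,
		 * the scale term is 1,048,576 * 10 / 10000 = 1048 pages, so
		 * the delta below is max(4096 / 4, 1048) = 1048 and each
		 * watermark sits 1048 pages above the previous one.
		 */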
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone_managed_pages(zone),
				      watermark_scale_factor, 10000));

		zone->watermark_boost = 0;
		zone->_watermark[WMARK_LOW]   = min_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_HIGH]  = low_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	struct zone *zone;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);

	/*
	 * The watermark sizes have changed, so update the pcpu batch
	 * and high limits or the limits may be inappropriate.
	 */
	for_each_zone(zone)
		zone_pcp_update(zone, 0);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (256MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
void calculate_min_free_kbytes(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes)
		min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
	else
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
			new_min_free_kbytes, user_min_free_kbytes);
}

int __meminit init_per_zone_wmark_min(void)
{
	calculate_min_free_kbytes();
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	khugepaged_min_free_kbytes_update();

	return 0;
}
postcore_initcall(init_per_zone_wmark_min)
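
/*
 * Illustrative sketch of the min_free_kbytes formula documented above, in
 * standalone C. floor(sqrt()) stands in for the kernel's int_sqrt(); the
 * 4GiB lowmem figure is hypothetical. Not built.
 */
#if 0
#include <stdio.h>
#include <math.h>

int main(void)
{
	unsigned long lowmem_kbytes = 4UL << 20;	/* 4GiB of lowmem, in KiB */
	unsigned long min_free = (unsigned long)sqrt((double)(lowmem_kbytes * 16));

	/* clamp to [128k, 256MB] as calculate_min_free_kbytes() does */
	if (min_free < 128)
		min_free = 128;
	if (min_free > 262144)
		min_free = 262144;
	printf("min_free_kbytes = %lu\n", min_free);	/* 8192, matching the table */
	return 0;
}
#endif
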
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}

#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
							 sysctl_min_unmapped_ratio) / 100;
}

static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_unmapped_ratio();

	return 0;
}

static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
						     sysctl_min_slab_ratio) / 100;
}

static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_slab_ratio();

	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it only
 * makes sense as a function of the boot-time zone sizes.
 */
static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}
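
/*
 * Usage sketch for the percpu_pagelist_high_fraction sysctl handled below
 * (numbers hypothetical): setting the fraction to 8 on a zone with
 * 1,048,576 managed pages gives the zone's pcp lists 1,048,576 / 8 =
 * 131,072 pages in total; with 16 local CPUs, each CPU's pcp->high becomes
 * 8192 pages. A non-zero value below MIN_PERCPU_PAGELIST_HIGH_FRACTION is
 * rejected with -EINVAL, and 0 restores the default watermark-based sizing:
 *
 *	echo 8 > /proc/sys/vm/percpu_pagelist_high_fraction
 *	echo 0 > /proc/sys/vm/percpu_pagelist_high_fraction
 */
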
/*
 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_high_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_high_fraction &&
	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
		goto out;

	for_each_populated_zone(zone)
		zone_set_pageset_high_and_batch(zone, 0);
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}

static struct ctl_table page_alloc_sysctl_table[] = {
	{
		.procname	= "min_free_kbytes",
		.data		= &min_free_kbytes,
		.maxlen		= sizeof(min_free_kbytes),
		.mode		= 0644,
		.proc_handler	= min_free_kbytes_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "watermark_boost_factor",
		.data		= &watermark_boost_factor,
		.maxlen		= sizeof(watermark_boost_factor),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "watermark_scale_factor",
		.data		= &watermark_scale_factor,
		.maxlen		= sizeof(watermark_scale_factor),
		.mode		= 0644,
		.proc_handler	= watermark_scale_factor_sysctl_handler,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_THREE_THOUSAND,
	},
	{
		.procname	= "percpu_pagelist_high_fraction",
		.data		= &percpu_pagelist_high_fraction,
		.maxlen		= sizeof(percpu_pagelist_high_fraction),
		.mode		= 0644,
		.proc_handler	= percpu_pagelist_high_fraction_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "lowmem_reserve_ratio",
		.data		= &sysctl_lowmem_reserve_ratio,
		.maxlen		= sizeof(sysctl_lowmem_reserve_ratio),
		.mode		= 0644,
		.proc_handler	= lowmem_reserve_ratio_sysctl_handler,
	},
#ifdef CONFIG_NUMA
	{
		.procname	= "numa_zonelist_order",
		.data		= &numa_zonelist_order,
		.maxlen		= NUMA_ZONELIST_ORDER_LEN,
		.mode		= 0644,
		.proc_handler	= numa_zonelist_order_handler,
	},
	{
		.procname	= "min_unmapped_ratio",
		.data		= &sysctl_min_unmapped_ratio,
		.maxlen		= sizeof(sysctl_min_unmapped_ratio),
		.mode		= 0644,
		.proc_handler	= sysctl_min_unmapped_ratio_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
	{
		.procname	= "min_slab_ratio",
		.data		= &sysctl_min_slab_ratio,
		.maxlen		= sizeof(sysctl_min_slab_ratio),
		.mode		= 0644,
		.proc_handler	= sysctl_min_slab_ratio_sysctl_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
#endif
	{}
};

void __init page_alloc_sysctl_init(void)
{
	register_sysctl_init("vm", page_alloc_sysctl_table);
}

#ifdef CONFIG_CONTIG_ALLOC
/* Usage: See admin-guide/dynamic-debug-howto.rst */
static void alloc_contig_dump_pages(struct list_head *page_list)
{
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");

	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
		struct page *page;

		dump_stack();
		list_for_each_entry(page, page_list, lru)
			dump_page(page, "migration failure");
	}
}

/* [start, end) must belong to a single zone. */
int __alloc_contig_migrate_range(struct compact_control *cc,
				 unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	lru_cache_disable();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			ret = isolate_migratepages_range(cc, pfn, end);
			if (ret && ret != -EAGAIN)
				break;
			pfn = cc->migrate_pfn;
			tries = 0;
		} else if (++tries == 5) {
			ret = -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							     &cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);

		/*
		 * On -ENOMEM, migrate_pages() bails out right away. It is
		 * pointless to retry on this error, so do the same here.
		 */
		if (ret == -ENOMEM)
			break;
	}

	lru_cache_enable();
	if (ret < 0) {
		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
			alloc_contig_dump_pages(&cc->migratepages);
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock aligned. The PFN range must
 * belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range. Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code. On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is mark all pageblocks in the range as
	 * MIGRATE_ISOLATE. Because pageblock and max-order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, start_isolate_page_range() has special handling for this.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (ie. pages that
	 * we are interested in). This will put all the pages in the
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in the range from the
	 * page allocator, removing them from the buddy system. This way
	 * the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
	if (ret)
		goto done;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
	 * which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated. So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to the caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within pageblock_nr_pages-aligned
	 * blocks that are marked as MIGRATE_ISOLATE. What's more, all
	 * pages in [start, end) are free in the page allocator. What we
	 * are going to do is to allocate all pages from [start, end)
	 * (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the range of interest may not be aligned with pages the
	 * page allocator holds, ie. they can be part of higher-order
	 * pages. Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated and thus won't get removed from buddy.
	 */

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order > MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * The outer_start page could be a small-order buddy page that
		 * doesn't include the start page. Adjust outer_start in this
		 * case to report the failed page properly on the tracepoint
		 * in test_pages_isolated().
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(start, end, migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);

static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (PageHuge(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}

/**
 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which cannot be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two, then the allocated range is also guaranteed to be aligned to
 * nr_pages (e.g. a 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							   gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%lu pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);

/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on the pcplist will either finish before the drain and the
 * page will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}

void zone_pcp_reset(struct zone *zone)
{
	int cpu;
	struct per_cpu_zonestat *pzstats;

	if (zone->per_cpu_pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
			drain_zonestat(zone, pzstats);
		}
		free_percpu(zone->per_cpu_pageset);
		zone->per_cpu_pageset = &boot_pageset;
		if (zone->per_cpu_zonestats != &boot_zonestats) {
			free_percpu(zone->per_cpu_zonestats);
			zone->per_cpu_zonestats = &boot_zonestats;
		}
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * A HWPoisoned page may not be in the buddy system, and its
		 * page_count() may not be 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
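
/*
 * Standalone sketch of the index arithmetic used by is_free_buddy_page()
 * and take_page_off_buddy() below: for a pfn and a candidate order, the
 * head of the enclosing order-sized block is found by masking off the low
 * order bits. The pfn is hypothetical; not built.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x1234d;
	unsigned int order;

	for (order = 0; order <= 3; order++)
		printf("order %u: block head pfn = 0x%lx\n", order,
		       pfn - (pfn & ((1UL << order) - 1)));
	/* prints 0x1234d, 0x1234c, 0x1234c, 0x12348 */
	return 0;
}
#endif
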
/*
 * This function returns a stable result only if called under zone lock.
 */
bool is_free_buddy_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned int order;

	for (order = 0; order <= MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) &&
		    buddy_order_unsafe(page_head) >= order)
			break;
	}

	return order <= MAX_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page into sub-pages, and keep our target page
 * out of the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order <= MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
					       page_order, migratetype);
			SetPageHWPoisonTakenOff(page);
			if (!is_migrate_isolate(migratetype))
				__mod_zone_freepage_state(zone, -1, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
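
/*
 * Worked trace of break_down_buddy_pages() above, with made-up indices:
 * splitting an order-3 block (pages 0..7) around target page 5 frees
 * pages 0..3 as one order-2 block, pages 6..7 as one order-1 block and
 * page 4 as an order-0 block, leaving only page 5 out of the freelists.
 */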
/*
 * Cancel takeoff done by take_page_off_buddy().
 */
bool put_page_back_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int migratetype = get_pfnblock_migratetype(page, pfn);
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	if (put_page_testzero(page)) {
		ClearPageHWPoisonTakenOff(page);
		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
		if (TestClearPageHWPoison(page))
			ret = true;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret;
}
#endif

#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat) {
		struct zone *zone = &pgdat->node_zones[ZONE_DMA];

		if (managed_zone(zone))
			return true;
	}
	return false;
}
#endif /* CONFIG_ZONE_DMA */

#ifdef CONFIG_UNACCEPTED_MEMORY

/* Counts number of zones with unaccepted pages. */
static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);

static bool lazy_accept = true;

static int __init accept_memory_parse(char *p)
{
	if (!strcmp(p, "lazy")) {
		lazy_accept = true;
		return 0;
	} else if (!strcmp(p, "eager")) {
		lazy_accept = false;
		return 0;
	} else {
		return -EINVAL;
	}
}
early_param("accept_memory", accept_memory_parse);

static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
	phys_addr_t start = page_to_phys(page);
	phys_addr_t end = start + (PAGE_SIZE << order);

	return range_contains_unaccepted_memory(start, end);
}

static void accept_page(struct page *page, unsigned int order)
{
	phys_addr_t start = page_to_phys(page);

	accept_memory(start, start + (PAGE_SIZE << order));
}

static bool try_to_accept_memory_one(struct zone *zone)
{
	unsigned long flags;
	struct page *page;
	bool last;

	if (list_empty(&zone->unaccepted_pages))
		return false;

	spin_lock_irqsave(&zone->lock, flags);
	page = list_first_entry_or_null(&zone->unaccepted_pages,
					struct page, lru);
	if (!page) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return false;
	}

	list_del(&page->lru);
	last = list_empty(&zone->unaccepted_pages);

	__mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
	spin_unlock_irqrestore(&zone->lock, flags);

	accept_page(page, MAX_ORDER);

	__free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);

	if (last)
		static_branch_dec(&zones_with_unaccepted_pages);

	return true;
}

static bool try_to_accept_memory(struct zone *zone, unsigned int order)
{
	long to_accept;
	int ret = false;

	/* How much do we need to accept to get to the high watermark? */
	to_accept = high_wmark_pages(zone) -
		    (zone_page_state(zone, NR_FREE_PAGES) -
		    __zone_watermark_unusable_free(zone, order, 0));

	/* Accept at least one page */
	do {
		if (!try_to_accept_memory_one(zone))
			break;
		ret = true;
		to_accept -= MAX_ORDER_NR_PAGES;
	} while (to_accept > 0);

	return ret;
}

static inline bool has_unaccepted_memory(void)
{
	return static_branch_unlikely(&zones_with_unaccepted_pages);
}

static bool __free_unaccepted(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool first = false;

	if (!lazy_accept)
		return false;

	spin_lock_irqsave(&zone->lock, flags);
	first = list_empty(&zone->unaccepted_pages);
	list_add_tail(&page->lru, &zone->unaccepted_pages);
	__mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
	spin_unlock_irqrestore(&zone->lock, flags);

	if (first)
		static_branch_inc(&zones_with_unaccepted_pages);

	return true;
}

#else

static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
	return false;
}

static void accept_page(struct page *page, unsigned int order)
{
}

static bool try_to_accept_memory(struct zone *zone, unsigned int order)
{
	return false;
}

static inline bool has_unaccepted_memory(void)
{
	return false;
}

static bool __free_unaccepted(struct page *page)
{
	BUILD_BUG();
	return false;
}

#endif /* CONFIG_UNACCEPTED_MEMORY */
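
/*
 * Standalone sketch of the try_to_accept_memory() loop above with made-up
 * numbers; MAX_ORDER_NR_PAGES is assumed to be 1024 here. Not built.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long high_wmark = 10000, free_pages = 6000, unusable = 500;
	long to_accept = high_wmark - (free_pages - unusable);	/* 4500 */
	int chunks = 0;

	do {
		chunks++;		/* one try_to_accept_memory_one() call */
		to_accept -= 1024;
	} while (to_accept > 0);
	printf("accepted %d MAX_ORDER chunks\n", chunks);	/* 5 */
	return 0;
}
#endif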