/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>

/* Internal core VMA manipulation functions. */
#include "vma.h"

struct folio_batch;

/*
 * Maintains state across a page table move. The operation assumes both source
 * and destination VMAs already exist and are specified by the user.
 *
 * Partial moves are permitted, but the old and new ranges must both reside
 * within a VMA.
 *
 * The mmap lock must be held in write mode, and VMA write locks must be held
 * on any VMA that is visible.
 *
 * Use the PAGETABLE_MOVE() macro to initialise this struct.
 *
 * The old_addr and new_addr fields are updated as the page table move is
 * executed.
 *
 * NOTE: The page table move is performed by reading from [old_addr, old_end),
 * and old_addr may be updated for better page table alignment, so len_in
 * represents the length of the range being copied as specified by the user.
 */
struct pagetable_move_control {
	struct vm_area_struct *old;	/* Source VMA. */
	struct vm_area_struct *new;	/* Destination VMA. */
	unsigned long old_addr;		/* Address from which the move begins. */
	unsigned long old_end;		/* Exclusive address at which old range ends. */
	unsigned long new_addr;		/* Address to move page tables to. */
	unsigned long len_in;		/* Bytes to remap specified by user. */

	bool need_rmap_locks;		/* Do rmap locks need to be taken? */
	bool for_stack;			/* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
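/*
 * Illustrative sketch only (not part of this header's API): a caller such as
 * mremap() would set up the move state roughly like this, where old_vma,
 * new_vma, old_addr, new_addr and len are hypothetical locals the caller has
 * already validated:
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 *	pmc.need_rmap_locks = true;
 *	moved = move_page_tables(&pmc);
 *
 * move_page_tables() (declared later in this header) reports how much of the
 * range was actually moved, in bytes.
 */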
/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Unlike WARN_ON_ONCE(), no warning is issued when __GFP_NOWARN is
 * specified.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data..once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
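/*
 * Illustrative sketch only: a typical use in an allocation path warns once on
 * an unexpected condition unless the caller passed __GFP_NOWARN, e.g.:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp_mask))
 *		return NULL;
 */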
void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount. Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
		return -1;
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore().
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
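/*
 * Worked example (illustrative values): swap entries backing a large folio
 * are allocated naturally aligned, so for a 16-page folio whose PTE holds
 * entry.val == 0x1234, ALIGN_DOWN(0x1234, 16) == 0x1230 recovers the entry
 * of the folio's first page.
 */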
static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
}

/*
 * This is a file-backed mapping, and is about to be memory mapped - invoke its
 * mmap hook and safely handle error conditions. On error, VMA hooks will be
 * mutated.
 *
 * @file: File which backs the mapping.
 * @vma: VMA which we are mapping.
 *
 * Returns: 0 if success, error otherwise.
 */
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	int err = vfs_mmap(file, vma);

	if (likely(!err))
		return 0;

	/*
	 * OK, we tried to call the file hook for mmap(), but an error
	 * arose. The mapping is in an inconsistent state and we must not invoke
	 * any further hooks on it.
	 */
	vma->vm_ops = &vma_dummy_vm_ops;

	return err;
}

/*
 * If the VMA has a close hook then close it, and since closing it might leave
 * it in an inconsistent state which makes the use of any hooks suspect, clear
 * them down by installing dummy empty hooks.
 */
static inline void vma_close(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close) {
		vma->vm_ops->close(vma);

		/*
		 * The mapping is in an inconsistent state, and no further hooks
		 * may be invoked upon it.
		 */
		vma->vm_ops = &vma_dummy_vm_ops;
	}
}

#ifdef CONFIG_MMU

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
{
	return down_write_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
{
	return down_read_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}

struct anon_vma *folio_get_anon_vma(const struct folio *folio);

/* Operations which modify VMAs. */
enum vma_operation {
	VMA_OP_SPLIT,
	VMA_OP_MERGE_UNFAULTED,
	VMA_OP_REMAP,
	VMA_OP_FORK,
};

int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
		   enum vma_operation operation);
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma);
int __anon_vma_prepare(struct vm_area_struct *vma);
void unlink_anon_vmas(struct vm_area_struct *vma);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs respecting the dirty bit. */
#define FPB_RESPECT_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs respecting the soft-dirty bit. */
#define FPB_RESPECT_SOFT_DIRTY		((__force fpb_t)BIT(1))

/* Compare PTEs respecting the writable bit. */
#define FPB_RESPECT_WRITE		((__force fpb_t)BIT(2))

/*
 * Merge PTE write bits: if any PTE in the batch is writable, modify the
 * PTE at @ptentp to be writable.
 */
#define FPB_MERGE_WRITE			((__force fpb_t)BIT(3))

/*
 * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
 * modify the PTE at @ptentp to be young or dirty, respectively.
 */
#define FPB_MERGE_YOUNG_DIRTY		((__force fpb_t)BIT(4))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (!(flags & FPB_RESPECT_DIRTY))
		pte = pte_mkclean(pte);
	if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
		pte = pte_clear_soft_dirty(pte);
	if (likely(!(flags & FPB_RESPECT_WRITE)))
		pte = pte_wrprotect(pte);
	return pte_mkold(pte);
}

/**
 * folio_pte_batch_flags - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
 * @ptep: Page table pointer for the first entry.
 * @ptentp: Pointer to a COPY of the first page table entry whose flags this
 *	    function updates based on @flags if appropriate.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio in a single VMA and a single page table.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
 * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
 *
 * @ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single VMA and
 * a single page table.
 *
 * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
 * be updated: it's crucial that a pointer to a COPY of the first
 * page table entry, obtained through ptep_get(), is provided as @ptentp.
 *
 * This function will be inlined to optimize based on the input parameters;
 * consider using folio_pte_batch() instead if applicable.
 *
 * Return: the number of table entries in the batch.
 */
static inline unsigned int folio_pte_batch_flags(struct folio *folio,
		struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
		unsigned int max_nr, fpb_t flags)
{
	bool any_writable = false, any_young = false, any_dirty = false;
	pte_t expected_pte, pte = *ptentp;
	unsigned int nr, cur_nr;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
	/*
	 * Ensure this is a pointer to a copy, not a pointer into a page table.
	 * If this is a stack value, it won't be a valid virtual address, but
	 * that's fine because it also cannot be pointing into the page table.
	 */
	VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));

	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
	max_nr = min_t(unsigned long, max_nr,
		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));

	nr = pte_batch_hint(ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = ptep + nr;

	while (nr < max_nr) {
		pte = ptep_get(ptep);

		if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
			break;

		if (flags & FPB_MERGE_WRITE)
			any_writable |= pte_write(pte);
		if (flags & FPB_MERGE_YOUNG_DIRTY) {
			any_young |= pte_young(pte);
			any_dirty |= pte_dirty(pte);
		}

		cur_nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
		ptep += cur_nr;
		nr += cur_nr;
	}

	if (any_writable)
		*ptentp = pte_mkwrite(*ptentp, vma);
	if (any_young)
		*ptentp = pte_mkyoung(*ptentp);
	if (any_dirty)
		*ptentp = pte_mkdirty(*ptentp);

	return min(nr, max_nr);
}

unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
		unsigned int max_nr);
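/*
 * Illustrative sketch only: a caller that already holds the PTE lock and has
 * located a present PTE mapping part of a large folio might batch over it
 * like this (ptep, addr and end are hypothetical locals):
 *
 *	pte_t pte = ptep_get(ptep);
 *	unsigned int max_nr = (end - addr) >> PAGE_SHIFT;
 *	unsigned int nr = folio_pte_batch(folio, ptep, pte, max_nr);
 *
 * All nr entries can then be processed together (e.g. a single rmap update)
 * instead of page by page.
 */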
/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	forward or backward by delta
 * @pte: The initial pte state; must be a swap entry
 * @delta: The direction and the offset we are moving; forward if delta
 *	is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
	const softleaf_t entry = softleaf_from_pte(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + delta)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}

/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; must be a swap entry.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	return pte_move_swp_offset(pte, 1);
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	const softleaf_t entry = softleaf_from_pte(pte);
	pte_t *ptep = start_ptep + 1;
	unsigned short cgroup_id;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!softleaf_is_swap(entry));

	cgroup_id = lookup_swap_cgroup_id(entry);
	while (ptep < end_ptep) {
		softleaf_t entry;

		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;
		entry = softleaf_from_pte(pte);
		if (lookup_swap_cgroup_id(entry) != cgroup_id)
			break;
		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
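/*
 * Illustrative sketch only: when zapping or swapping in a range, a caller
 * holding the PTE lock can consume a whole run of consecutive swap entries
 * at once (ptep, addr and end are hypothetical locals):
 *
 *	pte_t pte = ptep_get(ptep);
 *	int max_nr = (end - addr) >> PAGE_SHIFT;
 *	int nr = swap_pte_batch(ptep, max_nr, pte);
 *
 * The nr entries share a swap type and cgroup and have consecutive offsets,
 * so they can be freed or restored in a single operation.
 */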
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
			      int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vmf->vma);
	return ret;
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
		      struct vm_area_struct *vma,
		      unsigned long addr, unsigned long end,
		      struct zap_details *details);
void zap_page_range_single_batched(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long size, struct zap_details *details);
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
			   gfp_t gfp);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
	      !folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

static inline void set_pages_refcounted(struct page *page, unsigned long nr_pages)
{
	unsigned long pfn = page_to_pfn(page);

	for (; nr_pages--; pfn++)
		set_page_refcounted(pfn_to_page(pfn));
}
/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool folio_isolate_lru(struct folio *folio);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
int user_proactive_reclaim(char *buf,
			   struct mem_cgroup *memcg, pg_data_t *pgdat);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;
extern int defrag_mode;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on a zone lower than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since a zone higher than this index cannot be
	 * used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
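/*
 * Illustrative sketch only, showing why the READ_ONCE above matters: the
 * unsafe order is read once into a local, range-checked, and only the local
 * is used afterwards; it must still be re-validated under the zone lock
 * before being trusted:
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order < NR_PAGE_ORDERS) {
 *		// speculative use only; recheck PageBuddy()/buddy_order()
 *		// under page_zone(page)->lock before acting on it
 *	}
 */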
/*
 * This function checks whether a page is free and is the buddy.
 * We can coalesce a page and its buddy if:
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *	 function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *	       page_to_pfn().
 *
 * The found buddy can be a non PageBuddy, out of @page's zone, or its order
 * may differ from @page's. The validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
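/*
 * Worked example (illustrative): for pfn 8 at order 1,
 * __find_buddy_pfn(8, 1) == 8 ^ 2 == 10, so pfn 10 is the candidate buddy;
 * if they merge, the order-2 parent starts at 10 & ~2 == 8. For pfn 10 the
 * same call returns 8, demonstrating that the relation is symmetric.
 */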
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
		unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
		unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);
bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
				unsigned long nr_pages);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(unsigned long pfn, unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;
	VM_WARN_ON_ONCE(order > MAX_FOLIO_ORDER);

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef NR_PAGES_IN_LARGE_FOLIO
	folio->_nr_pages = 1U << order;
#endif
}

bool __folio_unqueue_deferred_split(struct folio *folio);
static inline bool folio_unqueue_deferred_split(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return false;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If the folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return false;

	return __folio_unqueue_deferred_split(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
		atomic_set(&folio->_nr_pages_mapped, 0);
	if (IS_ENABLED(CONFIG_MM_ID)) {
		folio->_mm_ids = 0;
		folio->_mm_id_mapcount[0] = -1;
		folio->_mm_id_mapcount[1] = -1;
	}
	if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
		atomic_set(&folio->_pincount, 0);
		atomic_set(&folio->_entire_mapcount, -1);
	}
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}
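/*
 * Illustrative sketch only: the compound-page setup in mm/page_alloc.c ties
 * the two helpers above together, roughly:
 *
 *	void prep_compound_page(struct page *page, unsigned int order)
 *	{
 *		int i;
 *
 *		__SetPageHead(page);
 *		for (i = 1; i < (1 << order); i++)
 *			prep_compound_tail(page, i);
 *		prep_compound_head(page, order);
 *	}
 */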
void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
		nodemask_t *);
#define __alloc_frozen_pages(...) \
	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
void free_frozen_pages(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

#ifdef CONFIG_NUMA
struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
#else
static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
{
	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
}
#endif

#define alloc_frozen_pages(...) \
	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))

struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_frozen_pages_nolock(...) \
	alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
void free_frozen_pages_nolock(struct page *page, unsigned int order);
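/*
 * Illustrative sketch only (lifecycle inferred from the helpers in this
 * header): frozen pages are allocated with a refcount of zero. A caller
 * either frees them while still frozen via free_frozen_pages(), or converts
 * them to ordinary refcounted pages with set_page_refcounted() (defined
 * earlier) before publishing them:
 *
 *	struct page *page = alloc_frozen_pages(GFP_KERNEL, 0);
 *
 *	if (page) {
 *		set_page_refcounted(page);	// refcount 0 -> 1
 *		put_page(page);			// final put frees the page
 *	}
 */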
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int,
		bool);

#ifdef CONFIG_SPARSEMEM
void sparse_init(void);
#else
static inline void sparse_init(void) {}
#endif /* CONFIG_SPARSEMEM */

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
struct cma;

#ifdef CONFIG_CMA
bool cma_validate_zones(struct cma *cma);
void *cma_reserve_early(struct cma *cma, unsigned long size);
void init_cma_pageblock(struct page *page);
#else
static inline bool cma_validate_zones(struct cma *cma)
{
	return false;
}
static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
{
	return NULL;
}
static inline void init_cma_pageblock(struct page *page)
{
}
#endif

int find_suitable_fallback(struct free_area *area, unsigned int order,
			   int migratetype, bool claimable);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(const struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
		     unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range. "Fully mapped" means all the pages of the folio are associated with
 * the page tables of the range, while this function only checks whether the
 * folio range lies within [start, end). Callers that care about the page
 * table association need to check the page tables themselves.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the page
 * tables of the VMA, and the range [start, end) intersects the VMA range.
 * The caller wants to know whether the folio is fully associated with the
 * range. It first calls this function to check whether the folio is within
 * the range, and then checks the page tables to know whether the folio is
 * fully mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}
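/*
 * Worked example (illustrative values): for a 4-page folio mapped at address
 * A inside the VMA, folio_within_range(folio, vma, A, A + 2 * PAGE_SIZE)
 * returns false because end - A < folio_size(folio), whereas
 * folio_within_range(folio, vma, A, A + 4 * PAGE_SIZE) returns true.
 */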
/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				   struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
				     struct vm_area_struct *vma)
{
	/*
	 * Always munlock when this function is called. Ideally, we would only
	 * munlock if a page of the folio is being unmapped from the VMA,
	 * leaving the folio no longer fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we always
	 * munlock the folio and let page reclaim correct it if that's wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(const struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
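/*
 * Worked example (illustrative values, assuming the VMA is large enough):
 * with vma->vm_start == 0x100000 and vma->vm_pgoff == 16, a single page at
 * pgoff 20 maps to 0x100000 + ((20 - 16) << PAGE_SHIFT), i.e. 0x104000 for
 * 4kB pages. A range starting below vm_pgoff but overlapping it (say pgoff
 * 14, nr_pages 4) yields vm_start itself, since the first pages of the range
 * are simply not mapped by this VMA.
 */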
/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void init_deferred_page(unsigned long pfn, int nid);

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
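/*
 * Illustrative sketch only: callers pass a severity, a subsystem prefix and
 * a printf-style format, e.g. (first and last are hypothetical locals):
 *
 *	mminit_dprintk(MMINIT_VERIFY, "pageflags_layout",
 *		       "Node/Zone ID: %lu -> %lu\n", first, last);
 */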
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;

extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
#define node_reclaim_mode 0

static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

/*
 * mm/memory-failure.c
 */
#ifdef CONFIG_MEMORY_FAILURE
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
void shake_folio(struct folio *folio);
typedef int hwpoison_filter_func_t(struct page *p);
void hwpoison_filter_register(hwpoison_filter_func_t *filter);
void hwpoison_filter_unregister(void);

#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
unsigned long page_mapped_in_vma(const struct page *page,
		struct vm_area_struct *vma);

#else
static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
	return -EBUSY;
}
#endif

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
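/*
 * Illustrative sketch only: because the watermark selectors are an index
 * rather than independent bits, the page allocator extracts them with the
 * mask, roughly (alloc_flags is a hypothetical local combining the ALLOC_*
 * values in this header):
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */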
/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_TRYLOCK		0x400 /* Only use spin_trylock in allocation path */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask);
unsigned int get_vm_area_page_order(struct vm_struct *vm);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
		unsigned long end, pgprot_t prot,
		struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

static inline bool vma_is_single_threaded_private(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return atomic_read(&vma->vm_mm->mm_users) == 1;
}

#ifdef CONFIG_NUMA_BALANCING
bool folio_can_map_prot_numa(struct folio *folio, struct vm_area_struct *vma,
		bool is_private_single_threaded);
#else
static inline bool folio_can_map_prot_numa(struct folio *folio,
		struct vm_area_struct *vma, bool is_private_single_threaded)
{
	return false;
}
#endif

int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
		       unsigned long addr, int *flags, bool writable,
		       int *last_cpupid);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_folio(struct folio *folio);

struct vm_struct *__get_vm_area_node(unsigned long size,
				     unsigned long align, unsigned long shift,
				     unsigned long vm_flags, unsigned long start,
				     unsigned long end, int node, gfp_t gfp_mask,
				     const void *caller);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);
/*
 * Parses a string with mem suffixes into its order. Useful to parse kernel
 * parameters.
 */
static inline int get_order_from_str(const char *size_str,
				     unsigned long valid_orders)
{
	unsigned long size;
	char *endptr;
	int order;

	size = memparse(size_str, &endptr);

	if (!is_power_of_2(size))
		return -EINVAL;
	order = get_order(size);
	if (BIT(order) & ~valid_orders)
		return -EINVAL;

	return order;
}
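/*
 * Worked example (illustrative): for 4kB pages, "2M" parses to 0x200000
 * bytes, which is a power of two, so get_order() yields 9; the result is
 * returned only if BIT(9) is set in @valid_orders, otherwise -EINVAL.
 */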
enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that KSM pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

extern bool mirrored_kernelcore;
bool memblock_has_mirror(void);
void memblock_free_all(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when soft-dirty is not compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
	 * would be constantly true.
	 */
	if (!pgtable_supports_soft_dirty())
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * vma flag is *not* set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}

static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);
void __meminit __init_page_from_nid(unsigned long pfn, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

int shmem_add_to_page_cache(struct folio *folio,
			    struct address_space *mapping,
			    pgoff_t index, void *expected, gfp_t gfp);
int shmem_inode_acct_blocks(struct inode *inode, long pages);
bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped);

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)
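/*
 * Illustrative sketch only: page cache code sets this up before modifying
 * the xarray, so that node updates feed the workingset shadow tracking:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 *	// ... locked xas_store() sequence follows ...
 */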
/* mremap.c */
unsigned long move_page_tables(struct pagetable_move_control *pmc);

#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
#else /* CONFIG_UNACCEPTED_MEMORY */
static inline void accept_page(struct page *page)
{
}
#endif /* CONFIG_UNACCEPTED_MEMORY */

/* pagewalk.c */
int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);
int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);
int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd, void *private);

void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);

void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn);
int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t pgprot);

static inline void io_remap_pfn_range_prepare(struct vm_area_desc *desc,
		unsigned long orig_pfn, unsigned long size)
{
	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);

	return remap_pfn_range_prepare(desc, pfn);
}

static inline int io_remap_pfn_range_complete(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orig_pfn, unsigned long size,
		pgprot_t orig_prot)
{
	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
	const pgprot_t prot = pgprot_decrypted(orig_prot);

	return remap_pfn_range_complete(vma, addr, pfn, size, prot);
}

#endif	/* __MM_INTERNAL_H */