/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/tracepoint-defs.h>

/* Internal core VMA manipulation functions. */
#include "vma.h"

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount. Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore().
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
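
/*
 * Worked example (illustrative values, not from this file): for an order-2
 * folio (4 pages) whose first page sits at swap offset 0x104, any entry
 * inside the folio rounds down to the folio's first entry:
 *
 *	entry.val == 0x107, folio_nr_pages(folio) == 4
 *		-> ALIGN_DOWN(0x107, 4) == 0x104
 */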

static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *	       first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *	       first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we reached the end of the folio. In
		 * corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
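
/*
 * A typical caller pattern looks roughly like this (illustrative sketch;
 * 'folio', 'addr', 'ptep' and 'end' are assumed to come from an ongoing
 * page table walk under the PTL):
 *
 *	pte_t pte = ptep_get(ptep);
 *	int max_nr = (end - addr) >> PAGE_SHIFT;
 *	int nr = 1;
 *
 *	if (pte_present(pte) && folio_test_large(folio))
 *		nr = folio_pte_batch(folio, addr, ptep, pte, max_nr,
 *				     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY,
 *				     NULL, NULL, NULL);
 *
 *	(process the 'nr' PTEs as one unit, then advance addr and ptep by nr)
 */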

/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	forward or backward by delta
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 * @delta: The direction and the offset we are moving; forward if delta
 *	 is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + delta)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}

/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	return pte_move_swp_offset(pte, 1);
}
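
/*
 * For example (illustrative): if @pte encodes swap entry (type 1, offset 7)
 * with the soft-dirty swp pte bit set, pte_move_swp_offset(pte, 2) yields
 * (type 1, offset 9) with soft-dirty still set, and pte_next_swp_offset(pte)
 * yields (type 1, offset 8).
 */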

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t *ptep = start_ptep + 1;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;

		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
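
/*
 * Usage sketch (illustrative; e.g. when zapping a range of swap entries.
 * 'ptep', 'addr' and 'end' are assumed to come from a page table walk):
 *
 *	pte_t pte = ptep_get(ptep);
 *
 *	if (is_swap_pte(pte) && !non_swap_entry(pte_to_swp_entry(pte))) {
 *		int nr = swap_pte_batch(ptep, (end - addr) >> PAGE_SHIFT, pte);
 *
 *		(all 'nr' entries share the swap type and have consecutive
 *		 offsets and matching swp pte bits, so they can be released
 *		 as one unit)
 *	}
 */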
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
		      struct vm_area_struct *vma,
		      unsigned long addr, unsigned long end,
		      struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};
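
/*
 * The allocator fast path fills this in roughly as follows (simplified
 * sketch of prepare_alloc_pages() in mm/page_alloc.c):
 *
 *	struct alloc_context ac = {
 *		.zonelist	 = node_zonelist(nid, gfp),
 *		.nodemask	 = nodemask,
 *		.migratetype	 = gfp_migratetype(gfp),
 *		.highest_zoneidx = gfp_zone(gfp),
 *	};
 */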

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * This function checks whether a page is free && is the buddy we are
 * looking for. We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
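
/*
 * Worked example, using the numbers from the comment above: for pfn 8 at
 * order 1, __find_buddy_pfn(8, 1) == 8 ^ (1 << 1) == 10, and the order-2
 * parent of either buddy starts at 8 & ~(1 << 1) == 8.
 */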

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *       page_to_pfn().
 *
 * The found buddy can be a non PageBuddy, out of @page's zone, or its order is
 * not the same as @page. The validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
				unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}
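
/*
 * E.g. (illustrative): folio_set_order(folio, 9) stores the order in the
 * low byte of _flags_1 and, on 64-bit, caches _folio_nr_pages as
 * 1 << 9 == 512.
 */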

void __folio_undo_large_rmappable(struct folio *folio);
static inline void folio_undo_large_rmappable(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return;

	__folio_undo_large_rmappable(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);
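
/*
 * Taken together, prep_compound_page() amounts to roughly this (simplified
 * sketch of the mm/page_alloc.c implementation):
 *
 *	__SetPageHead(page);
 *	for (i = 1; i < (1 << order); i++)
 *		prep_compound_tail(page, i);
 *	prep_compound_head(page, order);
 */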

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
				 unsigned long start, unsigned long end,
				 int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
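
/*
 * E.g. (illustrative): a fallback scan over migratetypes can skip empty
 * lists cheaply:
 *
 *	for (mt = 0; mt < MIGRATE_TYPES; mt++) {
 *		if (free_area_empty(area, mt))
 *			continue;
 *		(consider stealing pages of this migratetype)
 *	}
 */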

/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			    unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "Fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function just checks whether the folio
 * range is within the range [start, end). Callers that care about the page
 * table association need to check the page table themselves.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the
 * page table of the VMA, and that the range [start, end) intersects the VMA
 * range. The caller wants to know whether the folio is fully associated with
 * the range. It first calls this function to check whether the folio is in
 * the range, and then checks the page table to know whether the folio is
 * fully mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}
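
/*
 * E.g. (illustrative, mlock-style usage): a caller that already knows one
 * page of 'folio' is mapped in 'vma' can pre-check full containment before
 * paying for a page table walk:
 *
 *	if (folio_within_vma(folio, vma))
 *		(check the page table to confirm the folio is fully mapped)
 */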

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				   struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
				     struct vm_area_struct *vma)
{
	/*
	 * munlock whenever this function is called. Ideally, we should only
	 * munlock if some page of the folio was unmapped from the VMA,
	 * leaving the folio no longer fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we always
	 * munlock the folio and let page reclaim correct it if that is wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
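
/*
 * E.g. (illustrative call; real callers live in the memory init code):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		       nid, zone_idx, start_pfn, end_pfn);
 */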

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *folio_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
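
/*
 * The allocator uses the low bits as the watermark index, e.g.
 * (illustrative):
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */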

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
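
/*
 * Simplified sketch of how __zone_watermark_ok() in mm/page_alloc.c uses
 * these (details elided; the exact fractions are documented above):
 *
 *	if (unlikely(alloc_flags & ALLOC_RESERVES)) {
 *		if (alloc_flags & ALLOC_MIN_RESERVE)
 *			min -= min / 2;
 *		if (alloc_flags & ALLOC_OOM)
 *			min -= min / 2;
 *	}
 */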

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}
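
/*
 * Usage sketch (illustrative; the real check lives in GUP code such as
 * follow_page_pte()): a write-protected PTE that is only being pinned makes
 * the caller bail out so the fault path can unshare the page first:
 *
 *	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
 *		return ERR_PTR(-EMLINK);
 */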

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when without soft-dirty being compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, then !(vm_flags & VM_SOFTDIRTY)
	 * will be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * VM_SOFTDIRTY flag is *not* set on the vma.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}

static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

#ifdef CONFIG_64BIT
static inline int can_do_mseal(unsigned long flags)
{
	if (flags)
		return -EINVAL;

	return 0;
}

bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end);
bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior);
#else
static inline int can_do_mseal(unsigned long flags)
{
	return -EPERM;
}

static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end)
{
	return true;
}

static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior)
{
	return true;
}
#endif

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;

/* mremap.c */
unsigned long move_page_tables(struct vm_area_struct *vma,
	unsigned long old_addr, struct vm_area_struct *new_vma,
	unsigned long new_addr, unsigned long len,
	bool need_rmap_locks, bool for_stack);

#endif /* __MM_INTERNAL_H */