/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;
/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
/*
 * Unlike WARN_ON_ONCE(), no warning is issued when the caller
 * specifies __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
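
/*
 * A minimal usage sketch (hypothetical caller; the specific check is an
 * assumption for illustration, not taken from this file):
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */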

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount. Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore().
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
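
/*
 * A minimal usage sketch (assumed caller, mirroring how a swap fault
 * handler might restore arch-specific metadata for the whole folio):
 *
 *	arch_swap_restore(folio_swap(entry, folio), folio);
 */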

static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *	       first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *	       first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we reached the end of the folio. In
		 * corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
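
/*
 * A minimal usage sketch (hypothetical caller; assumes the PTL is held,
 * ptent is a present PTE of a large folio, and max_nr was clamped so the
 * scan stays within the current page table):
 *
 *	fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 *	int nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
 *				 fpb_flags, NULL, NULL, NULL);
 *
 * The caller would then process nr entries at once, advancing addr by
 * nr * PAGE_SIZE and pte by nr.
 */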

/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + 1)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t *ptep = start_ptep + 1;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;

		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
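
/*
 * A minimal usage sketch (hypothetical caller; assumes ptent was already
 * checked with is_swap_pte() and non_swap_entry(), and max_nr was clamped
 * to the current page table):
 *
 *	int nr = swap_pte_batch(pte, max_nr, ptent);
 *
 * The caller would then clear or free the nr consecutive swap entries as
 * one unit instead of one at a time.
 */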
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
		      struct vm_area_struct *vma,
		      unsigned long addr, unsigned long end,
		      struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/* Convert a number of pages to a size in kilobytes */
#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on a lower zone than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since a zone higher than this index cannot
	 * be used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
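
/*
 * A usage sketch following the rules above (hypothetical lockless scanner;
 * the PFN-skipping logic is an assumption for illustration):
 *
 *	if (PageBuddy(page)) {
 *		unsigned long order = buddy_order_unsafe(page);
 *
 *		if (order > 0 && order <= MAX_PAGE_ORDER)
 *			pfn += (1UL << order) - 1;
 *	}
 */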

/*
 * This function checks whether a page is free and is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 * For example, the order 2 parent of the order 1 buddy #10 is:
 *     P = 10 & ~(1 << 1) = 8
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be non-PageBuddy, outside of @page's zone, or of an
 * order different from @page's. Validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
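
/*
 * A minimal usage sketch (hypothetical merge loop in the style of
 * __free_one_page(); the label is an assumption for illustration):
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (!buddy)
 *		goto done_merging;
 */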

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
				unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

void folio_undo_large_rmappable(struct folio *folio);

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
			    gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
				 unsigned long start, unsigned long end,
				 int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			    unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "Fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function just checks whether the folio
 * range is within the range [start, end). The caller needs to do the page
 * table check itself if it cares about the page table association.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the page
 * table of the VMA, and the range [start, end) intersects the VMA range. The
 * caller wants to know whether the folio is fully associated with the range.
 * It calls this function first to check whether the folio is in the range,
 * and then checks the page table to know whether the folio is fully mapped
 * to the range (see the sketch after folio_within_vma() below).
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}
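
/*
 * A minimal usage sketch of the two-step check described above
 * (hypothetical mlock-style caller):
 *
 *	if (!folio_within_vma(folio, vma))
 *		return;
 *	... then walk the page table entries covering the folio to
 *	confirm it really is fully mapped before acting on it ...
 */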

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				   struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
				     struct vm_area_struct *vma)
{
	/*
	 * Munlock whenever this function is called. Ideally, we should only
	 * munlock if some page of the folio is unmapped from the VMA,
	 * leaving the folio no longer fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we always
	 * munlock the folio and let page reclaim correct it if that was
	 * wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
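
/*
 * A minimal usage sketch (hypothetical verification site; the message and
 * arguments are assumptions for illustration):
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "zonelist for node %d built\n", nid);
 */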

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
int __must_check try_grab_page(struct page *page, unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

/*
 * mm/mmap.c
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates, for pages that are write-protected in the page table, whether
 * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the GUP pin
 * will remain consistent with the pages mapped into the page tables of the
 * MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}
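
/*
 * A minimal usage sketch (hypothetical GUP-style caller; the surrounding
 * pte check and error handling are assumptions for illustration):
 *
 *	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
 *		return ERR_PTR(-EMLINK);
 */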

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when soft-dirty is not compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, and then !(vm_flags & VM_SOFTDIRTY)
	 * will be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * VM_SOFTDIRTY vma flag is *not* set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
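
/*
 * A minimal usage sketch of the preallocate-then-store pattern above
 * (hypothetical caller inserting a new VMA):
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);
 */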

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

#ifdef CONFIG_64BIT
static inline int can_do_mseal(unsigned long flags)
{
	if (flags)
		return -EINVAL;

	return 0;
}

bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end);
bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior);
#else
static inline int can_do_mseal(unsigned long flags)
{
	return -EPERM;
}

static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end)
{
	return true;
}

static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior)
{
	return true;
}
#endif

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;

#endif /* __MM_INTERNAL_H */