1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3 *
4 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9
10 #include <linux/fs.h>
11 #include <linux/khugepaged.h>
12 #include <linux/mm.h>
13 #include <linux/mm_inline.h>
14 #include <linux/pagemap.h>
15 #include <linux/pagewalk.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/leafops.h>
19 #include <linux/swap_cgroup.h>
20 #include <linux/tracepoint-defs.h>
21
22 /* Internal core VMA manipulation functions. */
23 #include "vma.h"
24
25 struct folio_batch;
26
27 /*
28 * Maintains state across a page table move. The operation assumes both source
29 * and destination VMAs already exist and are specified by the user.
30 *
31 * Partial moves are permitted, but the old and new ranges must both reside
32 * within a VMA.
33 *
34 * The mmap lock must be held in write mode, and VMA write locks must be held on
35 * any VMA that is visible.
36 *
37 * Use the PAGETABLE_MOVE() macro to initialise this struct.
38 *
39 * The old_addr and new_addr fields are updated as the page table move is
40 * executed.
41 *
42 * NOTE: The page table move is performed by reading from [old_addr, old_end),
43 * and old_addr may be updated for better page table alignment, so len_in
44 * represents the length of the range being copied as specified by the user.
45 */
46 struct pagetable_move_control {
47 struct vm_area_struct *old; /* Source VMA. */
48 struct vm_area_struct *new; /* Destination VMA. */
49 unsigned long old_addr; /* Address from which the move begins. */
50 unsigned long old_end; /* Exclusive address at which old range ends. */
51 unsigned long new_addr; /* Address to move page tables to. */
52 unsigned long len_in; /* Bytes to remap specified by user. */
53
54 bool need_rmap_locks; /* Do rmap locks need to be taken? */
55 bool for_stack; /* Is this an early temp stack being moved? */
56 };
57
58 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \
59 struct pagetable_move_control name = { \
60 .old = old_, \
61 .new = new_, \
62 .old_addr = old_addr_, \
63 .old_end = (old_addr_) + (len_), \
64 .new_addr = new_addr_, \
65 .len_in = len_, \
66 }
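/*
 * For example, a caller relocating page tables from old_addr to new_addr in an
 * mremap()-style operation would typically initialise the control on the stack
 * and hand it to the page table mover (a sketch only; the consuming function
 * is assumed to be move_page_tables() or similar):
 *
 *	PAGETABLE_MOVE(pmc, vma, new_vma, old_addr, new_addr, len);
 *
 *	moved = move_page_tables(&pmc);
 *
 * pmc.old_addr and pmc.new_addr advance as tables are moved, while pmc.len_in
 * keeps the caller's original length for later accounting.
 */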
67
68 /*
69 * The set of flags that only affect watermark checking and reclaim
70 * behaviour. This is used by the MM to obey the caller constraints
71 * about IO, FS and watermark checking while ignoring placement
72 * hints such as HIGHMEM usage.
73 */
74 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
75 __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
76 __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
77 __GFP_NOLOCKDEP)
78
79 /* The GFP flags allowed during early boot */
80 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
81
82 /* Control allocation cpuset and node placement constraints */
83 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
84
85 /* Do not use these with a slab allocator */
86 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
87
88 /*
89 * Different from WARN_ON_ONCE(), no warning will be issued
90 * when we specify __GFP_NOWARN.
91 */
92 #define WARN_ON_ONCE_GFP(cond, gfp) ({ \
93 static bool __section(".data..once") __warned; \
94 int __ret_warn_once = !!(cond); \
95 \
96 if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
97 __warned = true; \
98 WARN_ON(1); \
99 } \
100 unlikely(__ret_warn_once); \
101 })
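/*
 * Like WARN_ON_ONCE(), the macro evaluates to the condition, so callers can
 * warn and bail out in one step while letting __GFP_NOWARN suppress the splat.
 * An illustrative allocation-path use:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */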
102
103 void page_writeback_init(void);
104
105 /*
106 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
107 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
108 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
109 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
110 */
111 #define ENTIRELY_MAPPED 0x800000
112 #define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
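/*
 * Worked example of the numbers above: a 16GB folio holds
 * 16GB / 4kB = 0x400000 base pages, so nr_pages_mapped can legitimately reach
 * 0x400000; ENTIRELY_MAPPED (0x800000) is the next bit up and can therefore
 * never collide with a genuine per-page count in FOLIO_PAGES_MAPPED (0x7fffff).
 */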
113
114 /*
115 * Flags passed to __show_mem() and show_free_areas() to suppress output in
116 * various contexts.
117 */
118 #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
119
120 /*
121 * How many individual pages have an elevated _mapcount. Excludes
122 * the folio's entire_mapcount.
123 *
124 * Don't use this function outside of debugging code.
125 */
126 static inline int folio_nr_pages_mapped(const struct folio *folio)
127 {
128 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
129 return -1;
130 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
131 }
132
133 /*
134 * Retrieve the first entry of a folio based on a provided entry within the
135 * folio. We cannot rely on folio->swap as there is no guarantee that it has
136 * been initialized. Used for calling arch_swap_restore()
137 */
138 static inline swp_entry_t folio_swap(swp_entry_t entry,
139 const struct folio *folio)
140 {
141 swp_entry_t swap = {
142 .val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
143 };
144
145 return swap;
146 }
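/*
 * Example: for an order-4 (16 page) folio whose swap slots sit at offsets
 * 0x30-0x3f, an entry referencing offset 0x35 has its low bits cleared by
 * ALIGN_DOWN(entry.val, 16), yielding the folio's first entry at offset 0x30.
 * This relies on the offset occupying the low bits of swp_entry_t and on the
 * folio's slots forming a naturally aligned, contiguous run.
 */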
147
148 static inline void *folio_raw_mapping(const struct folio *folio)
149 {
150 unsigned long mapping = (unsigned long)folio->mapping;
151
152 return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
153 }
154
155 /*
156 * This is a file-backed mapping, and is about to be memory mapped - invoke its
157 * mmap hook and safely handle error conditions. On error, VMA hooks will be
158 * mutated.
159 *
160 * @file: File which backs the mapping.
161 * @vma: VMA which we are mapping.
162 *
163 * Returns: 0 on success, or an error code otherwise.
164 */
165 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
166 {
167 int err = vfs_mmap(file, vma);
168
169 if (likely(!err))
170 return 0;
171
172 /*
173 * OK, we tried to call the file hook for mmap(), but an error
174 * arose. The mapping is in an inconsistent state and we must not invoke
175 * any further hooks on it.
176 */
177 vma->vm_ops = &vma_dummy_vm_ops;
178
179 return err;
180 }
181
182 /*
183 * If the VMA has a close hook then close it, and since closing it might leave
184 * it in an inconsistent state which makes the use of any hooks suspect, clear
185 * them down by installing dummy empty hooks.
186 */
187 static inline void vma_close(struct vm_area_struct *vma)
188 {
189 if (vma->vm_ops && vma->vm_ops->close) {
190 vma->vm_ops->close(vma);
191
192 /*
193 * The mapping is in an inconsistent state, and no further hooks
194 * may be invoked upon it.
195 */
196 vma->vm_ops = &vma_dummy_vm_ops;
197 }
198 }
199
200 /* unmap_vmas is in mm/memory.c */
201 void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap);
202
203 #ifdef CONFIG_MMU
204
205 static inline void get_anon_vma(struct anon_vma *anon_vma)
206 {
207 atomic_inc(&anon_vma->refcount);
208 }
209
210 void __put_anon_vma(struct anon_vma *anon_vma);
211
212 static inline void put_anon_vma(struct anon_vma *anon_vma)
213 {
214 if (atomic_dec_and_test(&anon_vma->refcount))
215 __put_anon_vma(anon_vma);
216 }
217
218 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
219 {
220 down_write(&anon_vma->root->rwsem);
221 }
222
223 static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
224 {
225 return down_write_trylock(&anon_vma->root->rwsem);
226 }
227
228 static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
229 {
230 up_write(&anon_vma->root->rwsem);
231 }
232
233 static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
234 {
235 down_read(&anon_vma->root->rwsem);
236 }
237
238 static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
239 {
240 return down_read_trylock(&anon_vma->root->rwsem);
241 }
242
243 static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
244 {
245 up_read(&anon_vma->root->rwsem);
246 }
247
248 struct anon_vma *folio_get_anon_vma(const struct folio *folio);
249
250 /* Operations which modify VMAs. */
251 enum vma_operation {
252 VMA_OP_SPLIT,
253 VMA_OP_MERGE_UNFAULTED,
254 VMA_OP_REMAP,
255 VMA_OP_FORK,
256 };
257
258 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
259 enum vma_operation operation);
260 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma);
261 int __anon_vma_prepare(struct vm_area_struct *vma);
262 void unlink_anon_vmas(struct vm_area_struct *vma);
263
264 static inline int anon_vma_prepare(struct vm_area_struct *vma)
265 {
266 if (likely(vma->anon_vma))
267 return 0;
268
269 return __anon_vma_prepare(vma);
270 }
271
272 /* Flags for folio_pte_batch(). */
273 typedef int __bitwise fpb_t;
274
275 /* Compare PTEs respecting the dirty bit. */
276 #define FPB_RESPECT_DIRTY ((__force fpb_t)BIT(0))
277
278 /* Compare PTEs respecting the soft-dirty bit. */
279 #define FPB_RESPECT_SOFT_DIRTY ((__force fpb_t)BIT(1))
280
281 /* Compare PTEs respecting the writable bit. */
282 #define FPB_RESPECT_WRITE ((__force fpb_t)BIT(2))
283
284 /*
285 * Merge PTE write bits: if any PTE in the batch is writable, modify the
286 * PTE at @ptentp to be writable.
287 */
288 #define FPB_MERGE_WRITE ((__force fpb_t)BIT(3))
289
290 /*
291 * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
292 * modify the PTE at @ptentp to be young or dirty, respectively.
293 */
294 #define FPB_MERGE_YOUNG_DIRTY ((__force fpb_t)BIT(4))
295
296 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
297 {
298 if (!(flags & FPB_RESPECT_DIRTY))
299 pte = pte_mkclean(pte);
300 if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
301 pte = pte_clear_soft_dirty(pte);
302 if (likely(!(flags & FPB_RESPECT_WRITE)))
303 pte = pte_wrprotect(pte);
304 return pte_mkold(pte);
305 }
306
307 /**
308 * folio_pte_batch_flags - detect a PTE batch for a large folio
309 * @folio: The large folio to detect a PTE batch for.
310 * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
311 * @ptep: Page table pointer for the first entry.
312 * @ptentp: Pointer to a COPY of the first page table entry whose flags this
313 * function updates based on @flags if appropriate.
314 * @max_nr: The maximum number of table entries to consider.
315 * @flags: Flags to modify the PTE batch semantics.
316 *
317 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
318 * pages of the same large folio in a single VMA and a single page table.
319 *
320 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
321 * the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
322 * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
323 *
324 * @ptep must map any page of the folio. max_nr must be at least one and
325 * must be limited by the caller so scanning cannot exceed a single VMA and
326 * a single page table.
327 *
328 * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
329 * be updated: it's crucial that a pointer to a COPY of the first
330 * page table entry, obtained through ptep_get(), is provided as @ptentp.
331 *
332 * This function will be inlined to optimize based on the input parameters;
333 * consider using folio_pte_batch() instead if applicable.
334 *
335 * Return: the number of table entries in the batch.
336 */
337 static inline unsigned int folio_pte_batch_flags(struct folio *folio,
338 struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
339 unsigned int max_nr, fpb_t flags)
340 {
341 bool any_writable = false, any_young = false, any_dirty = false;
342 pte_t expected_pte, pte = *ptentp;
343 unsigned int nr, cur_nr;
344
345 VM_WARN_ON_FOLIO(!pte_present(pte), folio);
346 VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
347 VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
348 /*
349 * Ensure this is a pointer to a copy not a pointer into a page table.
350 * If this is a stack value, it won't be a valid virtual address, but
351 * that's fine because it also cannot be pointing into the page table.
352 */
353 VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));
354
355 /* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
356 max_nr = min_t(unsigned long, max_nr,
357 folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
358
359 nr = pte_batch_hint(ptep, pte);
360 expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
361 ptep = ptep + nr;
362
363 while (nr < max_nr) {
364 pte = ptep_get(ptep);
365
366 if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
367 break;
368
369 if (flags & FPB_MERGE_WRITE)
370 any_writable |= pte_write(pte);
371 if (flags & FPB_MERGE_YOUNG_DIRTY) {
372 any_young |= pte_young(pte);
373 any_dirty |= pte_dirty(pte);
374 }
375
376 cur_nr = pte_batch_hint(ptep, pte);
377 expected_pte = pte_advance_pfn(expected_pte, cur_nr);
378 ptep += cur_nr;
379 nr += cur_nr;
380 }
381
382 if (any_writable)
383 *ptentp = pte_mkwrite(*ptentp, vma);
384 if (any_young)
385 *ptentp = pte_mkyoung(*ptentp);
386 if (any_dirty)
387 *ptentp = pte_mkdirty(*ptentp);
388
389 return min(nr, max_nr);
390 }
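/*
 * A minimal sketch of the expected calling pattern (locking, folio lookup and
 * the clamping of max_nr are assumed to have been handled by the caller):
 *
 *	pte_t ptent = ptep_get(ptep);	// a copy, never a pointer into the table
 *	unsigned int nr;
 *
 *	nr = folio_pte_batch_flags(folio, vma, ptep, &ptent, max_nr,
 *				   FPB_MERGE_WRITE | FPB_MERGE_YOUNG_DIRTY);
 *
 * On return, ptent has the write/young/dirty bits of the whole batch merged
 * in, and the nr PTEs starting at ptep map nr consecutive pages of folio.
 */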
391
392 unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
393 unsigned int max_nr);
394
395 /**
396 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
397 * forward or backward by delta
398 * @pte: The initial pte state; must be a swap entry
399 * @delta: The direction and the offset we are moving; forward if delta
400 * is positive; backward if delta is negative
401 *
402 * Moves the swap offset, while maintaining all other fields, including
403 * swap type, and any swp pte bits. The resulting pte is returned.
404 */
405 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
406 {
407 const softleaf_t entry = softleaf_from_pte(pte);
408 pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
409 (swp_offset(entry) + delta)));
410
411 if (pte_swp_soft_dirty(pte))
412 new = pte_swp_mksoft_dirty(new);
413 if (pte_swp_exclusive(pte))
414 new = pte_swp_mkexclusive(new);
415 if (pte_swp_uffd_wp(pte))
416 new = pte_swp_mkuffd_wp(new);
417
418 return new;
419 }
420
421
422 /**
423 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
424 * @pte: The initial pte state; must be a swap entry.
425 *
426 * Increments the swap offset, while maintaining all other fields, including
427 * swap type, and any swp pte bits. The resulting pte is returned.
428 */
429 static inline pte_t pte_next_swp_offset(pte_t pte)
430 {
431 return pte_move_swp_offset(pte, 1);
432 }
433
434 /**
435 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
436 * @start_ptep: Page table pointer for the first entry.
437 * @max_nr: The maximum number of table entries to consider.
438 * @pte: Page table entry for the first entry.
439 *
440 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
441 * containing swap entries all with consecutive offsets and targeting the same
442 * swap type, all with matching swp pte bits.
443 *
444 * max_nr must be at least one and must be limited by the caller so scanning
445 * cannot exceed a single page table.
446 *
447 * Return: the number of table entries in the batch.
448 */
449 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
450 {
451 pte_t expected_pte = pte_next_swp_offset(pte);
452 const pte_t *end_ptep = start_ptep + max_nr;
453 const softleaf_t entry = softleaf_from_pte(pte);
454 pte_t *ptep = start_ptep + 1;
455 unsigned short cgroup_id;
456
457 VM_WARN_ON(max_nr < 1);
458 VM_WARN_ON(!softleaf_is_swap(entry));
459
460 cgroup_id = lookup_swap_cgroup_id(entry);
461 while (ptep < end_ptep) {
462 softleaf_t entry;
463
464 pte = ptep_get(ptep);
465
466 if (!pte_same(pte, expected_pte))
467 break;
468 entry = softleaf_from_pte(pte);
469 if (lookup_swap_cgroup_id(entry) != cgroup_id)
470 break;
471 expected_pte = pte_next_swp_offset(expected_pte);
472 ptep++;
473 }
474
475 return ptep - start_ptep;
476 }
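/*
 * A rough sketch of how a caller is expected to drive this (the pte lock and
 * the actual freeing of the entries are assumed to be handled by the caller):
 *
 *	pte_t ptent = ptep_get(pte);
 *	int nr = swap_pte_batch(pte, max_nr, ptent);
 *
 * All nr entries then share the same swap type, swap cgroup and swp pte bits,
 * with consecutive offsets, so they can be cleared or freed in one go.
 */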
477 #endif /* CONFIG_MMU */
478
479 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
480 int nr_throttled);
481 static inline void acct_reclaim_writeback(struct folio *folio)
482 {
483 pg_data_t *pgdat = folio_pgdat(folio);
484 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
485
486 if (nr_throttled)
487 __acct_reclaim_writeback(pgdat, folio, nr_throttled);
488 }
489
490 static inline void wake_throttle_isolated(pg_data_t *pgdat)
491 {
492 wait_queue_head_t *wqh;
493
494 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
495 if (waitqueue_active(wqh))
496 wake_up(wqh);
497 }
498
499 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
500 static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
501 {
502 vm_fault_t ret = __vmf_anon_prepare(vmf);
503
504 if (unlikely(ret & VM_FAULT_RETRY))
505 vma_end_read(vmf->vma);
506 return ret;
507 }
508
509 vm_fault_t do_swap_page(struct vm_fault *vmf);
510 void folio_rotate_reclaimable(struct folio *folio);
511 bool __folio_end_writeback(struct folio *folio);
512 void deactivate_file_folio(struct folio *folio);
513 void folio_activate(struct folio *folio);
514
515 void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
516
517 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
518
519 struct zap_details;
520 void unmap_page_range(struct mmu_gather *tlb,
521 struct vm_area_struct *vma,
522 unsigned long addr, unsigned long end,
523 struct zap_details *details);
524 void zap_page_range_single_batched(struct mmu_gather *tlb,
525 struct vm_area_struct *vma, unsigned long addr,
526 unsigned long size, struct zap_details *details);
527 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
528 gfp_t gfp);
529
530 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
531 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
532 static inline void force_page_cache_readahead(struct address_space *mapping,
533 struct file *file, pgoff_t index, unsigned long nr_to_read)
534 {
535 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
536 force_page_cache_ra(&ractl, nr_to_read);
537 }
538
539 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
540 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
541 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
542 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
543 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
544 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
545 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
546 loff_t end);
547 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
548 unsigned long mapping_try_invalidate(struct address_space *mapping,
549 pgoff_t start, pgoff_t end, unsigned long *nr_failed);
550
551 /**
552 * folio_evictable - Test whether a folio is evictable.
553 * @folio: The folio to test.
554 *
555 * Test whether @folio is evictable -- i.e., should be placed on
556 * active/inactive lists vs unevictable list.
557 *
558 * Reasons folio might not be evictable:
559 * 1. folio's mapping marked unevictable
560 * 2. One of the pages in the folio is part of an mlocked VMA
561 */
562 static inline bool folio_evictable(struct folio *folio)
563 {
564 bool ret;
565
566 /* Prevent address_space of inode and swap cache from being freed */
567 rcu_read_lock();
568 ret = !mapping_unevictable(folio_mapping(folio)) &&
569 !folio_test_mlocked(folio);
570 rcu_read_unlock();
571 return ret;
572 }
573
574 /*
575 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
576 * a count of one.
577 */
578 static inline void set_page_refcounted(struct page *page)
579 {
580 VM_BUG_ON_PAGE(PageTail(page), page);
581 VM_BUG_ON_PAGE(page_ref_count(page), page);
582 set_page_count(page, 1);
583 }
584
585 static inline void set_pages_refcounted(struct page *page, unsigned long nr_pages)
586 {
587 unsigned long pfn = page_to_pfn(page);
588
589 for (; nr_pages--; pfn++)
590 set_page_refcounted(pfn_to_page(pfn));
591 }
592
593 /*
594 * Return true if a folio needs ->release_folio() calling upon it.
595 */
596 static inline bool folio_needs_release(struct folio *folio)
597 {
598 struct address_space *mapping = folio_mapping(folio);
599
600 return folio_has_private(folio) ||
601 (mapping && mapping_release_always(mapping));
602 }
603
604 extern unsigned long highest_memmap_pfn;
605
606 /*
607 * Maximum number of reclaim retries without progress before the OOM
608 * killer is considered the only way forward.
609 */
610 #define MAX_RECLAIM_RETRIES 16
611
612 /*
613 * in mm/vmscan.c:
614 */
615 bool folio_isolate_lru(struct folio *folio);
616 void folio_putback_lru(struct folio *folio);
617 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
618 int user_proactive_reclaim(char *buf,
619 struct mem_cgroup *memcg, pg_data_t *pgdat);
620
621 /*
622 * in mm/rmap.c:
623 */
624 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
625
626 /*
627 * in mm/page_alloc.c
628 */
629 #define K(x) ((x) << (PAGE_SHIFT-10))
630
631 extern char * const zone_names[MAX_NR_ZONES];
632
633 /* perform sanity checks on struct pages being allocated or freed */
634 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
635
636 extern int min_free_kbytes;
637 extern int defrag_mode;
638
639 void setup_per_zone_wmarks(void);
640 void calculate_min_free_kbytes(void);
641 int __meminit init_per_zone_wmark_min(void);
642 void page_alloc_sysctl_init(void);
643
644 /*
645 * Structure for holding the mostly immutable allocation parameters passed
646 * between functions involved in allocations, including the alloc_pages*
647 * family of functions.
648 *
649 * nodemask, migratetype and highest_zoneidx are initialized only once in
650 * __alloc_pages() and then never change.
651 *
652 * zonelist, preferred_zone and highest_zoneidx are set first in
653 * __alloc_pages() for the fast path, and might be later changed
654 * in __alloc_pages_slowpath(). All other functions pass the whole structure
655 * by a const pointer.
656 */
657 struct alloc_context {
658 struct zonelist *zonelist;
659 nodemask_t *nodemask;
660 struct zoneref *preferred_zoneref;
661 int migratetype;
662
663 /*
664 * highest_zoneidx represents highest usable zone index of
665 * the allocation request. Due to the nature of the zone,
666 * memory on lower zone than the highest_zoneidx will be
667 * protected by lowmem_reserve[highest_zoneidx].
668 *
669 * highest_zoneidx is also used by reclaim/compaction to limit
670 * the target zone since higher zone than this index cannot be
671 * usable for this allocation request.
672 */
673 enum zone_type highest_zoneidx;
674 bool spread_dirty_pages;
675 };
676
677 /*
678 * This function returns the order of a free page in the buddy system. In
679 * general, page_zone(page)->lock must be held by the caller to prevent the
680 * page from being allocated in parallel and returning garbage as the order.
681 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
682 * page cannot be allocated or merged in parallel. Alternatively, it must
683 * handle invalid values gracefully, and use buddy_order_unsafe() below.
684 */
685 static inline unsigned int buddy_order(struct page *page)
686 {
687 /* PageBuddy() must be checked by the caller */
688 return page_private(page);
689 }
690
691 /*
692 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
693 * PageBuddy() should be checked first by the caller to minimize race window,
694 * and invalid values must be handled gracefully.
695 *
696 * READ_ONCE is used so that if the caller assigns the result into a local
697 * variable and e.g. tests it for valid range before using, the compiler cannot
698 * decide to remove the variable and inline the page_private(page) multiple
699 * times, potentially observing different values in the tests and the actual
700 * use of the result.
701 */
702 #define buddy_order_unsafe(page) READ_ONCE(page_private(page))
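/*
 * The intended pattern is therefore (illustrative):
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order >= NR_PAGE_ORDERS)
 *		return;		// raced with allocation, value was garbage
 *	// every later use of 'order' sees the same snapshot due to READ_ONCE()
 */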
703
704 /*
705 * This function checks whether a page is free && is the buddy.
706 * We can coalesce a page and its buddy if
707 * (a) the buddy is not in a hole (check before calling!) &&
708 * (b) the buddy is in the buddy system &&
709 * (c) a page and its buddy have the same order &&
710 * (d) a page and its buddy are in the same zone.
711 *
712 * For recording whether a page is in the buddy system, we set PageBuddy.
713 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
714 *
715 * For recording page's order, we use page_private(page).
716 */
717 static inline bool page_is_buddy(struct page *page, struct page *buddy,
718 unsigned int order)
719 {
720 if (!page_is_guard(buddy) && !PageBuddy(buddy))
721 return false;
722
723 if (buddy_order(buddy) != order)
724 return false;
725
726 /*
727 * zone check is done late to avoid uselessly calculating
728 * zone/node ids for pages that could never merge.
729 */
730 if (page_zone_id(page) != page_zone_id(buddy))
731 return false;
732
733 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
734
735 return true;
736 }
737
738 /*
739 * Locate the struct page for both the matching buddy in our
740 * pair (buddy1) and the combined O(n+1) page they form (page).
741 *
742 * 1) Any buddy B1 will have an order O twin B2 which satisfies
743 * the following equation:
744 * B2 = B1 ^ (1 << O)
745 * For example, if the starting buddy (buddy2) is #8 its order
746 * 1 buddy is #10:
747 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
748 *
749 * 2) Any buddy B will have an order O+1 parent P which
750 * satisfies the following equation:
751 * P = B & ~(1 << O)
752 *
753 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
754 */
755 static inline unsigned long
756 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
757 {
758 return page_pfn ^ (1 << order);
759 }
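/*
 * Continuing the example above: the parent of order-1 buddies #8 and #10 is
 * P = 8 & ~(1 << 1) = 8 (equally 10 & ~(1 << 1) = 8), i.e. the combined
 * order-2 block starts at pfn 8.
 */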
760
761 /*
762 * Find the buddy of @page and validate it.
763 * @page: The input page
764 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
765 * function is used in the performance-critical __free_one_page().
766 * @order: The order of the page
767 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
768 * page_to_pfn().
769 *
770 * The found buddy can be a non PageBuddy, out of @page's zone, or its order may
771 * not be the same as @page's. Validation is necessary before using it.
772 *
773 * Return: the found buddy page or NULL if not found.
774 */
775 static inline struct page *find_buddy_page_pfn(struct page *page,
776 unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
777 {
778 unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
779 struct page *buddy;
780
781 buddy = page + (__buddy_pfn - pfn);
782 if (buddy_pfn)
783 *buddy_pfn = __buddy_pfn;
784
785 if (page_is_buddy(page, buddy, order))
786 return buddy;
787 return NULL;
788 }
789
790 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
791 unsigned long end_pfn, struct zone *zone);
792
793 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
794 unsigned long end_pfn, struct zone *zone)
795 {
796 if (zone->contiguous)
797 return pfn_to_page(start_pfn);
798
799 return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
800 }
801
802 void set_zone_contiguous(struct zone *zone);
803 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
804 unsigned long nr_pages);
805
806 static inline void clear_zone_contiguous(struct zone *zone)
807 {
808 zone->contiguous = false;
809 }
810
811 extern int __isolate_free_page(struct page *page, unsigned int order);
812 extern void __putback_isolated_page(struct page *page, unsigned int order,
813 int mt);
814 extern void memblock_free_pages(unsigned long pfn, unsigned int order);
815 extern void __free_pages_core(struct page *page, unsigned int order,
816 enum meminit_context context);
817
818 /*
819 * This will have no effect, other than possibly generating a warning, if the
820 * caller passes in a non-large folio.
821 */
822 static inline void folio_set_order(struct folio *folio, unsigned int order)
823 {
824 if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
825 return;
826 VM_WARN_ON_ONCE(order > MAX_FOLIO_ORDER);
827
828 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
829 #ifdef NR_PAGES_IN_LARGE_FOLIO
830 folio->_nr_pages = 1U << order;
831 #endif
832 }
833
834 bool __folio_unqueue_deferred_split(struct folio *folio);
835 static inline bool folio_unqueue_deferred_split(struct folio *folio)
836 {
837 if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
838 return false;
839
840 /*
841 * At this point, there is no one trying to add the folio to
842 * deferred_list. If folio is not in deferred_list, it's safe
843 * to check without acquiring the split_queue_lock.
844 */
845 if (data_race(list_empty(&folio->_deferred_list)))
846 return false;
847
848 return __folio_unqueue_deferred_split(folio);
849 }
850
851 static inline struct folio *page_rmappable_folio(struct page *page)
852 {
853 struct folio *folio = (struct folio *)page;
854
855 if (folio && folio_test_large(folio))
856 folio_set_large_rmappable(folio);
857 return folio;
858 }
859
860 static inline void prep_compound_head(struct page *page, unsigned int order)
861 {
862 struct folio *folio = (struct folio *)page;
863
864 folio_set_order(folio, order);
865 atomic_set(&folio->_large_mapcount, -1);
866 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
867 atomic_set(&folio->_nr_pages_mapped, 0);
868 if (IS_ENABLED(CONFIG_MM_ID)) {
869 folio->_mm_ids = 0;
870 folio->_mm_id_mapcount[0] = -1;
871 folio->_mm_id_mapcount[1] = -1;
872 }
873 if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
874 atomic_set(&folio->_pincount, 0);
875 atomic_set(&folio->_entire_mapcount, -1);
876 }
877 if (order > 1)
878 INIT_LIST_HEAD(&folio->_deferred_list);
879 }
880
881 static inline void prep_compound_tail(struct page *head, int tail_idx)
882 {
883 struct page *p = head + tail_idx;
884
885 p->mapping = TAIL_MAPPING;
886 set_compound_head(p, head);
887 set_page_private(p, 0);
888 }
889
890 void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
891 extern bool free_pages_prepare(struct page *page, unsigned int order);
892
893 extern int user_min_free_kbytes;
894
895 struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
896 nodemask_t *);
897 #define __alloc_frozen_pages(...) \
898 alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
899 void free_frozen_pages(struct page *page, unsigned int order);
900 void free_unref_folios(struct folio_batch *fbatch);
901
902 #ifdef CONFIG_NUMA
903 struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
904 #else
905 static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
906 {
907 return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
908 }
909 #endif
910
911 #define alloc_frozen_pages(...) \
912 alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
913
914 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
915 #define alloc_frozen_pages_nolock(...) \
916 alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
917 void free_frozen_pages_nolock(struct page *page, unsigned int order);
918
919 extern void zone_pcp_reset(struct zone *zone);
920 extern void zone_pcp_disable(struct zone *zone);
921 extern void zone_pcp_enable(struct zone *zone);
922 extern void zone_pcp_init(struct zone *zone);
923
924 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
925 phys_addr_t min_addr,
926 int nid, bool exact_nid);
927
928 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
929 unsigned long, enum meminit_context, struct vmem_altmap *, int,
930 bool);
931
932 #ifdef CONFIG_SPARSEMEM
933 void sparse_init(void);
934 #else
935 static inline void sparse_init(void) {}
936 #endif /* CONFIG_SPARSEMEM */
937
938 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
939
940 /*
941 * in mm/compaction.c
942 */
943 /*
944 * compact_control is used to track pages being migrated and the free pages
945 * they are being migrated to during memory compaction. The free_pfn starts
946 * at the end of a zone and migrate_pfn begins at the start. Movable pages
947 * are moved to the end of a zone during a compaction run and the run
948 * completes when free_pfn <= migrate_pfn
949 */
950 struct compact_control {
951 struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */
952 struct list_head migratepages; /* List of pages being migrated */
953 unsigned int nr_freepages; /* Number of isolated free pages */
954 unsigned int nr_migratepages; /* Number of pages to migrate */
955 unsigned long free_pfn; /* isolate_freepages search base */
956 /*
957 * Acts as an in/out parameter to page isolation for migration.
958 * isolate_migratepages uses it as a search base.
959 * isolate_migratepages_block will update the value to the next pfn
960 * after the last isolated one.
961 */
962 unsigned long migrate_pfn;
963 unsigned long fast_start_pfn; /* a pfn to start linear scan from */
964 struct zone *zone;
965 unsigned long total_migrate_scanned;
966 unsigned long total_free_scanned;
967 unsigned short fast_search_fail;/* failures to use free list searches */
968 short search_order; /* order to start a fast search at */
969 const gfp_t gfp_mask; /* gfp mask of a direct compactor */
970 int order; /* order a direct compactor needs */
971 int migratetype; /* migratetype of direct compactor */
972 const unsigned int alloc_flags; /* alloc flags of a direct compactor */
973 const int highest_zoneidx; /* zone index of a direct compactor */
974 enum migrate_mode mode; /* Async or sync migration mode */
975 bool ignore_skip_hint; /* Scan blocks even if marked skip */
976 bool no_set_skip_hint; /* Don't mark blocks for skipping */
977 bool ignore_block_suitable; /* Scan blocks considered unsuitable */
978 bool direct_compaction; /* False from kcompactd or /proc/... */
979 bool proactive_compaction; /* kcompactd proactive compaction */
980 bool whole_zone; /* Whole zone should/has been scanned */
981 bool contended; /* Signal lock contention */
982 bool finish_pageblock; /* Scan the remainder of a pageblock. Used
983 * when there are potentially transient
984 * isolation or migration failures to
985 * ensure forward progress.
986 */
987 bool alloc_contig; /* alloc_contig_range allocation */
988 };
989
990 /*
991 * Used in direct compaction when a page should be taken from the freelists
992 * immediately when one is created during the free path.
993 */
994 struct capture_control {
995 struct compact_control *cc;
996 struct page *page;
997 };
998
999 unsigned long
1000 isolate_freepages_range(struct compact_control *cc,
1001 unsigned long start_pfn, unsigned long end_pfn);
1002 int
1003 isolate_migratepages_range(struct compact_control *cc,
1004 unsigned long low_pfn, unsigned long end_pfn);
1005
1006 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1007 void init_cma_reserved_pageblock(struct page *page);
1008
1009 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
1010
1011 struct cma;
1012
1013 #ifdef CONFIG_CMA
1014 bool cma_validate_zones(struct cma *cma);
1015 void *cma_reserve_early(struct cma *cma, unsigned long size);
1016 void init_cma_pageblock(struct page *page);
1017 #else
1018 static inline bool cma_validate_zones(struct cma *cma)
1019 {
1020 return false;
1021 }
1022 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
1023 {
1024 return NULL;
1025 }
1026 static inline void init_cma_pageblock(struct page *page)
1027 {
1028 }
1029 #endif
1030
1031
1032 int find_suitable_fallback(struct free_area *area, unsigned int order,
1033 int migratetype, bool claimable);
1034
1035 static inline bool free_area_empty(struct free_area *area, int migratetype)
1036 {
1037 return list_empty(&area->free_list[migratetype]);
1038 }
1039
1040 /* mm/util.c */
1041 struct anon_vma *folio_anon_vma(const struct folio *folio);
1042
1043 #ifdef CONFIG_MMU
1044 void unmap_mapping_folio(struct folio *folio);
1045 extern long populate_vma_page_range(struct vm_area_struct *vma,
1046 unsigned long start, unsigned long end, int *locked);
1047 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
1048 unsigned long end, bool write, int *locked);
1049 bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
1050 unsigned long bytes);
1051
1052 /*
1053 * NOTE: This function can't tell whether the folio is "fully mapped" in the
1054 * range.
1055 * "Fully mapped" means all the pages of the folio are associated with the page
1056 * table of the range, while this function just checks whether the folio range
1057 * is within the range [start, end). The caller needs to do a page table
1058 * check if it cares about the page table association.
1059 *
1060 * Typical usage (like mlock or madvise) is:
1061 * The caller knows at least 1 page of the folio is associated with the page
1062 * table of the VMA and the range [start, end) intersects the VMA range. The
1063 * caller wants to know whether the folio is fully associated with the range.
1064 * It calls this function first to check whether the folio is in the range,
1065 * then checks the page table to know whether the folio is fully mapped to the range.
1066 */
1067 static inline bool
1068 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
1069 unsigned long start, unsigned long end)
1070 {
1071 pgoff_t pgoff, addr;
1072 unsigned long vma_pglen = vma_pages(vma);
1073
1074 VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
1075 if (start > end)
1076 return false;
1077
1078 if (start < vma->vm_start)
1079 start = vma->vm_start;
1080
1081 if (end > vma->vm_end)
1082 end = vma->vm_end;
1083
1084 pgoff = folio_pgoff(folio);
1085
1086 /* if folio start address is not in vma range */
1087 if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
1088 return false;
1089
1090 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1091
1092 return !(addr < start || end - addr < folio_size(folio));
1093 }
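/*
 * A sketch of the mlock/madvise-style pattern described above (the page table
 * walk itself is only hinted at and is assumed to run under the pte lock):
 *
 *	if (!folio_within_range(folio, vma, start, end))
 *		return;		// folio extends outside the range
 *	// The folio lies inside [start, end); now verify via the page table
 *	// that every page of the folio is mapped before treating it as
 *	// "fully mapped".
 */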
1094
1095 static inline bool
1096 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
1097 {
1098 return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
1099 }
1100
1101 /*
1102 * mlock_vma_folio() and munlock_vma_folio():
1103 * should be called with vma's mmap_lock held for read or write,
1104 * under page table lock for the pte/pmd being added or removed.
1105 *
1106 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
1107 * the end of folio_remove_rmap_*(); but new anon folios are managed by
1108 * folio_add_lru_vma() calling mlock_new_folio().
1109 */
1110 void mlock_folio(struct folio *folio);
1111 static inline void mlock_vma_folio(struct folio *folio,
1112 struct vm_area_struct *vma)
1113 {
1114 /*
1115 * The VM_SPECIAL check here serves two purposes.
1116 * 1) VM_IO check prevents migration from double-counting during mlock.
1117 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
1118 * is never left set on a VM_SPECIAL vma, there is an interval while
1119 * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
1120 * still be set while VM_SPECIAL bits are added: so ignore it then.
1121 */
1122 if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
1123 mlock_folio(folio);
1124 }
1125
1126 void munlock_folio(struct folio *folio);
1127 static inline void munlock_vma_folio(struct folio *folio,
1128 struct vm_area_struct *vma)
1129 {
1130 /*
1131 * Always munlock when this function is called. Ideally, we should
1132 * only munlock if a page of the folio is being unmapped from the VMA,
1133 * leaving the folio no longer fully mapped to the VMA.
1134 *
1135 * But it's not easy to confirm that's the situation. So we
1136 * always munlock the folio and page reclaim will correct it
1137 * if it's wrong.
1138 */
1139 if (unlikely(vma->vm_flags & VM_LOCKED))
1140 munlock_folio(folio);
1141 }
1142
1143 void mlock_new_folio(struct folio *folio);
1144 bool need_mlock_drain(int cpu);
1145 void mlock_drain_local(void);
1146 void mlock_drain_remote(int cpu);
1147
1148 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1149
1150 /**
1151 * vma_address - Find the virtual address a page range is mapped at
1152 * @vma: The vma which maps this object.
1153 * @pgoff: The page offset within its object.
1154 * @nr_pages: The number of pages to consider.
1155 *
1156 * If any page in this range is mapped by this VMA, return the first address
1157 * where any of these pages appear. Otherwise, return -EFAULT.
1158 */
1159 static inline unsigned long vma_address(const struct vm_area_struct *vma,
1160 pgoff_t pgoff, unsigned long nr_pages)
1161 {
1162 unsigned long address;
1163
1164 if (pgoff >= vma->vm_pgoff) {
1165 address = vma->vm_start +
1166 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1167 /* Check for address beyond vma (or wrapped through 0?) */
1168 if (address < vma->vm_start || address >= vma->vm_end)
1169 address = -EFAULT;
1170 } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
1171 /* Test above avoids possibility of wrap to 0 on 32-bit */
1172 address = vma->vm_start;
1173 } else {
1174 address = -EFAULT;
1175 }
1176 return address;
1177 }
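/*
 * Example: with vma->vm_start == 0x100000 and vma->vm_pgoff == 0x10, a range
 * starting at pgoff 0x15 maps at
 * 0x100000 + ((0x15 - 0x10) << PAGE_SHIFT) = 0x105000 (assuming 4kB pages),
 * provided that address is still below vma->vm_end; otherwise -EFAULT is
 * returned.
 */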
1178
1179 /*
1180 * Then at what user virtual address will none of the range be found in vma?
1181 * Assumes that vma_address() already returned a good starting address.
1182 */
1183 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
1184 {
1185 struct vm_area_struct *vma = pvmw->vma;
1186 pgoff_t pgoff;
1187 unsigned long address;
1188
1189 /* Common case, plus ->pgoff is invalid for KSM */
1190 if (pvmw->nr_pages == 1)
1191 return pvmw->address + PAGE_SIZE;
1192
1193 pgoff = pvmw->pgoff + pvmw->nr_pages;
1194 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1195 /* Check for address beyond vma (or wrapped through 0?) */
1196 if (address < vma->vm_start || address > vma->vm_end)
1197 address = vma->vm_end;
1198 return address;
1199 }
1200
1201 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
1202 struct file *fpin)
1203 {
1204 int flags = vmf->flags;
1205
1206 if (fpin)
1207 return fpin;
1208
1209 /*
1210 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
1211 * anything, so we only pin the file and drop the mmap_lock if only
1212 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
1213 */
1214 if (fault_flag_allow_retry_first(flags) &&
1215 !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
1216 fpin = get_file(vmf->vma->vm_file);
1217 release_fault_lock(vmf);
1218 }
1219 return fpin;
1220 }
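/*
 * Typical fault-path usage (a sketch of the pattern used by the filemap
 * fault handlers; labels and error handling are elided):
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	// ... start readahead or wait for the folio lock ...
 *	if (fpin) {
 *		fput(fpin);
 *		return ret | VM_FAULT_RETRY;	// the caller retries the fault
 *	}
 */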
1221 #else /* !CONFIG_MMU */
1222 static inline void unmap_mapping_folio(struct folio *folio) { }
1223 static inline void mlock_new_folio(struct folio *folio) { }
1224 static inline bool need_mlock_drain(int cpu) { return false; }
1225 static inline void mlock_drain_local(void) { }
1226 static inline void mlock_drain_remote(int cpu) { }
1227 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1228 {
1229 }
1230 #endif /* !CONFIG_MMU */
1231
1232 /* Memory initialisation debug and verification */
1233 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1234 DECLARE_STATIC_KEY_TRUE(deferred_pages);
1235
1236 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1237 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1238
1239 void init_deferred_page(unsigned long pfn, int nid);
1240
1241 enum mminit_level {
1242 MMINIT_WARNING,
1243 MMINIT_VERIFY,
1244 MMINIT_TRACE
1245 };
1246
1247 #ifdef CONFIG_DEBUG_MEMORY_INIT
1248
1249 extern int mminit_loglevel;
1250
1251 #define mminit_dprintk(level, prefix, fmt, arg...) \
1252 do { \
1253 if (level < mminit_loglevel) { \
1254 if (level <= MMINIT_WARNING) \
1255 pr_warn("mminit::" prefix " " fmt, ##arg); \
1256 else \
1257 printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1258 } \
1259 } while (0)
1260
1261 extern void mminit_verify_pageflags_layout(void);
1262 extern void mminit_verify_zonelist(void);
1263 #else
1264
1265 static inline void mminit_dprintk(enum mminit_level level,
1266 const char *prefix, const char *fmt, ...)
1267 {
1268 }
1269
1270 static inline void mminit_verify_pageflags_layout(void)
1271 {
1272 }
1273
1274 static inline void mminit_verify_zonelist(void)
1275 {
1276 }
1277 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1278
1279 #define NODE_RECLAIM_NOSCAN -2
1280 #define NODE_RECLAIM_FULL -1
1281 #define NODE_RECLAIM_SOME 0
1282 #define NODE_RECLAIM_SUCCESS 1
1283
1284 #ifdef CONFIG_NUMA
1285 extern int node_reclaim_mode;
1286
1287 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1288 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1289 #else
1290 #define node_reclaim_mode 0
1291
1292 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1293 unsigned int order)
1294 {
1295 return NODE_RECLAIM_NOSCAN;
1296 }
1297 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1298 {
1299 return NUMA_NO_NODE;
1300 }
1301 #endif
1302
1303 static inline bool node_reclaim_enabled(void)
1304 {
1305 /* Is any node_reclaim_mode bit set? */
1306 return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
1307 }
1308
1309 /*
1310 * mm/memory-failure.c
1311 */
1312 #ifdef CONFIG_MEMORY_FAILURE
1313 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1314 void shake_folio(struct folio *folio);
1315 typedef int hwpoison_filter_func_t(struct page *p);
1316 void hwpoison_filter_register(hwpoison_filter_func_t *filter);
1317 void hwpoison_filter_unregister(void);
1318
1319 #define MAGIC_HWPOISON 0x48575053U /* HWPS */
1320 void SetPageHWPoisonTakenOff(struct page *page);
1321 void ClearPageHWPoisonTakenOff(struct page *page);
1322 bool take_page_off_buddy(struct page *page);
1323 bool put_page_back_buddy(struct page *page);
1324 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1325 void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
1326 struct vm_area_struct *vma, struct list_head *to_kill,
1327 unsigned long ksm_addr);
1328 unsigned long page_mapped_in_vma(const struct page *page,
1329 struct vm_area_struct *vma);
1330
1331 #else
1332 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
1333 {
1334 return -EBUSY;
1335 }
1336 #endif
1337
1338 extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
1339 unsigned long, unsigned long,
1340 unsigned long, unsigned long);
1341
1342 extern void set_pageblock_order(void);
1343 unsigned long reclaim_pages(struct list_head *folio_list);
1344 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1345 struct list_head *folio_list);
1346 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1347 #define ALLOC_WMARK_MIN WMARK_MIN
1348 #define ALLOC_WMARK_LOW WMARK_LOW
1349 #define ALLOC_WMARK_HIGH WMARK_HIGH
1350 #define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
1351
1352 /* Mask to get the watermark bits */
1353 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
1354
1355 /*
1356 * Only MMU archs have async OOM victim reclaim - aka the oom_reaper - so we
1357 * cannot assume that reduced access to memory reserves is sufficient for
1358 * !MMU.
1359 */
1360 #ifdef CONFIG_MMU
1361 #define ALLOC_OOM 0x08
1362 #else
1363 #define ALLOC_OOM ALLOC_NO_WATERMARKS
1364 #endif
1365
1366 #define ALLOC_NON_BLOCK 0x10 /* Caller cannot block. Allow access
1367 * to 25% of the min watermark or
1368 * 62.5% if __GFP_HIGH is set.
1369 */
1370 #define ALLOC_MIN_RESERVE 0x20 /* __GFP_HIGH set. Allow access to 50%
1371 * of the min watermark.
1372 */
1373 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
1374 #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
1375 #ifdef CONFIG_ZONE_DMA32
1376 #define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */
1377 #else
1378 #define ALLOC_NOFRAGMENT 0x0
1379 #endif
1380 #define ALLOC_HIGHATOMIC 0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1381 #define ALLOC_TRYLOCK 0x400 /* Only use spin_trylock in allocation path */
1382 #define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1383
1384 /* Flags that allow allocations below the min watermark. */
1385 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1386
1387 enum ttu_flags;
1388 struct tlbflush_unmap_batch;
1389
1390
1391 /*
1392 * only for MM internal work items which do not depend on
1393 * any allocations or locks which might depend on allocations
1394 */
1395 extern struct workqueue_struct *mm_percpu_wq;
1396
1397 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1398 void try_to_unmap_flush(void);
1399 void try_to_unmap_flush_dirty(void);
1400 void flush_tlb_batched_pending(struct mm_struct *mm);
1401 #else
1402 static inline void try_to_unmap_flush(void)
1403 {
1404 }
1405 static inline void try_to_unmap_flush_dirty(void)
1406 {
1407 }
1408 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1409 {
1410 }
1411 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1412
1413 extern const struct trace_print_flags pageflag_names[];
1414 extern const struct trace_print_flags vmaflag_names[];
1415 extern const struct trace_print_flags gfpflag_names[];
1416
1417 void setup_zone_pageset(struct zone *zone);
1418
1419 struct migration_target_control {
1420 int nid; /* preferred node id */
1421 nodemask_t *nmask;
1422 gfp_t gfp_mask;
1423 enum migrate_reason reason;
1424 };
1425
1426 /*
1427 * mm/filemap.c
1428 */
1429 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1430 struct folio *folio, loff_t fpos, size_t size);
1431
1432 /*
1433 * mm/vmalloc.c
1434 */
1435 #ifdef CONFIG_MMU
1436 void __init vmalloc_init(void);
1437 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1438 pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask);
1439 unsigned int get_vm_area_page_order(struct vm_struct *vm);
1440 #else
1441 static inline void vmalloc_init(void)
1442 {
1443 }
1444
1445 static inline
1446 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1447 pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
1448 {
1449 return -EINVAL;
1450 }
1451 #endif
1452
1453 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1454 unsigned long end, pgprot_t prot,
1455 struct page **pages, unsigned int page_shift);
1456
1457 void vunmap_range_noflush(unsigned long start, unsigned long end);
1458
1459 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1460
1461 static inline bool vma_is_single_threaded_private(struct vm_area_struct *vma)
1462 {
1463 if (vma->vm_flags & VM_SHARED)
1464 return false;
1465
1466 return atomic_read(&vma->vm_mm->mm_users) == 1;
1467 }
1468
1469 #ifdef CONFIG_NUMA_BALANCING
1470 bool folio_can_map_prot_numa(struct folio *folio, struct vm_area_struct *vma,
1471 bool is_private_single_threaded);
1472
1473 #else
1474 static inline bool folio_can_map_prot_numa(struct folio *folio,
1475 struct vm_area_struct *vma, bool is_private_single_threaded)
1476 {
1477 return false;
1478 }
1479 #endif
1480
1481 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1482 unsigned long addr, int *flags, bool writable,
1483 int *last_cpupid);
1484
1485 void free_zone_device_folio(struct folio *folio);
1486 int migrate_device_coherent_folio(struct folio *folio);
1487
1488 struct vm_struct *__get_vm_area_node(unsigned long size,
1489 unsigned long align, unsigned long shift,
1490 unsigned long vm_flags, unsigned long start,
1491 unsigned long end, int node, gfp_t gfp_mask,
1492 const void *caller);
1493
1494 /*
1495 * mm/gup.c
1496 */
1497 int __must_check try_grab_folio(struct folio *folio, int refs,
1498 unsigned int flags);
1499
1500 /*
1501 * mm/huge_memory.c
1502 */
1503 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1504 pud_t *pud, bool write);
1505 bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1506 pmd_t *pmd, bool write);
1507
1508 /*
1509 * Parses a string with mem suffixes into its order. Useful to parse kernel
1510 * parameters.
1511 */
1512 static inline int get_order_from_str(const char *size_str,
1513 unsigned long valid_orders)
1514 {
1515 unsigned long size;
1516 char *endptr;
1517 int order;
1518
1519 size = memparse(size_str, &endptr);
1520
1521 if (!is_power_of_2(size))
1522 return -EINVAL;
1523 order = get_order(size);
1524 if (BIT(order) & ~valid_orders)
1525 return -EINVAL;
1526
1527 return order;
1528 }
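/*
 * Example: with 4kB pages, get_order_from_str("2M", valid_orders) parses 2MB
 * into order 9 (2MB / 4kB = 512 = 1 << 9) and succeeds only if BIT(9) is set
 * in valid_orders; "3M" fails the power-of-two check and returns -EINVAL.
 */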
1529
1530 enum {
1531 /* mark page accessed */
1532 FOLL_TOUCH = 1 << 16,
1533 /* a retry, previous pass started an IO */
1534 FOLL_TRIED = 1 << 17,
1535 /* we are working on non-current tsk/mm */
1536 FOLL_REMOTE = 1 << 18,
1537 /* pages must be released via unpin_user_page */
1538 FOLL_PIN = 1 << 19,
1539 /* gup_fast: prevent fall-back to slow gup */
1540 FOLL_FAST_ONLY = 1 << 20,
1541 /* allow unlocking the mmap lock */
1542 FOLL_UNLOCKABLE = 1 << 21,
1543 /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1544 FOLL_MADV_POPULATE = 1 << 22,
1545 };
1546
1547 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1548 FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1549 FOLL_MADV_POPULATE)
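
/*
 * Editorial sketch (not part of this header): entry points that accept
 * gup_flags from outside mm/ are expected to reject the internal-only
 * bits above; a check along these lines mirrors the argument validation
 * done in mm/gup.c. The helper name is hypothetical.
 */
static inline bool example_gup_flags_are_external(unsigned int gup_flags)
{
	/* None of the internal FOLL_* bits may be set by external callers. */
	return !(gup_flags & INTERNAL_GUP_FLAGS);
}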
1550
1551 /*
1552 * Indicates whether, for a page that is write-protected in the page table,
1553 * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1554 * GUP pin will remain consistent with the pages mapped into the page tables
1555 * of the MM.
1556 *
1557 * Temporary unmapping of PageAnonExclusive() pages or clearing of
1558 * PageAnonExclusive() has to protect against concurrent GUP:
1559 * * Ordinary GUP: Using the PT lock
1560 * * GUP-fast and fork(): mm->write_protect_seq
1561 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1562 * folio_try_share_anon_rmap_*()
1563 *
1564 * Must be called with the (sub)page that's actually referenced via the
1565 * page table entry, which might not necessarily be the head page for a
1566 * PTE-mapped THP.
1567 *
1568 * If the vma is NULL, we're coming from the GUP-fast path and might have
1569 * to fall back to the slow path just to look up the vma.
1570 */
1571 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1572 unsigned int flags, struct page *page)
1573 {
1574 /*
1575 * FOLL_WRITE is implicitly handled correctly as the page table entry
1576 * has to be writable -- and if it references (part of) an anonymous
1577 * folio, that part is required to be marked exclusive.
1578 */
1579 if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1580 return false;
1581 /*
1582 * Note: PageAnon(page) is stable until the page is actually getting
1583 * freed.
1584 */
1585 if (!PageAnon(page)) {
1586 /*
1587 * We only care about R/O long-term pinning: R/O short-term
1588 * pinning does not have the semantics to observe successive
1589 * changes through the process page tables.
1590 */
1591 if (!(flags & FOLL_LONGTERM))
1592 return false;
1593
1594 /* We really need the vma ... */
1595 if (!vma)
1596 return true;
1597
1598 /*
1599 * ... because we only care about writable private ("COW")
1600 * mappings where we have to break COW early.
1601 */
1602 return is_cow_mapping(vma->vm_flags);
1603 }
1604
1605 /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1606 if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1607 smp_rmb();
1608
1609 /*
1610 * Note that KSM pages cannot be exclusive, and consequently,
1611 * cannot get pinned.
1612 */
1613 return !PageAnonExclusive(page);
1614 }
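
/*
 * Editorial sketch (not part of this header): in the ordinary GUP path a
 * write-protected PTE that still needs unsharing is not pinned directly;
 * the caller backs out so that a FAULT_FLAG_UNSHARE fault can replace the
 * mapping first. The -EMLINK convention follows mm/gup.c; the helper name
 * is hypothetical.
 */
static inline int example_check_unshare(struct vm_area_struct *vma,
		unsigned int flags, struct page *page, pte_t pte)
{
	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
		return -EMLINK;	/* caller must trigger an unshare fault */
	return 0;
}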
1615
1616 extern bool mirrored_kernelcore;
1617 bool memblock_has_mirror(void);
1618 void memblock_free_all(void);
1619
1620 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1621 unsigned long start, unsigned long end,
1622 pgoff_t pgoff)
1623 {
1624 vma->vm_start = start;
1625 vma->vm_end = end;
1626 vma->vm_pgoff = pgoff;
1627 }
1628
1629 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1630 {
1631 /*
1632 * NOTE: this must be checked before testing VM_SOFTDIRTY, because
1633 * when soft-dirty support is not compiled in, VM_SOFTDIRTY is
1634 * defined as 0x0, so !(vm_flags & VM_SOFTDIRTY) would always
1635 * evaluate to true.
1636 */
1637 if (!pgtable_supports_soft_dirty())
1638 return false;
1639
1640 /*
1641 * Soft-dirty is kind of special: its tracking is enabled when the
1642 * VM_SOFTDIRTY vma flag is *not* set.
1643 */
1644 return !(vma->vm_flags & VM_SOFTDIRTY);
1645 }
1646
1647 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1648 {
1649 return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1650 }
1651
1652 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1653 {
1654 return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1655 }
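
/*
 * Editorial sketch (not part of this header): a protection-change path
 * that wants soft-dirty tracking to keep working must leave a PTE
 * write-protected while the VMA is being tracked but the PTE is not yet
 * marked soft-dirty, so that the next write faults and records the
 * dirtying; pte_needs_soft_dirty_wp() encapsulates exactly that test.
 * The helper name is hypothetical.
 */
static inline bool example_pte_may_be_made_writable(struct vm_area_struct *vma,
		pte_t pte)
{
	/* Keep write-protected until a write fault sets the soft-dirty bit. */
	return !pte_needs_soft_dirty_wp(vma, pte);
}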
1656
1657 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1658 unsigned long zone, int nid);
1659 void __meminit __init_page_from_nid(unsigned long pfn, int nid);
1660
1661 /* shrinker related functions */
1662 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1663 int priority);
1664
1665 int shmem_add_to_page_cache(struct folio *folio,
1666 struct address_space *mapping,
1667 pgoff_t index, void *expected, gfp_t gfp);
1668 int shmem_inode_acct_blocks(struct inode *inode, long pages);
1669 bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped);
1670
1671 #ifdef CONFIG_SHRINKER_DEBUG
1672 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1673 struct shrinker *shrinker, const char *fmt, va_list ap)
1674 {
1675 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1676
1677 return shrinker->name ? 0 : -ENOMEM;
1678 }
1679
1680 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1681 {
1682 kfree_const(shrinker->name);
1683 shrinker->name = NULL;
1684 }
1685
1686 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1687 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1688 int *debugfs_id);
1689 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1690 int debugfs_id);
1691 #else /* CONFIG_SHRINKER_DEBUG */
1692 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1693 {
1694 return 0;
1695 }
1696 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1697 const char *fmt, va_list ap)
1698 {
1699 return 0;
1700 }
1701 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1702 {
1703 }
1704 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1705 int *debugfs_id)
1706 {
1707 *debugfs_id = -1;
1708 return NULL;
1709 }
1710 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1711 int debugfs_id)
1712 {
1713 }
1714 #endif /* CONFIG_SHRINKER_DEBUG */
1715
1716 /* Only track the nodes of mappings with shadow entries */
1717 void workingset_update_node(struct xa_node *node);
1718 extern struct list_lru shadow_nodes;
1719 #define mapping_set_update(xas, mapping) do { \
1720 if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
1721 xas_set_update(xas, workingset_update_node); \
1722 xas_set_lru(xas, &shadow_nodes); \
1723 } \
1724 } while (0)
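
/*
 * Editorial sketch (not part of this header): page cache insertion paths
 * set up an XArray state for the mapping and apply mapping_set_update()
 * so the workingset code is notified of node changes on mappings that may
 * hold shadow entries; the pattern below follows the usage in mm/filemap.c,
 * with the helper name being hypothetical.
 */
static inline void example_prepare_page_cache_xas(struct address_space *mapping,
		pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);

	mapping_set_update(&xas, mapping);
	/* ... locking, xas_store() etc. would follow in a real caller. */
}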
1725
1726 /* mremap.c */
1727 unsigned long move_page_tables(struct pagetable_move_control *pmc);
1728
1729 #ifdef CONFIG_UNACCEPTED_MEMORY
1730 void accept_page(struct page *page);
1731 #else /* CONFIG_UNACCEPTED_MEMORY */
1732 static inline void accept_page(struct page *page)
1733 {
1734 }
1735 #endif /* CONFIG_UNACCEPTED_MEMORY */
1736
1737 /* pagewalk.c */
1738 int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
1739 unsigned long end, const struct mm_walk_ops *ops,
1740 void *private);
1741 int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
1742 unsigned long end, const struct mm_walk_ops *ops,
1743 void *private);
1744 int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
1745 unsigned long end, const struct mm_walk_ops *ops,
1746 pgd_t *pgd, void *private);
1747
1748 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
1749 int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
1750
1751 void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn);
1752 int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
1753 unsigned long pfn, unsigned long size, pgprot_t pgprot);
1754
1755 static inline void io_remap_pfn_range_prepare(struct vm_area_desc *desc,
1756 unsigned long orig_pfn, unsigned long size)
1757 {
1758 const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
1759
1760 remap_pfn_range_prepare(desc, pfn);
1761 }
1762
1763 static inline int io_remap_pfn_range_complete(struct vm_area_struct *vma,
1764 unsigned long addr, unsigned long orig_pfn, unsigned long size,
1765 pgprot_t orig_prot)
1766 {
1767 const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
1768 const pgprot_t prot = pgprot_decrypted(orig_prot);
1769
1770 return remap_pfn_range_complete(vma, addr, pfn, size, prot);
1771 }
1772
1773 #endif /* __MM_INTERNAL_H */
1774