xref: /linux/mm/internal.h (revision 09cbdf7dbe2334d32853ad3ba3b54df017d7a37b)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/khugepaged.h>
12 #include <linux/mm.h>
13 #include <linux/mm_inline.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/pagemap.h>
16 #include <linux/pagewalk.h>
17 #include <linux/rmap.h>
18 #include <linux/swap.h>
19 #include <linux/leafops.h>
20 #include <linux/swap_cgroup.h>
21 #include <linux/tracepoint-defs.h>
22 
23 /* Internal core VMA manipulation functions. */
24 #include "vma.h"
25 
26 struct folio_batch;
27 
28 /*
29  * Maintains state across a page table move. The operation assumes both source
30  * and destination VMAs already exist and are specified by the user.
31  *
32  * Partial moves are permitted, but the old and new ranges must both reside
33  * within a VMA.
34  *
35  * The mmap lock must be held in write mode, and VMA write locks must be held
36  * on any VMA that is visible.
37  *
38  * Use the PAGETABLE_MOVE() macro to initialise this struct.
39  *
40  * The old_addr and new_addr fields are updated as the page table move is
41  * executed.
42  *
43  * NOTE: The page table move is carried out by reading from [old_addr, old_end),
44  * and old_addr may be updated for better page table alignment, so len_in
45  * represents the length of the range being copied as specified by the user.
46  */
47 struct pagetable_move_control {
48 	struct vm_area_struct *old; /* Source VMA. */
49 	struct vm_area_struct *new; /* Destination VMA. */
50 	unsigned long old_addr; /* Address from which the move begins. */
51 	unsigned long old_end; /* Exclusive address at which old range ends. */
52 	unsigned long new_addr; /* Address to move page tables to. */
53 	unsigned long len_in; /* Bytes to remap specified by user. */
54 
55 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
56 	bool for_stack; /* Is this an early temp stack being moved? */
57 };
58 
59 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
60 	struct pagetable_move_control name = {				\
61 		.old = old_,						\
62 		.new = new_,						\
63 		.old_addr = old_addr_,					\
64 		.old_end = (old_addr_) + (len_),			\
65 		.new_addr = new_addr_,					\
66 		.len_in = len_,						\
67 	}
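/*
 * Illustrative sketch only (not a caller in this file): a page table mover
 * such as mremap() would typically initialise the control structure with
 * PAGETABLE_MOVE() and then hand it to the moving code, e.g.:
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *	pmc.need_rmap_locks = need_rmap_locks;
 *	moved = move_page_tables(&pmc);
 *
 * old_vma, new_vma, old_addr, new_addr, len and need_rmap_locks are
 * placeholders for state the caller has already established.
 */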
68 
69 /*
70  * The set of flags that only affect watermark checking and reclaim
71  * behaviour. This is used by the MM to obey the caller constraints
72  * about IO, FS and watermark checking while ignoring placement
73  * hints such as HIGHMEM usage.
74  */
75 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
76 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
77 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
78 			__GFP_NOLOCKDEP)
79 
80 /* The GFP flags allowed during early boot */
81 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
82 
83 /* Control allocation cpuset and node placement constraints */
84 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
85 
86 /* Do not use these with a slab allocator */
87 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
88 
89 /*
90  * Unlike WARN_ON_ONCE(), no warning will be issued
91  * when __GFP_NOWARN is specified.
92  */
93 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
94 	static bool __section(".data..once") __warned;			\
95 	int __ret_warn_once = !!(cond);					\
96 									\
97 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
98 		__warned = true;					\
99 		WARN_ON(1);						\
100 	}								\
101 	unlikely(__ret_warn_once);					\
102 })
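/*
 * Illustrative usage (hypothetical call site): warn once about an unexpected
 * condition unless the caller asked for silence via __GFP_NOWARN, e.g.:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 *
 * With __GFP_NOWARN set in gfp the condition is still evaluated and returned,
 * but no warning is printed.
 */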
103 
104 void page_writeback_init(void);
105 
106 /*
107  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
108  * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
109  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
110  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
111  */
112 #define ENTIRELY_MAPPED		0x800000
113 #define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
114 
115 /*
116  * Flags passed to __show_mem() and show_free_areas() to suppress output in
117  * various contexts.
118  */
119 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
120 
121 /*
122  * How many individual pages have an elevated _mapcount.  Excludes
123  * the folio's entire_mapcount.
124  *
125  * Don't use this function outside of debugging code.
126  */
127 static inline int folio_nr_pages_mapped(const struct folio *folio)
128 {
129 	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
130 		return -1;
131 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
132 }
133 
134 /*
135  * Retrieve the first entry of a folio based on a provided entry within the
136  * folio. We cannot rely on folio->swap as there is no guarantee that it has
137  * been initialized. Used for calling arch_swap_restore().
138  */
139 static inline swp_entry_t folio_swap(swp_entry_t entry,
140 		const struct folio *folio)
141 {
142 	swp_entry_t swap = {
143 		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
144 	};
145 
146 	return swap;
147 }
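/*
 * Worked example (assumed values, for illustration only): for an order-2
 * folio (folio_nr_pages() == 4), passing any of the four consecutive entries
 * with val 0x100..0x103 as @entry yields the folio's first entry:
 *
 *	ALIGN_DOWN(0x103, 4) == 0x100
 *
 * i.e. the returned swp_entry_t has .val == 0x100 no matter which page of
 * the folio @entry referred to.
 */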
148 
149 static inline void *folio_raw_mapping(const struct folio *folio)
150 {
151 	unsigned long mapping = (unsigned long)folio->mapping;
152 
153 	return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
154 }
155 
156 /*
157  * This is a file-backed mapping, and is about to be memory mapped - invoke its
158  * mmap hook and safely handle error conditions. On error, the VMA's hooks are
159  * replaced with dummy hooks so that no further hooks can be invoked on it.
160  *
161  * @file: File which backs the mapping.
162  * @vma:  VMA which we are mapping.
163  *
164  * Returns: 0 on success, an error code otherwise.
165  */
166 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
167 {
168 	int err = vfs_mmap(file, vma);
169 
170 	if (likely(!err))
171 		return 0;
172 
173 	/*
174 	 * OK, we tried to call the file hook for mmap(), but an error
175 	 * arose. The mapping is in an inconsistent state and we must not invoke
176 	 * any further hooks on it.
177 	 */
178 	vma->vm_ops = &vma_dummy_vm_ops;
179 
180 	return err;
181 }
182 
183 /*
184  * If the VMA has a close hook then close it, and since closing it might leave
185  * it in an inconsistent state which makes the use of any hooks suspect, clear
186  * them down by installing dummy empty hooks.
187  */
188 static inline void vma_close(struct vm_area_struct *vma)
189 {
190 	if (vma->vm_ops && vma->vm_ops->close) {
191 		vma->vm_ops->close(vma);
192 
193 		/*
194 		 * The mapping is in an inconsistent state, and no further hooks
195 		 * may be invoked upon it.
196 		 */
197 		vma->vm_ops = &vma_dummy_vm_ops;
198 	}
199 }
200 
201 /* unmap_vmas is in mm/memory.c */
202 void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap);
203 
204 #ifdef CONFIG_MMU
205 
206 static inline void get_anon_vma(struct anon_vma *anon_vma)
207 {
208 	atomic_inc(&anon_vma->refcount);
209 }
210 
211 void __put_anon_vma(struct anon_vma *anon_vma);
212 
213 static inline void put_anon_vma(struct anon_vma *anon_vma)
214 {
215 	if (atomic_dec_and_test(&anon_vma->refcount))
216 		__put_anon_vma(anon_vma);
217 }
218 
219 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
220 {
221 	down_write(&anon_vma->root->rwsem);
222 }
223 
224 static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
225 {
226 	return down_write_trylock(&anon_vma->root->rwsem);
227 }
228 
229 static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
230 {
231 	up_write(&anon_vma->root->rwsem);
232 }
233 
234 static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
235 {
236 	down_read(&anon_vma->root->rwsem);
237 }
238 
239 static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
240 {
241 	return down_read_trylock(&anon_vma->root->rwsem);
242 }
243 
244 static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
245 {
246 	up_read(&anon_vma->root->rwsem);
247 }
248 
249 struct anon_vma *folio_get_anon_vma(const struct folio *folio);
250 
251 /* Operations which modify VMAs. */
252 enum vma_operation {
253 	VMA_OP_SPLIT,
254 	VMA_OP_MERGE_UNFAULTED,
255 	VMA_OP_REMAP,
256 	VMA_OP_FORK,
257 };
258 
259 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
260 	enum vma_operation operation);
261 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma);
262 int  __anon_vma_prepare(struct vm_area_struct *vma);
263 void unlink_anon_vmas(struct vm_area_struct *vma);
264 
265 static inline int anon_vma_prepare(struct vm_area_struct *vma)
266 {
267 	if (likely(vma->anon_vma))
268 		return 0;
269 
270 	return __anon_vma_prepare(vma);
271 }
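/*
 * Typical (illustrative) fault-path pattern: ensure the VMA has an anon_vma
 * before installing a new anonymous page, bailing out on allocation failure:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *
 * The fast path is a single pointer test; __anon_vma_prepare() is only
 * called the first time the VMA needs an anon_vma.
 */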
272 
273 /* Flags for folio_pte_batch(). */
274 typedef int __bitwise fpb_t;
275 
276 /* Compare PTEs respecting the dirty bit. */
277 #define FPB_RESPECT_DIRTY		((__force fpb_t)BIT(0))
278 
279 /* Compare PTEs respecting the soft-dirty bit. */
280 #define FPB_RESPECT_SOFT_DIRTY		((__force fpb_t)BIT(1))
281 
282 /* Compare PTEs respecting the writable bit. */
283 #define FPB_RESPECT_WRITE		((__force fpb_t)BIT(2))
284 
285 /*
286  * Merge PTE write bits: if any PTE in the batch is writable, modify the
287  * PTE at @ptentp to be writable.
288  */
289 #define FPB_MERGE_WRITE			((__force fpb_t)BIT(3))
290 
291 /*
292  * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
293  * modify the PTE at @ptentp to be young or dirty, respectively.
294  */
295 #define FPB_MERGE_YOUNG_DIRTY		((__force fpb_t)BIT(4))
296 
297 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
298 {
299 	if (!(flags & FPB_RESPECT_DIRTY))
300 		pte = pte_mkclean(pte);
301 	if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
302 		pte = pte_clear_soft_dirty(pte);
303 	if (likely(!(flags & FPB_RESPECT_WRITE)))
304 		pte = pte_wrprotect(pte);
305 	return pte_mkold(pte);
306 }
307 
308 /**
309  * folio_pte_batch_flags - detect a PTE batch for a large folio
310  * @folio: The large folio to detect a PTE batch for.
311  * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
312  * @ptep: Page table pointer for the first entry.
313  * @ptentp: Pointer to a COPY of the first page table entry whose flags this
314  *	    function updates based on @flags if appropriate.
315  * @max_nr: The maximum number of table entries to consider.
316  * @flags: Flags to modify the PTE batch semantics.
317  *
318  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
319  * pages of the same large folio in a single VMA and a single page table.
320  *
321  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
322  * the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
323  * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
324  *
325  * @ptep must map any page of the folio. max_nr must be at least one and
326  * must be limited by the caller so scanning cannot exceed a single VMA and
327  * a single page table.
328  *
329  * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
330  * be updated: it's crucial that a pointer to a COPY of the first
331  * page table entry, obtained through ptep_get(), is provided as @ptentp.
332  *
333  * This function will be inlined to optimize based on the input parameters;
334  * consider using folio_pte_batch() instead if applicable.
335  *
336  * Return: the number of table entries in the batch.
337  */
338 static inline unsigned int folio_pte_batch_flags(struct folio *folio,
339 		struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
340 		unsigned int max_nr, fpb_t flags)
341 {
342 	bool any_writable = false, any_young = false, any_dirty = false;
343 	pte_t expected_pte, pte = *ptentp;
344 	unsigned int nr, cur_nr;
345 
346 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
347 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
348 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
349 	/*
350 	 * Ensure this is a pointer to a copy, not a pointer into a page table.
351 	 * If this is a stack value, it won't be a valid virtual address, but
352 	 * that's fine because it also cannot be pointing into the page table.
353 	 */
354 	VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));
355 
356 	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
357 	max_nr = min_t(unsigned long, max_nr,
358 		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
359 
360 	nr = pte_batch_hint(ptep, pte);
361 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
362 	ptep = ptep + nr;
363 
364 	while (nr < max_nr) {
365 		pte = ptep_get(ptep);
366 
367 		if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
368 			break;
369 
370 		if (flags & FPB_MERGE_WRITE)
371 			any_writable |= pte_write(pte);
372 		if (flags & FPB_MERGE_YOUNG_DIRTY) {
373 			any_young |= pte_young(pte);
374 			any_dirty |= pte_dirty(pte);
375 		}
376 
377 		cur_nr = pte_batch_hint(ptep, pte);
378 		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
379 		ptep += cur_nr;
380 		nr += cur_nr;
381 	}
382 
383 	if (any_writable)
384 		*ptentp = pte_mkwrite(*ptentp, vma);
385 	if (any_young)
386 		*ptentp = pte_mkyoung(*ptentp);
387 	if (any_dirty)
388 		*ptentp = pte_mkdirty(*ptentp);
389 
390 	return min(nr, max_nr);
391 }
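/*
 * Illustrative caller sketch (placeholder names): batch over the PTEs
 * mapping a large folio while holding the PTL, letting the helper fold the
 * young/dirty bits of the whole batch into the copied entry:
 *
 *	pte_t pte = ptep_get(ptep);	(a copy, never a page table pointer)
 *	unsigned int nr;
 *
 *	nr = folio_pte_batch_flags(folio, vma, ptep, &pte, max_nr,
 *				   FPB_MERGE_YOUNG_DIRTY);
 *
 * Afterwards 'pte' reflects any young/dirty bit seen in the batch, and 'nr'
 * PTEs starting at ptep map consecutive pages of 'folio'.
 */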
392 
393 unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
394 		unsigned int max_nr);
395 
396 /**
397  * pte_move_swp_offset - Move the swap entry offset field of a swap pte
398  *	 forward or backward by delta
399  * @pte: The initial pte state; must be a swap entry
400  * @delta: The direction and the offset we are moving; forward if delta
401  *	 is positive; backward if delta is negative
402  *
403  * Moves the swap offset, while maintaining all other fields, including
404  * swap type, and any swp pte bits. The resulting pte is returned.
405  */
406 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
407 {
408 	const softleaf_t entry = softleaf_from_pte(pte);
409 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
410 						   (swp_offset(entry) + delta)));
411 
412 	if (pte_swp_soft_dirty(pte))
413 		new = pte_swp_mksoft_dirty(new);
414 	if (pte_swp_exclusive(pte))
415 		new = pte_swp_mkexclusive(new);
416 	if (pte_swp_uffd_wp(pte))
417 		new = pte_swp_mkuffd_wp(new);
418 
419 	return new;
420 }
421 
422 
423 /**
424  * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
425  * @pte: The initial pte state; must be a swap entry.
426  *
427  * Increments the swap offset, while maintaining all other fields, including
428  * swap type, and any swp pte bits. The resulting pte is returned.
429  */
430 static inline pte_t pte_next_swp_offset(pte_t pte)
431 {
432 	return pte_move_swp_offset(pte, 1);
433 }
434 
435 /**
436  * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
437  * @start_ptep: Page table pointer for the first entry.
438  * @max_nr: The maximum number of table entries to consider.
439  * @pte: Page table entry for the first entry.
440  *
441  * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
442  * containing swap entries all with consecutive offsets and targeting the same
443  * swap type, all with matching swp pte bits.
444  *
445  * max_nr must be at least one and must be limited by the caller so scanning
446  * cannot exceed a single page table.
447  *
448  * Return: the number of table entries in the batch.
449  */
450 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
451 {
452 	pte_t expected_pte = pte_next_swp_offset(pte);
453 	const pte_t *end_ptep = start_ptep + max_nr;
454 	const softleaf_t entry = softleaf_from_pte(pte);
455 	pte_t *ptep = start_ptep + 1;
456 	unsigned short cgroup_id;
457 
458 	VM_WARN_ON(max_nr < 1);
459 	VM_WARN_ON(!softleaf_is_swap(entry));
460 
461 	cgroup_id = lookup_swap_cgroup_id(entry);
462 	while (ptep < end_ptep) {
463 		softleaf_t entry;
464 
465 		pte = ptep_get(ptep);
466 
467 		if (!pte_same(pte, expected_pte))
468 			break;
469 		entry = softleaf_from_pte(pte);
470 		if (lookup_swap_cgroup_id(entry) != cgroup_id)
471 			break;
472 		expected_pte = pte_next_swp_offset(expected_pte);
473 		ptep++;
474 	}
475 
476 	return ptep - start_ptep;
477 }
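/*
 * Illustrative sketch (placeholder names): a caller zapping a range that
 * finds a swap PTE can consume a whole run of consecutive entries at once
 * instead of one at a time:
 *
 *	pte_t pte = ptep_get(ptep);
 *	int nr = swap_pte_batch(ptep, max_nr, pte);
 *
 * Here 'nr' PTEs starting at ptep hold consecutive swap offsets of the same
 * swap type with matching swp pte bits, so they can be freed as a batch.
 */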
478 #endif /* CONFIG_MMU */
479 
480 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
481 						int nr_throttled);
482 static inline void acct_reclaim_writeback(struct folio *folio)
483 {
484 	pg_data_t *pgdat = folio_pgdat(folio);
485 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
486 
487 	if (nr_throttled)
488 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
489 }
490 
491 static inline void wake_throttle_isolated(pg_data_t *pgdat)
492 {
493 	wait_queue_head_t *wqh;
494 
495 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
496 	if (waitqueue_active(wqh))
497 		wake_up(wqh);
498 }
499 
500 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
501 static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
502 {
503 	vm_fault_t ret = __vmf_anon_prepare(vmf);
504 
505 	if (unlikely(ret & VM_FAULT_RETRY))
506 		vma_end_read(vmf->vma);
507 	return ret;
508 }
509 
510 vm_fault_t do_swap_page(struct vm_fault *vmf);
511 void folio_rotate_reclaimable(struct folio *folio);
512 bool __folio_end_writeback(struct folio *folio);
513 void deactivate_file_folio(struct folio *folio);
514 void folio_activate(struct folio *folio);
515 
516 void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
517 
518 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
519 
520 /**
521  * sync_with_folio_pmd_zap - sync with concurrent zapping of a folio PMD
522  * @mm: The mm_struct.
523  * @pmdp: Pointer to the pmd that was found to be pmd_none().
524  *
525  * When we find a pmd_none() while unmapping a folio without holding the PTL,
526  * zap_huge_pmd() may have cleared the PMD but not yet modified the folio to
527  * indicate that it's unmapped. Skipping the PMD without synchronization could
528  * make folio unmapping code assume that unmapping failed.
529  *
530  * Wait for concurrent zapping to complete by grabbing the PTL.
531  */
532 static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
533 {
534 	spinlock_t *ptl = pmd_lock(mm, pmdp);
535 
536 	spin_unlock(ptl);
537 }
538 
539 struct zap_details;
540 void zap_vma_range_batched(struct mmu_gather *tlb,
541 		struct vm_area_struct *vma, unsigned long addr,
542 		unsigned long size, struct zap_details *details);
543 int zap_vma_for_reaping(struct vm_area_struct *vma);
544 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
545 			   gfp_t gfp);
546 
547 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
548 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
549 static inline void force_page_cache_readahead(struct address_space *mapping,
550 		struct file *file, pgoff_t index, unsigned long nr_to_read)
551 {
552 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
553 	force_page_cache_ra(&ractl, nr_to_read);
554 }
555 
556 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
557 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
558 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
559 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
560 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
561 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
562 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
563 		loff_t end);
564 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
565 unsigned long mapping_try_invalidate(struct address_space *mapping,
566 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
567 
568 /**
569  * folio_evictable - Test whether a folio is evictable.
570  * @folio: The folio to test.
571  *
572  * Test whether @folio is evictable -- i.e., should be placed on
573  * active/inactive lists vs unevictable list.
574  *
575  * Reasons folio might not be evictable:
576  * 1. folio's mapping marked unevictable
577  * 2. One of the pages in the folio is part of an mlocked VMA
578  */
579 static inline bool folio_evictable(struct folio *folio)
580 {
581 	bool ret;
582 
583 	/* Prevent address_space of inode and swap cache from being freed */
584 	rcu_read_lock();
585 	ret = !mapping_unevictable(folio_mapping(folio)) &&
586 			!folio_test_mlocked(folio);
587 	rcu_read_unlock();
588 	return ret;
589 }
590 
591 /*
592  * Turn a non-refcounted page (->_refcount == 0) into a refcounted page
593  * with a reference count of one.
594  */
595 static inline void set_page_refcounted(struct page *page)
596 {
597 	VM_BUG_ON_PAGE(PageTail(page), page);
598 	VM_BUG_ON_PAGE(page_ref_count(page), page);
599 	set_page_count(page, 1);
600 }
601 
602 static inline void set_pages_refcounted(struct page *page, unsigned long nr_pages)
603 {
604 	unsigned long pfn = page_to_pfn(page);
605 
606 	for (; nr_pages--; pfn++)
607 		set_page_refcounted(pfn_to_page(pfn));
608 }
609 
610 /*
611  * Return true if a folio needs ->release_folio() calling upon it.
612  */
613 static inline bool folio_needs_release(struct folio *folio)
614 {
615 	struct address_space *mapping = folio_mapping(folio);
616 
617 	return folio_has_private(folio) ||
618 		(mapping && mapping_release_always(mapping));
619 }
620 
621 extern unsigned long highest_memmap_pfn;
622 
623 /*
624  * Maximum number of reclaim retries without progress before the OOM
625  * killer is considered the only way forward.
626  */
627 #define MAX_RECLAIM_RETRIES 16
628 
629 /*
630  * in mm/vmscan.c:
631  */
632 bool folio_isolate_lru(struct folio *folio);
633 void folio_putback_lru(struct folio *folio);
634 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
635 int user_proactive_reclaim(char *buf,
636 			   struct mem_cgroup *memcg, pg_data_t *pgdat);
637 
638 /*
639  * in mm/rmap.c:
640  */
641 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
642 
643 /*
644  * in mm/page_alloc.c
645  */
646 #define K(x) ((x) << (PAGE_SHIFT-10))
647 
648 extern char * const zone_names[MAX_NR_ZONES];
649 
650 /* perform sanity checks on struct pages being allocated or freed */
651 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
652 
653 extern int min_free_kbytes;
654 extern int defrag_mode;
655 
656 void setup_per_zone_wmarks(void);
657 void calculate_min_free_kbytes(void);
658 int __meminit init_per_zone_wmark_min(void);
659 void page_alloc_sysctl_init(void);
660 
661 /*
662  * Structure for holding the mostly immutable allocation parameters passed
663  * between functions involved in allocations, including the alloc_pages*
664  * family of functions.
665  *
666  * nodemask, migratetype and highest_zoneidx are initialized only once in
667  * __alloc_pages() and then never change.
668  *
669  * zonelist, preferred_zone and highest_zoneidx are set first in
670  * __alloc_pages() for the fast path, and might be later changed
671  * in __alloc_pages_slowpath(). All other functions pass the whole structure
672  * by a const pointer.
673  */
674 struct alloc_context {
675 	struct zonelist *zonelist;
676 	nodemask_t *nodemask;
677 	struct zoneref *preferred_zoneref;
678 	int migratetype;
679 
680 	/*
681 	 * highest_zoneidx represents highest usable zone index of
682 	 * the allocation request. Due to the nature of the zone,
683 	 * memory on lower zone than the highest_zoneidx will be
684 	 * protected by lowmem_reserve[highest_zoneidx].
685 	 *
686 	 * highest_zoneidx is also used by reclaim/compaction to limit
687 	 * the target zone since higher zone than this index cannot be
688 	 * usable for this allocation request.
689 	 */
690 	enum zone_type highest_zoneidx;
691 	bool spread_dirty_pages;
692 };
693 
694 /*
695  * This function returns the order of a free page in the buddy system. In
696  * general, page_zone(page)->lock must be held by the caller to prevent the
697  * page from being allocated in parallel and returning garbage as the order.
698  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
699  * page cannot be allocated or merged in parallel. Alternatively, it must
700  * handle invalid values gracefully, and use buddy_order_unsafe() below.
701  */
702 static inline unsigned int buddy_order(struct page *page)
703 {
704 	/* PageBuddy() must be checked by the caller */
705 	return page_private(page);
706 }
707 
708 /*
709  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
710  * PageBuddy() should be checked first by the caller to minimize race window,
711  * and invalid values must be handled gracefully.
712  *
713  * READ_ONCE is used so that if the caller assigns the result into a local
714  * variable and e.g. tests it for valid range before using, the compiler cannot
715  * decide to remove the variable and inline the page_private(page) multiple
716  * times, potentially observing different values in the tests and the actual
717  * use of the result.
718  */
719 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
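/*
 * Illustrative use of the lockless variant (sketch only): copy the order
 * into a local variable and range-check it before trusting it, which is
 * exactly the pattern the READ_ONCE() above supports:
 *
 *	unsigned long order = buddy_order_unsafe(page);
 *
 *	if (PageBuddy(page) && order <= MAX_PAGE_ORDER) {
 *		... 'order' may still be stale, but it is a single consistent
 *		    snapshot that has been validated before use ...
 *	}
 */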
720 
721 /*
722  * This function checks whether @buddy is free && is actually a buddy of @page.
723  * We can coalesce a page and its buddy if
724  * (a) the buddy is not in a hole (check before calling!) &&
725  * (b) the buddy is in the buddy system &&
726  * (c) a page and its buddy have the same order &&
727  * (d) a page and its buddy are in the same zone.
728  *
729  * For recording whether a page is in the buddy system, we set PageBuddy.
730  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
731  *
732  * For recording page's order, we use page_private(page).
733  */
734 static inline bool page_is_buddy(struct page *page, struct page *buddy,
735 				 unsigned int order)
736 {
737 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
738 		return false;
739 
740 	if (buddy_order(buddy) != order)
741 		return false;
742 
743 	/*
744 	 * zone check is done late to avoid uselessly calculating
745 	 * zone/node ids for pages that could never merge.
746 	 */
747 	if (page_zone_id(page) != page_zone_id(buddy))
748 		return false;
749 
750 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
751 
752 	return true;
753 }
754 
755 /*
756  * Locate the struct page for both the matching buddy in our
757  * pair (buddy1) and the combined O(n+1) page they form (page).
758  *
759  * 1) Any buddy B1 will have an order O twin B2 which satisfies
760  * the following equation:
761  *     B2 = B1 ^ (1 << O)
762  * For example, if the starting buddy (B1) is #8, its order-1
763  * buddy (B2) is #10:
764  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
765  *
766  * 2) Any buddy B will have an order O+1 parent P which
767  * satisfies the following equation:
768  *     P = B & ~(1 << O)
769  *
770  * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
771  */
772 static inline unsigned long
773 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
774 {
775 	return page_pfn ^ (1 << order);
776 }
777 
778 /*
779  * Find the buddy of @page and validate it.
780  * @page: The input page
781  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
782  *       function is used in the performance-critical __free_one_page().
783  * @order: The order of the page
784  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
785  *             page_to_pfn().
786  *
787  * The found buddy may not be PageBuddy(), may lie outside @page's zone, or its
788  * order may differ from @page's, so it must be validated before use.
789  *
790  * Return: the found buddy page or NULL if not found.
791  */
792 static inline struct page *find_buddy_page_pfn(struct page *page,
793 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
794 {
795 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
796 	struct page *buddy;
797 
798 	buddy = page + (__buddy_pfn - pfn);
799 	if (buddy_pfn)
800 		*buddy_pfn = __buddy_pfn;
801 
802 	if (page_is_buddy(page, buddy, order))
803 		return buddy;
804 	return NULL;
805 }
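/*
 * Worked example of the buddy arithmetic (assumed values): for pfn == 8 and
 * order == 1, __find_buddy_pfn() gives 8 ^ (1 << 1) == 10. A hedged merge
 * step might then look like:
 *
 *	unsigned long buddy_pfn;
 *	struct page *buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *
 *	if (buddy) {
 *		... buddy passed page_is_buddy(): same zone, same order, free;
 *		    the pair merges into an order+1 page starting at
 *		    pfn & buddy_pfn (8 & 10 == 8), matching P = B & ~(1 << O) ...
 *	}
 */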
806 
807 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
808 				unsigned long end_pfn, struct zone *zone);
809 
810 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
811 				unsigned long end_pfn, struct zone *zone)
812 {
813 	if (zone->contiguous)
814 		return pfn_to_page(start_pfn);
815 
816 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
817 }
818 
819 void set_zone_contiguous(struct zone *zone);
820 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
821 			   unsigned long nr_pages);
822 
823 static inline void clear_zone_contiguous(struct zone *zone)
824 {
825 	zone->contiguous = false;
826 }
827 
828 extern int __isolate_free_page(struct page *page, unsigned int order);
829 extern void __putback_isolated_page(struct page *page, unsigned int order,
830 				    int mt);
831 extern void memblock_free_pages(unsigned long pfn, unsigned int order);
832 extern void __free_pages_core(struct page *page, unsigned int order,
833 		enum meminit_context context);
834 
835 /*
836  * This will have no effect, other than possibly generating a warning, if the
837  * caller passes in a non-large folio.
838  */
839 static inline void folio_set_order(struct folio *folio, unsigned int order)
840 {
841 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
842 		return;
843 	VM_WARN_ON_ONCE(order > MAX_FOLIO_ORDER);
844 
845 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
846 #ifdef NR_PAGES_IN_LARGE_FOLIO
847 	folio->_nr_pages = 1U << order;
848 #endif
849 }
850 
851 bool __folio_unqueue_deferred_split(struct folio *folio);
852 static inline bool folio_unqueue_deferred_split(struct folio *folio)
853 {
854 	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
855 		return false;
856 
857 	/*
858 	 * At this point, there is no one trying to add the folio to
859 	 * deferred_list. If folio is not in deferred_list, it's safe
860 	 * to check without acquiring the split_queue_lock.
861 	 */
862 	if (data_race(list_empty(&folio->_deferred_list)))
863 		return false;
864 
865 	return __folio_unqueue_deferred_split(folio);
866 }
867 
868 static inline struct folio *page_rmappable_folio(struct page *page)
869 {
870 	struct folio *folio = (struct folio *)page;
871 
872 	if (folio && folio_test_large(folio))
873 		folio_set_large_rmappable(folio);
874 	return folio;
875 }
876 
877 static inline void prep_compound_head(struct page *page, unsigned int order)
878 {
879 	struct folio *folio = (struct folio *)page;
880 
881 	folio_set_order(folio, order);
882 	atomic_set(&folio->_large_mapcount, -1);
883 	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
884 		atomic_set(&folio->_nr_pages_mapped, 0);
885 	if (IS_ENABLED(CONFIG_MM_ID)) {
886 		folio->_mm_ids = 0;
887 		folio->_mm_id_mapcount[0] = -1;
888 		folio->_mm_id_mapcount[1] = -1;
889 	}
890 	if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
891 		atomic_set(&folio->_pincount, 0);
892 		atomic_set(&folio->_entire_mapcount, -1);
893 	}
894 	if (order > 1)
895 		INIT_LIST_HEAD(&folio->_deferred_list);
896 }
897 
898 static inline void prep_compound_tail(struct page *tail,
899 		const struct page *head, unsigned int order)
900 {
901 	tail->mapping = TAIL_MAPPING;
902 	set_compound_head(tail, head, order);
903 	set_page_private(tail, 0);
904 }
905 
906 static inline void init_compound_tail(struct page *tail,
907 		const struct page *head, unsigned int order, struct zone *zone)
908 {
909 	atomic_set(&tail->_mapcount, -1);
910 	set_page_node(tail, zone_to_nid(zone));
911 	set_page_zone(tail, zone_idx(zone));
912 	prep_compound_tail(tail, head, order);
913 }
914 
915 void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
916 extern bool free_pages_prepare(struct page *page, unsigned int order);
917 
918 extern int user_min_free_kbytes;
919 
920 struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
921 		nodemask_t *);
922 #define __alloc_frozen_pages(...) \
923 	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
924 void free_frozen_pages(struct page *page, unsigned int order);
925 void free_unref_folios(struct folio_batch *fbatch);
926 
927 #ifdef CONFIG_NUMA
928 struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
929 #else
930 static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
931 {
932 	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
933 }
934 #endif
935 
936 #define alloc_frozen_pages(...) \
937 	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
938 
939 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
940 #define alloc_frozen_pages_nolock(...) \
941 	alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
942 void free_frozen_pages_nolock(struct page *page, unsigned int order);
943 
944 extern void zone_pcp_reset(struct zone *zone);
945 extern void zone_pcp_disable(struct zone *zone);
946 extern void zone_pcp_enable(struct zone *zone);
947 extern void zone_pcp_init(struct zone *zone);
948 
949 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
950 			  phys_addr_t min_addr,
951 			  int nid, bool exact_nid);
952 
953 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
954 		unsigned long, enum meminit_context, struct vmem_altmap *, int,
955 		bool);
956 
957 #ifdef CONFIG_SPARSEMEM
958 void sparse_init(void);
959 #else
960 static inline void sparse_init(void) {}
961 #endif /* CONFIG_SPARSEMEM */
962 
963 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
964 
965 /*
966  * in mm/compaction.c
967  */
968 /*
969  * compact_control is used to track pages being migrated and the free pages
970  * they are being migrated to during memory compaction. The free_pfn starts
971  * at the end of a zone and migrate_pfn begins at the start. Movable pages
972  * are moved to the end of a zone during a compaction run and the run
973  * completes when free_pfn <= migrate_pfn
974  */
975 struct compact_control {
976 	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
977 	struct list_head migratepages;	/* List of pages being migrated */
978 	unsigned int nr_freepages;	/* Number of isolated free pages */
979 	unsigned int nr_migratepages;	/* Number of pages to migrate */
980 	unsigned long free_pfn;		/* isolate_freepages search base */
981 	/*
982 	 * Acts as an in/out parameter to page isolation for migration.
983 	 * isolate_migratepages uses it as a search base.
984 	 * isolate_migratepages_block will update the value to the next pfn
985 	 * after the last isolated one.
986 	 */
987 	unsigned long migrate_pfn;
988 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
989 	struct zone *zone;
990 	unsigned long total_migrate_scanned;
991 	unsigned long total_free_scanned;
992 	unsigned short fast_search_fail;/* failures to use free list searches */
993 	short search_order;		/* order to start a fast search at */
994 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
995 	int order;			/* order a direct compactor needs */
996 	int migratetype;		/* migratetype of direct compactor */
997 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
998 	const int highest_zoneidx;	/* zone index of a direct compactor */
999 	enum migrate_mode mode;		/* Async or sync migration mode */
1000 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
1001 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
1002 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
1003 	bool direct_compaction;		/* False from kcompactd or /proc/... */
1004 	bool proactive_compaction;	/* kcompactd proactive compaction */
1005 	bool whole_zone;		/* Whole zone should/has been scanned */
1006 	bool contended;			/* Signal lock contention */
1007 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
1008 					 * when there are potentially transient
1009 					 * isolation or migration failures to
1010 					 * ensure forward progress.
1011 					 */
1012 	bool alloc_contig;		/* alloc_contig_range allocation */
1013 };
1014 
1015 /*
1016  * Used in direct compaction so that a suitable page is captured immediately
1017  * as it is created during the free path, rather than going to the freelists.
1018  */
1019 struct capture_control {
1020 	struct compact_control *cc;
1021 	struct page *page;
1022 };
1023 
1024 unsigned long
1025 isolate_freepages_range(struct compact_control *cc,
1026 			unsigned long start_pfn, unsigned long end_pfn);
1027 int
1028 isolate_migratepages_range(struct compact_control *cc,
1029 			   unsigned long low_pfn, unsigned long end_pfn);
1030 
1031 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1032 void init_cma_reserved_pageblock(struct page *page);
1033 
1034 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
1035 
1036 struct cma;
1037 
1038 #ifdef CONFIG_CMA
1039 bool cma_validate_zones(struct cma *cma);
1040 void *cma_reserve_early(struct cma *cma, unsigned long size);
1041 void init_cma_pageblock(struct page *page);
1042 #else
1043 static inline bool cma_validate_zones(struct cma *cma)
1044 {
1045 	return false;
1046 }
1047 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
1048 {
1049 	return NULL;
1050 }
1051 static inline void init_cma_pageblock(struct page *page)
1052 {
1053 }
1054 #endif
1055 
1056 
1057 int find_suitable_fallback(struct free_area *area, unsigned int order,
1058 			   int migratetype, bool claimable);
1059 
1060 static inline bool free_area_empty(struct free_area *area, int migratetype)
1061 {
1062 	return list_empty(&area->free_list[migratetype]);
1063 }
1064 
1065 /* mm/util.c */
1066 struct anon_vma *folio_anon_vma(const struct folio *folio);
1067 
1068 #ifdef CONFIG_MMU
1069 void unmap_mapping_folio(struct folio *folio);
1070 extern long populate_vma_page_range(struct vm_area_struct *vma,
1071 		unsigned long start, unsigned long end, int *locked);
1072 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
1073 		unsigned long end, bool write, int *locked);
1074 bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
1075 		unsigned long bytes);
1076 
1077 /*
1078  * NOTE: This function can't tell whether the folio is "fully mapped" in the
1079  * range.
1080  * "fully mapped" means all the pages of the folio are associated with the
1081  * page table of the range, while this function just checks whether the folio
1082  * range falls within the range [start, end). The caller needs to check the
1083  * page table if it cares about the page table association.
1084  *
1085  * Typical usage (like mlock or madvise) is:
1086  * The caller knows at least one page of the folio is associated with the page
1087  * table of the VMA, and that the range [start, end) intersects the VMA range.
1088  * The caller wants to know whether the folio is fully associated with the
1089  * range. It calls this function first to check whether the folio is in the
1090  * range, then checks the page table to know whether the folio is fully mapped.
1091  */
1092 static inline bool
1093 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
1094 		unsigned long start, unsigned long end)
1095 {
1096 	pgoff_t pgoff, addr;
1097 	unsigned long vma_pglen = vma_pages(vma);
1098 
1099 	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
1100 	if (start > end)
1101 		return false;
1102 
1103 	if (start < vma->vm_start)
1104 		start = vma->vm_start;
1105 
1106 	if (end > vma->vm_end)
1107 		end = vma->vm_end;
1108 
1109 	pgoff = folio_pgoff(folio);
1110 
1111 	/* if folio start address is not in vma range */
1112 	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
1113 		return false;
1114 
1115 	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1116 
1117 	return !(addr < start || end - addr < folio_size(folio));
1118 }
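/*
 * Illustrative sketch of the "typical usage" described above (placeholder
 * names, not a real caller): an mlock/madvise-style walker first does the
 * cheap range check, then resorts to the page table when it must know the
 * folio is actually fully mapped:
 *
 *	if (!folio_within_range(folio, vma, start, end))
 *		return;
 *
 *	... folio lies inside [start, end); walk the PTEs here if "fully
 *	    mapped" (every page present in this VMA's page table) matters ...
 */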
1119 
1120 static inline bool
1121 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
1122 {
1123 	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
1124 }
1125 
1126 /*
1127  * mlock_vma_folio() and munlock_vma_folio():
1128  * should be called with vma's mmap_lock held for read or write,
1129  * under page table lock for the pte/pmd being added or removed.
1130  *
1131  * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
1132  * the end of folio_remove_rmap_*(); but new anon folios are managed by
1133  * folio_add_lru_vma() calling mlock_new_folio().
1134  */
1135 void mlock_folio(struct folio *folio);
1136 static inline void mlock_vma_folio(struct folio *folio,
1137 				struct vm_area_struct *vma)
1138 {
1139 	/*
1140 	 * The VM_SPECIAL check here serves two purposes.
1141 	 * 1) VM_IO check prevents migration from double-counting during mlock.
1142 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
1143 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
1144 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
1145 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
1146 	 */
1147 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
1148 		mlock_folio(folio);
1149 }
1150 
1151 void munlock_folio(struct folio *folio);
1152 static inline void munlock_vma_folio(struct folio *folio,
1153 					struct vm_area_struct *vma)
1154 {
1155 	/*
1156 	 * munlock if this function is called. Ideally, we should only
1157 	 * munlock if some page of the folio is unmapped from the VMA,
1158 	 * leaving the folio no longer fully mapped to the VMA.
1159 	 *
1160 	 * But it's not easy to confirm that's the situation. So we
1161 	 * always munlock the folio and page reclaim will correct it
1162 	 * if it's wrong.
1163 	 */
1164 	if (unlikely(vma->vm_flags & VM_LOCKED))
1165 		munlock_folio(folio);
1166 }
1167 
1168 void mlock_new_folio(struct folio *folio);
1169 bool need_mlock_drain(int cpu);
1170 void mlock_drain_local(void);
1171 void mlock_drain_remote(int cpu);
1172 
1173 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1174 
1175 /**
1176  * vma_address - Find the virtual address a page range is mapped at
1177  * @vma: The vma which maps this object.
1178  * @pgoff: The page offset within its object.
1179  * @nr_pages: The number of pages to consider.
1180  *
1181  * If any page in this range is mapped by this VMA, return the first address
1182  * where any of these pages appear.  Otherwise, return -EFAULT.
1183  */
1184 static inline unsigned long vma_address(const struct vm_area_struct *vma,
1185 		pgoff_t pgoff, unsigned long nr_pages)
1186 {
1187 	unsigned long address;
1188 
1189 	if (pgoff >= vma->vm_pgoff) {
1190 		address = vma->vm_start +
1191 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1192 		/* Check for address beyond vma (or wrapped through 0?) */
1193 		if (address < vma->vm_start || address >= vma->vm_end)
1194 			address = -EFAULT;
1195 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
1196 		/* Test above avoids possibility of wrap to 0 on 32-bit */
1197 		address = vma->vm_start;
1198 	} else {
1199 		address = -EFAULT;
1200 	}
1201 	return address;
1202 }
1203 
1204 /*
1205  * Then at what user virtual address will none of the range be found in vma?
1206  * Assumes that vma_address() already returned a good starting address.
1207  */
1208 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
1209 {
1210 	struct vm_area_struct *vma = pvmw->vma;
1211 	pgoff_t pgoff;
1212 	unsigned long address;
1213 
1214 	/* Common case, plus ->pgoff is invalid for KSM */
1215 	if (pvmw->nr_pages == 1)
1216 		return pvmw->address + PAGE_SIZE;
1217 
1218 	pgoff = pvmw->pgoff + pvmw->nr_pages;
1219 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1220 	/* Check for address beyond vma (or wrapped through 0?) */
1221 	if (address < vma->vm_start || address > vma->vm_end)
1222 		address = vma->vm_end;
1223 	return address;
1224 }
1225 
1226 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
1227 						    struct file *fpin)
1228 {
1229 	int flags = vmf->flags;
1230 
1231 	if (fpin)
1232 		return fpin;
1233 
1234 	/*
1235 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
1236 	 * anything, so we only pin the file and drop the mmap_lock if
1237 	 * FAULT_FLAG_ALLOW_RETRY is set and this is the first attempt.
1238 	 */
1239 	if (fault_flag_allow_retry_first(flags) &&
1240 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
1241 		fpin = get_file(vmf->vma->vm_file);
1242 		release_fault_lock(vmf);
1243 	}
1244 	return fpin;
1245 }
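/*
 * Illustrative fault-handler pattern (sketch only): before starting I/O that
 * may sleep, try to pin the file and drop the mmap lock, then ask the caller
 * to retry once the I/O has been kicked off:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start readahead or wait for the folio lock ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */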
1246 #else /* !CONFIG_MMU */
1247 static inline void unmap_mapping_folio(struct folio *folio) { }
1248 static inline void mlock_new_folio(struct folio *folio) { }
1249 static inline bool need_mlock_drain(int cpu) { return false; }
1250 static inline void mlock_drain_local(void) { }
1251 static inline void mlock_drain_remote(int cpu) { }
1252 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1253 {
1254 }
1255 #endif /* !CONFIG_MMU */
1256 
1257 /* Memory initialisation debug and verification */
1258 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1259 DECLARE_STATIC_KEY_TRUE(deferred_pages);
1260 
1261 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1262 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1263 
1264 void init_deferred_page(unsigned long pfn, int nid);
1265 
1266 enum mminit_level {
1267 	MMINIT_WARNING,
1268 	MMINIT_VERIFY,
1269 	MMINIT_TRACE
1270 };
1271 
1272 #ifdef CONFIG_DEBUG_MEMORY_INIT
1273 
1274 extern int mminit_loglevel;
1275 
1276 #define mminit_dprintk(level, prefix, fmt, arg...) \
1277 do { \
1278 	if (level < mminit_loglevel) { \
1279 		if (level <= MMINIT_WARNING) \
1280 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
1281 		else \
1282 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1283 	} \
1284 } while (0)
1285 
1286 extern void mminit_verify_pageflags_layout(void);
1287 extern void mminit_verify_zonelist(void);
1288 #else
1289 
1290 static inline void mminit_dprintk(enum mminit_level level,
1291 				const char *prefix, const char *fmt, ...)
1292 {
1293 }
1294 
1295 static inline void mminit_verify_pageflags_layout(void)
1296 {
1297 }
1298 
1299 static inline void mminit_verify_zonelist(void)
1300 {
1301 }
1302 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1303 
1304 #define NODE_RECLAIM_NOSCAN	-2
1305 #define NODE_RECLAIM_FULL	-1
1306 #define NODE_RECLAIM_SOME	0
1307 #define NODE_RECLAIM_SUCCESS	1
1308 
1309 #ifdef CONFIG_NUMA
1310 extern int node_reclaim_mode;
1311 
1312 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1313 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1314 #else
1315 #define node_reclaim_mode 0
1316 
1317 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1318 				unsigned int order)
1319 {
1320 	return NODE_RECLAIM_NOSCAN;
1321 }
1322 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1323 {
1324 	return NUMA_NO_NODE;
1325 }
1326 #endif
1327 
1328 static inline bool node_reclaim_enabled(void)
1329 {
1330 	/* Is any node_reclaim_mode bit set? */
1331 	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
1332 }
1333 
1334 /*
1335  * mm/memory-failure.c
1336  */
1337 #ifdef CONFIG_MEMORY_FAILURE
1338 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1339 void shake_folio(struct folio *folio);
1340 typedef int hwpoison_filter_func_t(struct page *p);
1341 void hwpoison_filter_register(hwpoison_filter_func_t *filter);
1342 void hwpoison_filter_unregister(void);
1343 
1344 #define MAGIC_HWPOISON	0x48575053U	/* HWPS */
1345 void SetPageHWPoisonTakenOff(struct page *page);
1346 void ClearPageHWPoisonTakenOff(struct page *page);
1347 bool take_page_off_buddy(struct page *page);
1348 bool put_page_back_buddy(struct page *page);
1349 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1350 void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
1351 		     struct vm_area_struct *vma, struct list_head *to_kill,
1352 		     unsigned long ksm_addr);
1353 unsigned long page_mapped_in_vma(const struct page *page,
1354 		struct vm_area_struct *vma);
1355 
1356 #else
1357 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
1358 {
1359 	return -EBUSY;
1360 }
1361 #endif
1362 
1363 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
1364         unsigned long, unsigned long,
1365         unsigned long, unsigned long);
1366 
1367 extern void set_pageblock_order(void);
1368 unsigned long reclaim_pages(struct list_head *folio_list);
1369 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1370 					    struct list_head *folio_list);
1371 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1372 #define ALLOC_WMARK_MIN		WMARK_MIN
1373 #define ALLOC_WMARK_LOW		WMARK_LOW
1374 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1375 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1376 
1377 /* Mask to get the watermark bits */
1378 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1379 
1380 /*
1381  * Only MMU archs have async OOM victim reclaim - aka the oom_reaper - so we
1382  * cannot assume that reduced access to memory reserves is sufficient for
1383  * !MMU.
1384  */
1385 #ifdef CONFIG_MMU
1386 #define ALLOC_OOM		0x08
1387 #else
1388 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
1389 #endif
1390 
1391 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
1392 				       * to 25% of the min watermark or
1393 				       * 62.5% if __GFP_HIGH is set.
1394 				       */
1395 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
1396 				       * of the min watermark.
1397 				       */
1398 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
1399 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
1400 #ifdef CONFIG_ZONE_DMA32
1401 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
1402 #else
1403 #define ALLOC_NOFRAGMENT	  0x0
1404 #endif
1405 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1406 #define ALLOC_TRYLOCK		0x400 /* Only use spin_trylock in allocation path */
1407 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1408 
1409 /* Flags that allow allocations below the min watermark. */
1410 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1411 
1412 enum ttu_flags;
1413 struct tlbflush_unmap_batch;
1414 
1415 
1416 /*
1417  * only for MM internal work items which do not depend on
1418  * any allocations or locks which might depend on allocations
1419  */
1420 extern struct workqueue_struct *mm_percpu_wq;
1421 
1422 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1423 void try_to_unmap_flush(void);
1424 void try_to_unmap_flush_dirty(void);
1425 void flush_tlb_batched_pending(struct mm_struct *mm);
1426 #else
1427 static inline void try_to_unmap_flush(void)
1428 {
1429 }
1430 static inline void try_to_unmap_flush_dirty(void)
1431 {
1432 }
1433 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1434 {
1435 }
1436 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1437 
1438 extern const struct trace_print_flags pageflag_names[];
1439 extern const struct trace_print_flags vmaflag_names[];
1440 extern const struct trace_print_flags gfpflag_names[];
1441 
1442 void setup_zone_pageset(struct zone *zone);
1443 
1444 struct migration_target_control {
1445 	int nid;		/* preferred node id */
1446 	nodemask_t *nmask;
1447 	gfp_t gfp_mask;
1448 	enum migrate_reason reason;
1449 };
1450 
1451 /*
1452  * mm/filemap.c
1453  */
1454 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1455 			      struct folio *folio, loff_t fpos, size_t size);
1456 
1457 /*
1458  * mm/vmalloc.c
1459  */
1460 #ifdef CONFIG_MMU
1461 void __init vmalloc_init(void);
1462 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1463 	pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask);
1464 unsigned int get_vm_area_page_order(struct vm_struct *vm);
1465 #else
1466 static inline void vmalloc_init(void)
1467 {
1468 }
1469 
1470 static inline
1471 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1472 	pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
1473 {
1474 	return -EINVAL;
1475 }
1476 #endif
1477 
1478 void clear_vm_uninitialized_flag(struct vm_struct *vm);
1479 
1480 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1481 			       unsigned long end, pgprot_t prot,
1482 			       struct page **pages, unsigned int page_shift);
1483 
1484 void vunmap_range_noflush(unsigned long start, unsigned long end);
1485 
1486 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1487 
1488 static inline bool vma_is_single_threaded_private(struct vm_area_struct *vma)
1489 {
1490 	if (vma->vm_flags & VM_SHARED)
1491 		return false;
1492 
1493 	return atomic_read(&vma->vm_mm->mm_users) == 1;
1494 }
1495 
1496 #ifdef CONFIG_NUMA_BALANCING
1497 bool folio_can_map_prot_numa(struct folio *folio, struct vm_area_struct *vma,
1498 		bool is_private_single_threaded);
1499 
1500 #else
1501 static inline bool folio_can_map_prot_numa(struct folio *folio,
1502 		struct vm_area_struct *vma, bool is_private_single_threaded)
1503 {
1504 	return false;
1505 }
1506 #endif
1507 
1508 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1509 		      unsigned long addr, int *flags, bool writable,
1510 		      int *last_cpupid);
1511 
1512 void free_zone_device_folio(struct folio *folio);
1513 int migrate_device_coherent_folio(struct folio *folio);
1514 
1515 struct vm_struct *__get_vm_area_node(unsigned long size,
1516 				     unsigned long align, unsigned long shift,
1517 				     unsigned long vm_flags, unsigned long start,
1518 				     unsigned long end, int node, gfp_t gfp_mask,
1519 				     const void *caller);
1520 
1521 /*
1522  * mm/gup.c
1523  */
1524 int __must_check try_grab_folio(struct folio *folio, int refs,
1525 				unsigned int flags);
1526 
1527 /*
1528  * mm/huge_memory.c
1529  */
1530 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1531 	       pud_t *pud, bool write);
1532 bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1533 	       pmd_t *pmd, bool write);
1534 
1535 /*
1536  * Parses a string with memory-size suffixes (e.g. K/M/G) into a page order.
1537  * Useful for parsing kernel parameters.
1538  */
1539 static inline int get_order_from_str(const char *size_str,
1540 				     unsigned long valid_orders)
1541 {
1542 	unsigned long size;
1543 	char *endptr;
1544 	int order;
1545 
1546 	size = memparse(size_str, &endptr);
1547 
1548 	if (!is_power_of_2(size))
1549 		return -EINVAL;
1550 	order = get_order(size);
1551 	if (BIT(order) & ~valid_orders)
1552 		return -EINVAL;
1553 
1554 	return order;
1555 }
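/*
 * Worked example (assumes 4KiB base pages): "2M" parses to size == 0x200000,
 * which is a power of two, and get_order(0x200000) == 9; if BIT(9) is
 * contained in @valid_orders the function returns 9, otherwise -EINVAL.
 * A non-power-of-two string such as "3M" is rejected with -EINVAL.
 */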
1556 
1557 enum {
1558 	/* mark page accessed */
1559 	FOLL_TOUCH = 1 << 16,
1560 	/* a retry, previous pass started an IO */
1561 	FOLL_TRIED = 1 << 17,
1562 	/* we are working on non-current tsk/mm */
1563 	FOLL_REMOTE = 1 << 18,
1564 	/* pages must be released via unpin_user_page */
1565 	FOLL_PIN = 1 << 19,
1566 	/* gup_fast: prevent fall-back to slow gup */
1567 	FOLL_FAST_ONLY = 1 << 20,
1568 	/* allow unlocking the mmap lock */
1569 	FOLL_UNLOCKABLE = 1 << 21,
1570 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1571 	FOLL_MADV_POPULATE = 1 << 22,
1572 };
1573 
1574 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1575 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1576 			    FOLL_MADV_POPULATE)
1577 
1578 /*
1579  * Indicates for which pages that are write-protected in the page table,
1580  * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1581  * GUP pin will remain consistent with the pages mapped into the page tables
1582  * of the MM.
1583  *
1584  * Temporary unmapping of PageAnonExclusive() pages or clearing of
1585  * PageAnonExclusive() has to protect against concurrent GUP:
1586  * * Ordinary GUP: Using the PT lock
1587  * * GUP-fast and fork(): mm->write_protect_seq
1588  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1589  *    folio_try_share_anon_rmap_*()
1590  *
1591  * Must be called with the (sub)page that's actually referenced via the
1592  * page table entry, which might not necessarily be the head page for a
1593  * PTE-mapped THP.
1594  *
1595  * If the vma is NULL, we're coming from the GUP-fast path and might have
1596  * to fall back to the slow path just to look up the vma.
1597  */
1598 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1599 				    unsigned int flags, struct page *page)
1600 {
1601 	/*
1602 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1603 	 * has to be writable -- and if it references (part of) an anonymous
1604 	 * folio, that part is required to be marked exclusive.
1605 	 */
1606 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1607 		return false;
1608 	/*
1609 	 * Note: PageAnon(page) is stable until the page is actually getting
1610 	 * freed.
1611 	 */
1612 	if (!PageAnon(page)) {
1613 		/*
1614 		 * We only care about R/O long-term pinning: R/O short-term
1615 		 * pinning does not have the semantics to observe successive
1616 		 * changes through the process page tables.
1617 		 */
1618 		if (!(flags & FOLL_LONGTERM))
1619 			return false;
1620 
1621 		/* We really need the vma ... */
1622 		if (!vma)
1623 			return true;
1624 
1625 		/*
1626 		 * ... because we only care about writable private ("COW")
1627 		 * mappings where we have to break COW early.
1628 		 */
1629 		return is_cow_mapping(vma->vm_flags);
1630 	}
1631 
1632 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1633 	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1634 		smp_rmb();
1635 
1636 	/*
1637 	 * Note that KSM pages cannot be exclusive, and consequently,
1638 	 * cannot get pinned.
1639 	 */
1640 	return !PageAnonExclusive(page);
1641 }
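/*
 * Illustrative sketch (not part of this header): how a GUP-style page walker
 * consults gup_must_unshare() when a R/O pin is requested, mirroring the
 * callers in mm/gup.c. The function name and error handling are hypothetical;
 * -EMLINK is the convention used to make the caller retry with
 * FAULT_FLAG_UNSHARE.
 */
#if 0 /* example only */
static struct page *example_follow_page_pte(struct vm_area_struct *vma,
		pte_t pte, unsigned int flags)
{
	struct page *page = pte_page(pte);

	/* A R/O pin of a non-exclusive anon page must unshare first. */
	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	return page;
}
#endif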
1642 
1643 extern bool mirrored_kernelcore;
1644 bool memblock_has_mirror(void);
1645 void memblock_free_all(void);
1646 
1647 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1648 					  unsigned long start, unsigned long end,
1649 					  pgoff_t pgoff)
1650 {
1651 	vma->vm_start = start;
1652 	vma->vm_end = end;
1653 	vma->vm_pgoff = pgoff;
1654 }
1655 
1656 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1657 {
1658 	/*
1659 	 * NOTE: we must check this before testing VM_SOFTDIRTY, because
1660 	 * when soft-dirty is not compiled in, VM_SOFTDIRTY is defined as
1661 	 * 0x0, so the !(vm_flags & VM_SOFTDIRTY) check below would always
1662 	 * be true.
1663 	 */
1664 	if (!pgtable_supports_soft_dirty())
1665 		return false;
1666 
1667 	/*
1668 	 * Soft-dirty is kind of special: its tracking is enabled when the
1669 	 * VM_SOFTDIRTY vma flag is *not* set.
1670 	 */
1671 	return !(vma->vm_flags & VM_SOFTDIRTY);
1672 }
1673 
1674 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1675 {
1676 	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1677 }
1678 
1679 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1680 {
1681 	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1682 }
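/*
 * Illustrative sketch (not part of this header): the write-protect helpers
 * above are typically consulted before making an entry writable, as the
 * mprotect/mremap paths do. The helper name is hypothetical.
 */
#if 0 /* example only */
static bool example_can_make_pte_writable(struct vm_area_struct *vma, pte_t pte)
{
	/* Keep the PTE write-protected so the next write marks it soft-dirty. */
	if (pte_needs_soft_dirty_wp(vma, pte))
		return false;

	return true;
}
#endif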
1683 
1684 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1685 				unsigned long zone, int nid);
1686 void __meminit __init_page_from_nid(unsigned long pfn, int nid);
1687 
1688 /* shrinker related functions */
1689 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1690 			  int priority);
1691 
1692 int shmem_add_to_page_cache(struct folio *folio,
1693 			    struct address_space *mapping,
1694 			    pgoff_t index, void *expected, gfp_t gfp);
1695 int shmem_inode_acct_blocks(struct inode *inode, long pages);
1696 bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped);
1697 
1698 #ifdef CONFIG_SHRINKER_DEBUG
1699 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1700 			struct shrinker *shrinker, const char *fmt, va_list ap)
1701 {
1702 	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1703 
1704 	return shrinker->name ? 0 : -ENOMEM;
1705 }
1706 
1707 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1708 {
1709 	kfree_const(shrinker->name);
1710 	shrinker->name = NULL;
1711 }
1712 
1713 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1714 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1715 					      int *debugfs_id);
1716 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1717 				    int debugfs_id);
1718 #else /* CONFIG_SHRINKER_DEBUG */
1719 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1720 {
1721 	return 0;
1722 }
1723 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1724 					      const char *fmt, va_list ap)
1725 {
1726 	return 0;
1727 }
1728 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1729 {
1730 }
1731 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1732 						     int *debugfs_id)
1733 {
1734 	*debugfs_id = -1;
1735 	return NULL;
1736 }
1737 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1738 					   int debugfs_id)
1739 {
1740 }
1741 #endif /* CONFIG_SHRINKER_DEBUG */
1742 
1743 /* Only track the nodes of mappings with shadow entries */
1744 void workingset_update_node(struct xa_node *node);
1745 extern struct list_lru shadow_nodes;
1746 #define mapping_set_update(xas, mapping) do {			\
1747 	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {	\
1748 		xas_set_update(xas, workingset_update_node);	\
1749 		xas_set_lru(xas, &shadow_nodes);		\
1750 	}							\
1751 } while (0)
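/*
 * Illustrative sketch (not part of this header): hooking up shadow-node
 * tracking before storing into a mapping's XArray, as the page cache
 * insertion paths in mm/filemap.c do. The function name is hypothetical.
 */
#if 0 /* example only */
static void example_store_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);

	/* Only non-DAX, non-shmem mappings get workingset node tracking. */
	mapping_set_update(&xas, mapping);

	xas_lock_irq(&xas);
	xas_store(&xas, folio);
	xas_unlock_irq(&xas);
}
#endif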
1752 
1753 /* mremap.c */
1754 unsigned long move_page_tables(struct pagetable_move_control *pmc);
1755 
1756 #ifdef CONFIG_UNACCEPTED_MEMORY
1757 void accept_page(struct page *page);
1758 #else /* CONFIG_UNACCEPTED_MEMORY */
1759 static inline void accept_page(struct page *page)
1760 {
1761 }
1762 #endif /* CONFIG_UNACCEPTED_MEMORY */
1763 
1764 /* pagewalk.c */
1765 int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
1766 		unsigned long end, const struct mm_walk_ops *ops,
1767 		void *private);
1768 int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
1769 		unsigned long end, const struct mm_walk_ops *ops,
1770 		void *private);
1771 int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
1772 			  unsigned long end, const struct mm_walk_ops *ops,
1773 			  pgd_t *pgd, void *private);
1774 
1775 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
1776 int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
1777 
1778 void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn);
1779 int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
1780 		unsigned long pfn, unsigned long size, pgprot_t pgprot);
1781 
1782 static inline void io_remap_pfn_range_prepare(struct vm_area_desc *desc,
1783 		unsigned long orig_pfn, unsigned long size)
1784 {
1785 	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
1786 
1787 	remap_pfn_range_prepare(desc, pfn);
1788 }
1789 
1790 static inline int io_remap_pfn_range_complete(struct vm_area_struct *vma,
1791 		unsigned long addr, unsigned long orig_pfn, unsigned long size,
1792 		pgprot_t orig_prot)
1793 {
1794 	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
1795 	const pgprot_t prot = pgprot_decrypted(orig_prot);
1796 
1797 	return remap_pfn_range_complete(vma, addr, pfn, size, prot);
1798 }
1799 
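/*
 * The *_notify() wrappers below fold the young/accessed state reported by
 * secondary MMUs (via MMU notifiers) into the primary MMU result, so callers
 * get a single answer to "was this range referenced?".
 */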
1800 #ifdef CONFIG_MMU_NOTIFIER
1801 static inline int clear_flush_young_ptes_notify(struct vm_area_struct *vma,
1802 		unsigned long addr, pte_t *ptep, unsigned int nr)
1803 {
1804 	int young;
1805 
1806 	young = clear_flush_young_ptes(vma, addr, ptep, nr);
1807 	young |= mmu_notifier_clear_flush_young(vma->vm_mm, addr,
1808 						addr + nr * PAGE_SIZE);
1809 	return young;
1810 }
1811 
1812 static inline int pmdp_clear_flush_young_notify(struct vm_area_struct *vma,
1813 		unsigned long addr, pmd_t *pmdp)
1814 {
1815 	int young;
1816 
1817 	young = pmdp_clear_flush_young(vma, addr, pmdp);
1818 	young |= mmu_notifier_clear_flush_young(vma->vm_mm, addr, addr + PMD_SIZE);
1819 	return young;
1820 }
1821 
1822 static inline int test_and_clear_young_ptes_notify(struct vm_area_struct *vma,
1823 		unsigned long addr, pte_t *ptep, unsigned int nr)
1824 {
1825 	int young;
1826 
1827 	young = test_and_clear_young_ptes(vma, addr, ptep, nr);
1828 	young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + nr * PAGE_SIZE);
1829 	return young;
1830 }
1831 
1832 static inline int pmdp_test_and_clear_young_notify(struct vm_area_struct *vma,
1833 		unsigned long addr, pmd_t *pmdp)
1834 {
1835 	int young;
1836 
1837 	young = pmdp_test_and_clear_young(vma, addr, pmdp);
1838 	young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PMD_SIZE);
1839 	return young;
1840 }
1841 
1842 #else /* CONFIG_MMU_NOTIFIER */
1843 
1844 #define clear_flush_young_ptes_notify	clear_flush_young_ptes
1845 #define pmdp_clear_flush_young_notify	pmdp_clear_flush_young
1846 #define test_and_clear_young_ptes_notify	test_and_clear_young_ptes
1847 #define pmdp_test_and_clear_young_notify	pmdp_test_and_clear_young
1848 
1849 #endif /* CONFIG_MMU_NOTIFIER */
1850 
1851 #endif	/* __MM_INTERNAL_H */
1852