/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>

/* Internal core VMA manipulation functions. */
#include "vma.h"

struct folio_batch;

/*
 * Maintains state across a page table move. The operation assumes both source
 * and destination VMAs already exist and are specified by the user.
 *
 * Partial moves are permitted, but the old and new ranges must both reside
 * within a VMA.
 *
 * The mmap lock must be held in write mode, and VMA write locks must be held
 * on any VMA that is visible.
 *
 * Use the PAGETABLE_MOVE() macro to initialise this struct.
 *
 * The old_addr and new_addr fields are updated as the page table move is
 * executed.
 *
 * NOTE: The page table move is effected by reading from [old_addr, old_end),
 * and old_addr may be updated for better page table alignment, so len_in
 * represents the length of the range being copied as specified by the user.
 */
struct pagetable_move_control {
	struct vm_area_struct *old; /* Source VMA. */
	struct vm_area_struct *new; /* Destination VMA. */
	unsigned long old_addr; /* Address from which the move begins. */
	unsigned long old_end; /* Exclusive address at which old range ends. */
	unsigned long new_addr; /* Address to move page tables to. */
	unsigned long len_in; /* Bytes to remap specified by user. */

	bool need_rmap_locks; /* Do rmap locks need to be taken? */
	bool for_stack; /* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
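
/*
 * Illustrative sketch, not a quote of any caller (the variable names shown
 * are hypothetical): a typical user declares the control struct on the
 * stack via the macro, which also derives old_end as old_addr + len:
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 *	moved = move_page_tables(&pmc);
 *
 * mremap()-style callers hold the mmap lock in write mode and take VMA
 * write locks before issuing the move, as described above.
 */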

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Unlike WARN_ON_ONCE(), no warning is issued if __GFP_NOWARN is set
 * in @gfp.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data..once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
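
/*
 * Illustrative use (a sketch of the pattern, not a quote of a specific call
 * site): warn once on a bogus order unless the caller requested silence:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */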

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
		return -1;
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap, as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore().
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
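
/*
 * Example of the intent (a sketch): for an order-2 folio whose first page
 * sits at swap offset 8, any entry within the folio (offsets 8..11) aligns
 * back down to the folio's first entry:
 *
 *	swp_entry_t first = folio_swap(entry, folio);
 *	// first.val == ALIGN_DOWN(entry.val, 4)
 *
 * This relies on folio_nr_pages() being a power of two and the swap offset
 * occupying the low bits of the swp_entry_t value.
 */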

static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
}

/*
 * This is a file-backed mapping, and is about to be memory mapped - invoke its
 * mmap hook and safely handle error conditions. On error, VMA hooks will be
 * mutated.
 *
 * @file: File which backs the mapping.
 * @vma:  VMA which we are mapping.
 *
 * Returns: 0 on success, an error code otherwise.
 */
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	int err = vfs_mmap(file, vma);

	if (likely(!err))
		return 0;

	/*
	 * OK, we tried to call the file hook for mmap(), but an error
	 * arose. The mapping is in an inconsistent state and we must not invoke
	 * any further hooks on it.
	 */
	vma->vm_ops = &vma_dummy_vm_ops;

	return err;
}

/*
 * If the VMA has a close hook then close it, and since closing it might leave
 * it in an inconsistent state which makes the use of any hooks suspect, clear
 * them down by installing dummy empty hooks.
 */
static inline void vma_close(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close) {
		vma->vm_ops->close(vma);

		/*
		 * The mapping is in an inconsistent state, and no further hooks
		 * may be invoked upon it.
		 */
		vma->vm_ops = &vma_dummy_vm_ops;
	}
}

/* unmap_vmas is in mm/memory.c */
void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap);

#ifdef CONFIG_MMU

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
{
	return down_write_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
{
	return down_read_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}

struct anon_vma *folio_get_anon_vma(const struct folio *folio);

/* Operations which modify VMAs. */
enum vma_operation {
	VMA_OP_SPLIT,
	VMA_OP_MERGE_UNFAULTED,
	VMA_OP_REMAP,
	VMA_OP_FORK,
};

int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
	enum vma_operation operation);
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma);
int  __anon_vma_prepare(struct vm_area_struct *vma);
void unlink_anon_vmas(struct vm_area_struct *vma);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}
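
/*
 * Illustrative sketch of the usual fault-path pattern (simplified; the
 * folio-allocation call shown is just one possible consumer): make sure an
 * anon_vma exists before installing a new anonymous page:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	folio = vma_alloc_zeroed_movable_folio(vma, addr);
 */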

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs respecting the dirty bit. */
#define FPB_RESPECT_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs respecting the soft-dirty bit. */
#define FPB_RESPECT_SOFT_DIRTY		((__force fpb_t)BIT(1))

/* Compare PTEs respecting the writable bit. */
#define FPB_RESPECT_WRITE		((__force fpb_t)BIT(2))

/*
 * Merge PTE write bits: if any PTE in the batch is writable, modify the
 * PTE at @ptentp to be writable.
 */
#define FPB_MERGE_WRITE			((__force fpb_t)BIT(3))

/*
 * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
 * modify the PTE at @ptentp to be young or dirty, respectively.
 */
#define FPB_MERGE_YOUNG_DIRTY		((__force fpb_t)BIT(4))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (!(flags & FPB_RESPECT_DIRTY))
		pte = pte_mkclean(pte);
	if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
		pte = pte_clear_soft_dirty(pte);
	if (likely(!(flags & FPB_RESPECT_WRITE)))
		pte = pte_wrprotect(pte);
	return pte_mkold(pte);
}

/**
 * folio_pte_batch_flags - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
 * @ptep: Page table pointer for the first entry.
 * @ptentp: Pointer to a COPY of the first page table entry whose flags this
 *	    function updates based on @flags if appropriate.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio in a single VMA and a single page table.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
 * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
 *
 * @ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single VMA and
 * a single page table.
 *
 * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
 * be updated: it's crucial that a pointer to a COPY of the first
 * page table entry, obtained through ptep_get(), is provided as @ptentp.
 *
 * This function will be inlined to optimize based on the input parameters;
 * consider using folio_pte_batch() instead if applicable.
 *
 * Return: the number of table entries in the batch.
 */
static inline unsigned int folio_pte_batch_flags(struct folio *folio,
		struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
		unsigned int max_nr, fpb_t flags)
{
	bool any_writable = false, any_young = false, any_dirty = false;
	pte_t expected_pte, pte = *ptentp;
	unsigned int nr, cur_nr;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
	/*
	 * Ensure this is a pointer to a copy, not a pointer into a page table.
	 * If this is a stack value, it won't be a valid virtual address, but
	 * that's fine because it also cannot be pointing into the page table.
	 */
	VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));

	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
	max_nr = min_t(unsigned long, max_nr,
		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));

	nr = pte_batch_hint(ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = ptep + nr;

	while (nr < max_nr) {
		pte = ptep_get(ptep);

		if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
			break;

		if (flags & FPB_MERGE_WRITE)
			any_writable |= pte_write(pte);
		if (flags & FPB_MERGE_YOUNG_DIRTY) {
			any_young |= pte_young(pte);
			any_dirty |= pte_dirty(pte);
		}

		cur_nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
		ptep += cur_nr;
		nr += cur_nr;
	}

	if (any_writable)
		*ptentp = pte_mkwrite(*ptentp, vma);
	if (any_young)
		*ptentp = pte_mkyoung(*ptentp);
	if (any_dirty)
		*ptentp = pte_mkdirty(*ptentp);

	return min(nr, max_nr);
}
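
/*
 * Illustrative sketch of a caller (simplified; locking, bounds and the
 * surrounding loop are the caller's responsibility): batch over the PTEs
 * mapping one large folio, merging the young/dirty bits into a local copy:
 *
 *	pte_t ptent = ptep_get(ptep);
 *	unsigned int nr = folio_pte_batch_flags(folio, vma, ptep, &ptent,
 *						max_nr, FPB_MERGE_YOUNG_DIRTY);
 *
 * Afterwards "ptent" reflects the merged young/dirty state and "nr"
 * consecutive PTEs may be processed as one unit.
 */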

unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
		unsigned int max_nr);

/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	 forward or backward by delta
 * @pte: The initial pte state; must be a swap entry
 * @delta: The direction and the offset we are moving; forward if delta
 *	 is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
	const softleaf_t entry = softleaf_from_pte(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + delta)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}

/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; must be a swap entry.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	return pte_move_swp_offset(pte, 1);
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	const softleaf_t entry = softleaf_from_pte(pte);
	pte_t *ptep = start_ptep + 1;
	unsigned short cgroup_id;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!softleaf_is_swap(entry));

	cgroup_id = lookup_swap_cgroup_id(entry);
	while (ptep < end_ptep) {
		softleaf_t entry;

		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;
		entry = softleaf_from_pte(pte);
		if (lookup_swap_cgroup_id(entry) != cgroup_id)
			break;
		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
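
/*
 * Illustrative sketch of the typical zap/unmap pattern (simplified; names
 * such as "ptent" and "max_nr" are the caller's): once a non-present PTE
 * turns out to be a swap entry, see how many neighbouring entries can be
 * handled in one go:
 *
 *	if (softleaf_is_swap(softleaf_from_pte(ptent))) {
 *		int nr = swap_pte_batch(pte, max_nr, ptent);
 *
 *		// release nr swap entries and clear nr PTEs as one batch
 *	}
 */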
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vmf->vma);
	return ret;
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);

void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

/**
 * sync_with_folio_pmd_zap - sync with concurrent zapping of a folio PMD
 * @mm: The mm_struct.
 * @pmdp: Pointer to the pmd that was found to be pmd_none().
 *
 * When we find a pmd_none() while unmapping a folio without holding the PTL,
 * zap_huge_pmd() may have cleared the PMD but not yet modified the folio to
 * indicate that it's unmapped. Skipping the PMD without synchronization could
 * make the folio unmapping code assume that unmapping failed.
 *
 * Wait for concurrent zapping to complete by grabbing the PTL.
 */
static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
{
	spinlock_t *ptl = pmd_lock(mm, pmdp);

	spin_unlock(ptl);
}

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);
void zap_page_range_single_batched(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long size, struct zap_details *details);
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
			   gfp_t gfp);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

static inline void set_pages_refcounted(struct page *page, unsigned long nr_pages)
{
	unsigned long pfn = page_to_pfn(page);

	for (; nr_pages--; pfn++)
		set_page_refcounted(pfn_to_page(pfn));
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool folio_isolate_lru(struct folio *folio);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
int user_proactive_reclaim(char *buf,
			   struct mem_cgroup *memcg, pg_data_t *pgdat);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))
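
/*
 * Example: with 4KiB base pages (PAGE_SHIFT == 12), K(x) shifts left by 2,
 * so K(256) == 1024 - i.e. 256 pages are printed as 1024 kB by the various
 * show_mem()-style reports that use this macro.
 */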

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;
extern int defrag_mode;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on a zone lower than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since a zone higher than this index cannot
	 * be usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize the race
 * window, and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
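
/*
 * Illustrative sketch of the intended pattern (mirroring what the compaction
 * scanners do, variable names hypothetical): read the order once, validate
 * it, and only then trust it:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *		if (freepage_order <= MAX_PAGE_ORDER)
 *			pfn += (1UL << freepage_order) - 1;
 *	}
 */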

/*
 * This function checks whether a page is free && is the buddy of another
 * page. We can coalesce a page and its buddy if:
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8, its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The located buddy may be a non-PageBuddy page, lie outside @page's zone, or
 * have an order different from @page's, so it is validated with
 * page_is_buddy() before being returned.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
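
/*
 * Illustrative sketch of one merge step in the free path (simplified; the
 * real logic lives in __free_one_page() with zone->lock held):
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (!buddy)
 *		goto done_merging;
 *	// remove buddy from its free list, then form the combined page:
 *	combined_pfn = buddy_pfn & pfn;
 *	page = page + (combined_pfn - pfn);
 *	pfn = combined_pfn;
 *	order++;
 */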

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);
bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
			   unsigned long nr_pages);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(unsigned long pfn, unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;
	VM_WARN_ON_ONCE(order > MAX_FOLIO_ORDER);

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef NR_PAGES_IN_LARGE_FOLIO
	folio->_nr_pages = 1U << order;
#endif
}

bool __folio_unqueue_deferred_split(struct folio *folio);
static inline bool folio_unqueue_deferred_split(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return false;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return false;

	return __folio_unqueue_deferred_split(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
		atomic_set(&folio->_nr_pages_mapped, 0);
	if (IS_ENABLED(CONFIG_MM_ID)) {
		folio->_mm_ids = 0;
		folio->_mm_id_mapcount[0] = -1;
		folio->_mm_id_mapcount[1] = -1;
	}
	if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
		atomic_set(&folio->_pincount, 0);
		atomic_set(&folio->_entire_mapcount, -1);
	}
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
		nodemask_t *);
#define __alloc_frozen_pages(...) \
	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
void free_frozen_pages(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

#ifdef CONFIG_NUMA
struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
#else
static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
{
	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
}
#endif

#define alloc_frozen_pages(...) \
	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
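
/*
 * Sketch of how the frozen-page API fits together (the caller shown is
 * illustrative): frozen pages are allocated with a refcount of zero, so a
 * user either hands them back frozen or thaws them first:
 *
 *	page = alloc_frozen_pages(GFP_KERNEL, order);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	set_page_refcounted(page);	// thaw: refcount 0 -> 1
 *	// or, if it never needed a refcount:
 *	free_frozen_pages(page, order);
 */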

struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_frozen_pages_nolock(...) \
	alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
void free_frozen_pages_nolock(struct page *page, unsigned int order);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int,
		bool);

#ifdef CONFIG_SPARSEMEM
void sparse_init(void);
#else
static inline void sparse_init(void) {}
#endif /* CONFIG_SPARSEMEM */

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

struct cma;

#ifdef CONFIG_CMA
bool cma_validate_zones(struct cma *cma);
void *cma_reserve_early(struct cma *cma, unsigned long size);
void init_cma_pageblock(struct page *page);
#else
static inline bool cma_validate_zones(struct cma *cma)
{
	return false;
}
static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
{
	return NULL;
}
static inline void init_cma_pageblock(struct page *page)
{
}
#endif

int find_suitable_fallback(struct free_area *area, unsigned int order,
			   int migratetype, bool claimable);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(const struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
		unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "Fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function just checks whether the folio
 * range is within the range [start, end). The caller needs to do a page
 * table check if it cares about the page table association.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the
 * page table of the VMA and the range [start, end) intersects the VMA range.
 * The caller wants to know whether the folio is fully associated with the
 * range. It calls this function to check whether the folio is in the range
 * first. Then it checks the page table to know whether the folio is fully
 * mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}
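
/*
 * Illustrative sketch of the usage described above (mlock/madvise style;
 * simplified): do the cheap range check first, then the page table check:
 *
 *	if (!folio_within_range(folio, vma, start, end))
 *		goto skip;
 *	// e.g. confirm every PTE of the folio is present before mlocking it
 */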

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
					struct vm_area_struct *vma)
{
	/*
	 * Always munlock when this function is called. Ideally, we would
	 * only munlock if some page of the folio is being unmapped from
	 * the VMA, leaving the folio no longer fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we always
	 * munlock the folio and let page reclaim correct it if that was
	 * wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear.  Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(const struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
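
/*
 * Illustrative example (the pattern used by rmap walkers): find where a
 * folio is mapped inside a VMA, if at all:
 *
 *	unsigned long addr = vma_address(vma, folio_pgoff(folio),
 *					 folio_nr_pages(folio));
 *	if (addr == -EFAULT)
 *		return;		// no page of the folio maps into this VMA
 */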

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void init_deferred_page(unsigned long pfn, int nid);

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;

extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
#define node_reclaim_mode 0

static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

/*
 * mm/memory-failure.c
 */
#ifdef CONFIG_MEMORY_FAILURE
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
void shake_folio(struct folio *folio);
typedef int hwpoison_filter_func_t(struct page *p);
void hwpoison_filter_register(hwpoison_filter_func_t *filter);
void hwpoison_filter_unregister(void);

#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
unsigned long page_mapped_in_vma(const struct page *page,
		struct vm_area_struct *vma);

#else
static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
	return -EBUSY;
}
#endif

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async OOM victim reclaim (aka the oom_reaper), so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_TRYLOCK		0x400 /* Only use spin_trylock in allocation path */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
	pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask);
unsigned int get_vm_area_page_order(struct vm_struct *vm);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
	pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
{
	return -EINVAL;
}
#endif

void clear_vm_uninitialized_flag(struct vm_struct *vm);

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

static inline bool vma_is_single_threaded_private(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return atomic_read(&vma->vm_mm->mm_users) == 1;
}

#ifdef CONFIG_NUMA_BALANCING
bool folio_can_map_prot_numa(struct folio *folio, struct vm_area_struct *vma,
		bool is_private_single_threaded);

#else
static inline bool folio_can_map_prot_numa(struct folio *folio,
		struct vm_area_struct *vma, bool is_private_single_threaded)
{
	return false;
}
#endif

int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int *flags, bool writable,
		      int *last_cpupid);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_folio(struct folio *folio);

struct vm_struct *__get_vm_area_node(unsigned long size,
				     unsigned long align, unsigned long shift,
				     unsigned long vm_flags, unsigned long start,
				     unsigned long end, int node, gfp_t gfp_mask,
				     const void *caller);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

/*
 * Parses a string with memory-size suffixes into its page order. Useful for
 * parsing kernel parameters.
 */
static inline int get_order_from_str(const char *size_str,
				     unsigned long valid_orders)
{
	unsigned long size;
	char *endptr;
	int order;

	size = memparse(size_str, &endptr);

	if (!is_power_of_2(size))
		return -EINVAL;
	order = get_order(size);
	if (BIT(order) & ~valid_orders)
		return -EINVAL;

	return order;
}
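
/*
 * Example (assuming 4KiB base pages): "2M" parses to 2MiB, a power of two,
 * and get_order(2MiB) == 9, so the call succeeds if BIT(9) is in
 * valid_orders. "3M" fails the power-of-two check, and "16K" (order 2)
 * fails if that bit is not in valid_orders.
 */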

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *    folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fall back to the slow path just to look up the vma.
 */
1592 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1593 				    unsigned int flags, struct page *page)
1594 {
1595 	/*
1596 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1597 	 * has to be writable -- and if it references (part of) an anonymous
1598 	 * folio, that part is required to be marked exclusive.
1599 	 */
1600 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1601 		return false;
1602 	/*
1603 	 * Note: PageAnon(page) is stable until the page is actually getting
1604 	 * freed.
1605 	 */
1606 	if (!PageAnon(page)) {
1607 		/*
1608 		 * We only care about R/O long-term pining: R/O short-term
1609 		 * pinning does not have the semantics to observe successive
1610 		 * changes through the process page tables.
1611 		 */
1612 		if (!(flags & FOLL_LONGTERM))
1613 			return false;
1614 
1615 		/* We really need the vma ... */
1616 		if (!vma)
1617 			return true;
1618 
1619 		/*
1620 		 * ... because we only care about writable private ("COW")
1621 		 * mappings where we have to break COW early.
1622 		 */
1623 		return is_cow_mapping(vma->vm_flags);
1624 	}
1625 
1626 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1627 	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1628 		smp_rmb();
1629 
1630 	/*
1631 	 * Note that KSM pages cannot be exclusive, and consequently,
1632 	 * cannot get pinned.
1633 	 */
1634 	return !PageAnonExclusive(page);
1635 }
1636 
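/*
 * Illustrative caller sketch (an assumption; the real call sites live in
 * mm/gup.c): a GUP path that found a write-protected PTE while taking a
 * FOLL_PIN reference would bail out so that the fault path can unshare
 * the page first, roughly:
 *
 *	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
 *		return ERR_PTR(-EMLINK);
 */
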
extern bool mirrored_kernelcore;
bool memblock_has_mirror(void);
void memblock_free_all(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check pgtable_supports_soft_dirty() before testing
	 * VM_SOFTDIRTY, because when soft-dirty is not compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, and !(vm_flags & VM_SOFTDIRTY)
	 * would then always be true.
	 */
	if (!pgtable_supports_soft_dirty())
		return false;

	/*
	 * Soft-dirty is kind of special: tracking is enabled when the
	 * VM_SOFTDIRTY vma flag is *not* set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}

static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}

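/*
 * Illustrative sketch (an assumption; mm/mprotect.c contains the real
 * users): code deciding whether a PTE may be mapped writable must keep it
 * write-protected while soft-dirty tracking still needs to observe the
 * next write fault:
 *
 *	if (pte_needs_soft_dirty_wp(vma, pte))
 *		return false;	// keep write-protected for soft-dirty
 */
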
void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);
void __meminit __init_page_from_nid(unsigned long pfn, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

int shmem_add_to_page_cache(struct folio *folio,
			    struct address_space *mapping,
			    pgoff_t index, void *expected, gfp_t gfp);
int shmem_inode_acct_blocks(struct inode *inode, long pages);
bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped);

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */

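/*
 * Illustrative sketch (an assumption; the real caller is the shrinker
 * allocation path): a variadic constructor forwards its format arguments
 * to shrinker_debugfs_name_alloc() and frees the name on teardown:
 *
 *	va_list ap;
 *	int err;
 *
 *	va_start(ap, fmt);
 *	err = shrinker_debugfs_name_alloc(shrinker, fmt, ap);
 *	va_end(ap);
 *	if (err)
 *		goto err_free;
 */
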
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {			\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {	\
		xas_set_update(xas, workingset_update_node);	\
		xas_set_lru(xas, &shadow_nodes);		\
	}							\
} while (0)

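/*
 * Illustrative sketch (an assumption; page cache insertion paths follow
 * this pattern): install the update callback before storing into the
 * mapping's XArray so that node lifetime is tracked for shadow entries:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 *	xas_lock_irq(&xas);
 *	xas_store(&xas, folio);
 *	xas_unlock_irq(&xas);
 */
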
/* mremap.c */
unsigned long move_page_tables(struct pagetable_move_control *pmc);

#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
#else /* CONFIG_UNACCEPTED_MEMORY */
static inline void accept_page(struct page *page)
{
}
#endif /* CONFIG_UNACCEPTED_MEMORY */

/* pagewalk.c */
int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);
int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);
int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd, void *private);

void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);

void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn);
int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t pgprot);

static inline void io_remap_pfn_range_prepare(struct vm_area_desc *desc,
		unsigned long orig_pfn, unsigned long size)
{
	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);

	remap_pfn_range_prepare(desc, pfn);
}

static inline int io_remap_pfn_range_complete(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orig_pfn, unsigned long size,
		pgprot_t orig_prot)
{
	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
	const pgprot_t prot = pgprot_decrypted(orig_prot);

	return remap_pfn_range_complete(vma, addr, pfn, size, prot);
}

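/*
 * Illustrative sketch (an assumption; names other than the two helpers
 * are placeholders): a driver-style mapping is described first, and the
 * page tables are filled in once the VMA exists:
 *
 *	io_remap_pfn_range_prepare(desc, pfn, size);
 *	...
 *	err = io_remap_pfn_range_complete(vma, vma->vm_start, pfn, size,
 *					  vma->vm_page_prot);
 */
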
#endif	/* __MM_INTERNAL_H */