xref: /linux/include/linux/mm.h (revision 40735a683bf844a453d7a0f91e5e3daa0abc659b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_H
3 #define _LINUX_MM_H
4 
5 #include <linux/args.h>
6 #include <linux/errno.h>
7 #include <linux/mmdebug.h>
8 #include <linux/gfp.h>
9 #include <linux/pgalloc_tag.h>
10 #include <linux/bug.h>
11 #include <linux/list.h>
12 #include <linux/mmzone.h>
13 #include <linux/rbtree.h>
14 #include <linux/atomic.h>
15 #include <linux/debug_locks.h>
16 #include <linux/compiler.h>
17 #include <linux/mm_types.h>
18 #include <linux/mmap_lock.h>
19 #include <linux/range.h>
20 #include <linux/pfn.h>
21 #include <linux/percpu-refcount.h>
22 #include <linux/bit_spinlock.h>
23 #include <linux/shrinker.h>
24 #include <linux/resource.h>
25 #include <linux/page_ext.h>
26 #include <linux/err.h>
27 #include <linux/page-flags.h>
28 #include <linux/page_ref.h>
29 #include <linux/overflow.h>
30 #include <linux/sched.h>
31 #include <linux/pgtable.h>
32 #include <linux/kasan.h>
33 #include <linux/memremap.h>
34 #include <linux/slab.h>
35 #include <linux/cacheinfo.h>
36 #include <linux/rcuwait.h>
37 #include <linux/bitmap.h>
38 #include <linux/bitops.h>
39 #include <linux/iommu-debug-pagealloc.h>
40 
41 struct mempolicy;
42 struct anon_vma;
43 struct anon_vma_chain;
44 struct user_struct;
45 struct pt_regs;
46 struct folio_batch;
47 
48 void arch_mm_preinit(void);
49 void mm_core_init_early(void);
50 void mm_core_init(void);
51 void init_mm_internals(void);
52 
53 extern atomic_long_t _totalram_pages;
54 static inline unsigned long totalram_pages(void)
55 {
56 	return (unsigned long)atomic_long_read(&_totalram_pages);
57 }
58 
59 static inline void totalram_pages_inc(void)
60 {
61 	atomic_long_inc(&_totalram_pages);
62 }
63 
64 static inline void totalram_pages_dec(void)
65 {
66 	atomic_long_dec(&_totalram_pages);
67 }
68 
69 static inline void totalram_pages_add(long count)
70 {
71 	atomic_long_add(count, &_totalram_pages);
72 }
73 
74 extern void * high_memory;
75 
76 /*
77  * Convert between pages and MB
78  * 20 is the shift for 1MB (2^20 = 1MB)
79  * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
80  * So (20 - PAGE_SHIFT) converts between pages and MB
81  */
82 #define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
83 #define MB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))
84 
85 #ifdef CONFIG_SYSCTL
86 extern int sysctl_legacy_va_layout;
87 #else
88 #define sysctl_legacy_va_layout 0
89 #endif
90 
91 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
92 extern const int mmap_rnd_bits_min;
93 extern int mmap_rnd_bits_max __ro_after_init;
94 extern int mmap_rnd_bits __read_mostly;
95 #endif
96 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
97 extern const int mmap_rnd_compat_bits_min;
98 extern const int mmap_rnd_compat_bits_max;
99 extern int mmap_rnd_compat_bits __read_mostly;
100 #endif
101 
102 #ifndef DIRECT_MAP_PHYSMEM_END
103 # ifdef MAX_PHYSMEM_BITS
104 # define DIRECT_MAP_PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
105 # else
106 # define DIRECT_MAP_PHYSMEM_END	(((phys_addr_t)-1)&~(1ULL<<63))
107 # endif
108 #endif
109 
110 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
111 
112 #include <asm/page.h>
113 #include <asm/processor.h>
114 
115 #ifndef __pa_symbol
116 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
117 #endif
118 
119 #ifndef page_to_virt
120 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
121 #endif
122 
123 #ifndef lm_alias
124 #define lm_alias(x)	__va(__pa_symbol(x))
125 #endif
126 
127 /*
128  * To prevent common memory management code establishing
129  * a zero page mapping on a read fault.
130  * This macro should be defined within <asm/pgtable.h>.
131  * s390 does this to prevent multiplexing of hardware bits
132  * related to the physical page in case of virtualization.
133  */
134 #ifndef mm_forbids_zeropage
135 #define mm_forbids_zeropage(X)	(0)
136 #endif
137 
138 /*
139  * On some architectures it is expensive to call memset() for small sizes.
140  * If an architecture decides to implement their own version of
141  * mm_zero_struct_page they should wrap the defines below in a #ifndef and
142  * define their own version of this macro in <asm/pgtable.h>
143  */
144 #if BITS_PER_LONG == 64
145 /* This function must be updated when the size of struct page grows above 96
146  * or reduces below 56. The idea that compiler optimizes out switch()
147  * statement, and only leaves move/store instructions. Also the compiler can
148  * combine write statements if they are both assignments and can be reordered,
149  * this can result in several of the writes here being dropped.
150  */
151 #define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
152 static inline void __mm_zero_struct_page(struct page *page)
153 {
154 	unsigned long *_pp = (void *)page;
155 
156 	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
157 	BUILD_BUG_ON(sizeof(struct page) & 7);
158 	BUILD_BUG_ON(sizeof(struct page) < 56);
159 	BUILD_BUG_ON(sizeof(struct page) > 96);
160 
161 	switch (sizeof(struct page)) {
162 	case 96:
163 		_pp[11] = 0;
164 		fallthrough;
165 	case 88:
166 		_pp[10] = 0;
167 		fallthrough;
168 	case 80:
169 		_pp[9] = 0;
170 		fallthrough;
171 	case 72:
172 		_pp[8] = 0;
173 		fallthrough;
174 	case 64:
175 		_pp[7] = 0;
176 		fallthrough;
177 	case 56:
178 		_pp[6] = 0;
179 		_pp[5] = 0;
180 		_pp[4] = 0;
181 		_pp[3] = 0;
182 		_pp[2] = 0;
183 		_pp[1] = 0;
184 		_pp[0] = 0;
185 	}
186 }
187 #else
188 #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
189 #endif
190 
191 /*
192  * Default maximum number of active map areas, this limits the number of vmas
193  * per mm struct. Users can overwrite this number by sysctl but there is a
194  * problem.
195  *
196  * When a program's coredump is generated as ELF format, a section is created
197  * per a vma. In ELF, the number of sections is represented in unsigned short.
198  * This means the number of sections should be smaller than 65535 at coredump.
199  * Because the kernel adds some informative sections to a image of program at
200  * generating coredump, we need some margin. The number of extra sections is
201  * 1-3 now and depends on arch. We use "5" as safe margin, here.
202  *
203  * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
204  * not a hard limit any more. Although some userspace tools can be surprised by
205  * that.
206  */
207 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
208 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
209 
210 extern unsigned long sysctl_user_reserve_kbytes;
211 extern unsigned long sysctl_admin_reserve_kbytes;
212 
213 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
214 bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
215 #else
216 static inline bool page_range_contiguous(const struct page *page,
217 		unsigned long nr_pages)
218 {
219 	return true;
220 }
221 #endif
222 
223 /* to align the pointer to the (next) page boundary */
224 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
225 
226 /* to align the pointer to the (prev) page boundary */
227 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
228 
229 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
230 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
231 
232 /**
233  * folio_page_idx - Return the number of a page in a folio.
234  * @folio: The folio.
235  * @page: The folio page.
236  *
237  * This function expects that the page is actually part of the folio.
238  * The returned number is relative to the start of the folio.
239  */
240 static inline unsigned long folio_page_idx(const struct folio *folio,
241 		const struct page *page)
242 {
243 	return page - &folio->page;
244 }
245 
246 static inline struct folio *lru_to_folio(struct list_head *head)
247 {
248 	return list_entry((head)->prev, struct folio, lru);
249 }
250 
251 void setup_initial_init_mm(void *start_code, void *end_code,
252 			   void *end_data, void *brk);
253 
254 /*
255  * Linux kernel virtual memory manager primitives.
256  * The idea being to have a "virtual" mm in the same way
257  * we have a virtual fs - giving a cleaner interface to the
258  * mm details, and allowing different kinds of memory mappings
259  * (from shared memory to executable loading to arbitrary
260  * mmap() functions).
261  */
262 
263 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
264 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
265 void vm_area_free(struct vm_area_struct *);
266 
267 #ifndef CONFIG_MMU
268 extern struct rb_root nommu_region_tree;
269 extern struct rw_semaphore nommu_region_sem;
270 
271 extern unsigned int kobjsize(const void *objp);
272 #endif
273 
274 /*
275  * vm_flags in vm_area_struct, see mm_types.h.
276  * When changing, update also include/trace/events/mmflags.h
277  */
278 
279 #define VM_NONE		0x00000000
280 
281 /**
282  * typedef vma_flag_t - specifies an individual VMA flag by bit number.
283  *
284  * This value is made type safe by sparse to avoid passing invalid flag values
285  * around.
286  */
287 typedef int __bitwise vma_flag_t;
288 
289 #define DECLARE_VMA_BIT(name, bitnum) \
290 	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
291 #define DECLARE_VMA_BIT_ALIAS(name, aliased) \
292 	VMA_ ## name ## _BIT = (VMA_ ## aliased ## _BIT)
293 enum {
294 	DECLARE_VMA_BIT(READ, 0),
295 	DECLARE_VMA_BIT(WRITE, 1),
296 	DECLARE_VMA_BIT(EXEC, 2),
297 	DECLARE_VMA_BIT(SHARED, 3),
298 	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
299 	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
300 	DECLARE_VMA_BIT(MAYWRITE, 5),
301 	DECLARE_VMA_BIT(MAYEXEC, 6),
302 	DECLARE_VMA_BIT(MAYSHARE, 7),
303 	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
304 #ifdef CONFIG_MMU
305 	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
306 #else
307 	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
308 	DECLARE_VMA_BIT(MAYOVERLAY, 9),
309 #endif /* CONFIG_MMU */
310 	/* Page-ranges managed without "struct page", just pure PFN */
311 	DECLARE_VMA_BIT(PFNMAP, 10),
312 	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
313 	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
314 	DECLARE_VMA_BIT(LOCKED, 13),
315 	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
316 	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
317 	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
318 	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
319 	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
320 	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
321 	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
322 	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
323 	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
324 	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
325 	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
326 	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
327 	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
328 	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
329 	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
330 	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
331 	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
332 	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
333 	/* These bits are reused, we define specific uses below. */
334 	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
335 	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
336 	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
337 	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
338 	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
339 	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
340 	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
341 	/*
342 	 * This flag is used to connect VFIO to arch specific KVM code. It
343 	 * indicates that the memory under this VMA is safe for use with any
344 	 * non-cachable memory type inside KVM. Some VFIO devices, on some
345 	 * platforms, are thought to be unsafe and can cause machine crashes
346 	 * if KVM does not lock down the memory type.
347 	 */
348 	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
349 #if defined(CONFIG_PPC32)
350 	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
351 #elif defined(CONFIG_64BIT)
352 	DECLARE_VMA_BIT(DROPPABLE, 40),
353 #endif
354 	DECLARE_VMA_BIT(UFFD_MINOR, 41),
355 	DECLARE_VMA_BIT(SEALED, 42),
356 	/* Flags that reuse flags above. */
357 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
358 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
359 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
360 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
361 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
362 #if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_RISCV_USER_CFI)
363 	/*
364 	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
365 	 * support core mm.
366 	 *
367 	 * These VMAs will get a single end guard page. This helps userspace
368 	 * protect itself from attacks. A single page is enough for current
369 	 * shadow stack archs (x86). See the comments near alloc_shstk() in
370 	 * arch/x86/kernel/shstk.c for more details on the guard size.
371 	 */
372 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
373 #elif defined(CONFIG_ARM64_GCS)
374 	/*
375 	 * arm64's Guarded Control Stack implements similar functionality and
376 	 * has similar constraints to shadow stacks.
377 	 */
378 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
379 #endif
380 	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),		/* Strong Access Ordering (powerpc) */
381 	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),		/* parisc */
382 	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
383 	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
384 	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
385 	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
386 	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
387 	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
388 #ifdef CONFIG_STACK_GROWSUP
389 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
390 	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
391 #else
392 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
393 #endif
394 };
395 #undef DECLARE_VMA_BIT
396 #undef DECLARE_VMA_BIT_ALIAS
397 
398 #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
399 #define VM_READ		INIT_VM_FLAG(READ)
400 #define VM_WRITE	INIT_VM_FLAG(WRITE)
401 #define VM_EXEC		INIT_VM_FLAG(EXEC)
402 #define VM_SHARED	INIT_VM_FLAG(SHARED)
403 #define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
404 #define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
405 #define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
406 #define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
407 #define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
408 #ifdef CONFIG_MMU
409 #define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
410 #else
411 #define VM_UFFD_MISSING	VM_NONE
412 #define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
413 #endif
414 #define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
415 #define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
416 #define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
417 #define VM_LOCKED	INIT_VM_FLAG(LOCKED)
418 #define VM_IO		INIT_VM_FLAG(IO)
419 #define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
420 #define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
421 #define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
422 #define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
423 #define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
424 #define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
425 #define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
426 #define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
427 #define VM_SYNC		INIT_VM_FLAG(SYNC)
428 #define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
429 #define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
430 #define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
431 #ifdef CONFIG_MEM_SOFT_DIRTY
432 #define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
433 #else
434 #define VM_SOFTDIRTY	VM_NONE
435 #endif
436 #define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
437 #define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
438 #define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
439 #define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
440 #define VM_STACK	INIT_VM_FLAG(STACK)
441 #ifdef CONFIG_STACK_GROWSUP
442 #define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
443 #else
444 #define VM_STACK_EARLY	VM_NONE
445 #endif
446 #ifdef CONFIG_ARCH_HAS_PKEYS
447 #define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
448 /* Despite the naming, these are FLAGS not bits. */
449 #define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
450 #define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
451 #define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
452 #if CONFIG_ARCH_PKEY_BITS > 3
453 #define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
454 #else
455 #define VM_PKEY_BIT3  VM_NONE
456 #endif /* CONFIG_ARCH_PKEY_BITS > 3 */
457 #if CONFIG_ARCH_PKEY_BITS > 4
458 #define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
459 #else
460 #define VM_PKEY_BIT4  VM_NONE
461 #endif /* CONFIG_ARCH_PKEY_BITS > 4 */
462 #endif /* CONFIG_ARCH_HAS_PKEYS */
463 #if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \
464 	defined(CONFIG_RISCV_USER_CFI)
465 #define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
466 #define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
467 #else
468 #define VM_SHADOW_STACK	VM_NONE
469 #define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
470 #endif
471 #if defined(CONFIG_PPC64)
472 #define VM_SAO		INIT_VM_FLAG(SAO)
473 #elif defined(CONFIG_PARISC)
474 #define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
475 #elif defined(CONFIG_SPARC64)
476 #define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
477 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
478 #elif defined(CONFIG_ARM64)
479 #define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
480 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
481 #elif !defined(CONFIG_MMU)
482 #define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
483 #endif
484 #ifndef VM_GROWSUP
485 #define VM_GROWSUP	VM_NONE
486 #endif
487 #ifdef CONFIG_ARM64_MTE
488 #define VM_MTE		INIT_VM_FLAG(MTE)
489 #define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
490 #else
491 #define VM_MTE		VM_NONE
492 #define VM_MTE_ALLOWED	VM_NONE
493 #endif
494 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
495 #define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
496 #else
497 #define VM_UFFD_MINOR	VM_NONE
498 #endif
499 #ifdef CONFIG_64BIT
500 #define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
501 #define VM_SEALED		INIT_VM_FLAG(SEALED)
502 #else
503 #define VM_ALLOW_ANY_UNCACHED	VM_NONE
504 #define VM_SEALED		VM_NONE
505 #endif
506 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
507 #define VM_DROPPABLE		INIT_VM_FLAG(DROPPABLE)
508 #define VMA_DROPPABLE		mk_vma_flags(VMA_DROPPABLE_BIT)
509 #else
510 #define VM_DROPPABLE		VM_NONE
511 #define VMA_DROPPABLE		EMPTY_VMA_FLAGS
512 #endif
513 
514 /* Bits set in the VMA until the stack is in its final location */
515 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
516 
517 #define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
518 		       VMA_EXEC_BIT : VMA_READ_BIT)
519 
520 /* Common data flag combinations */
521 #define VMA_DATA_FLAGS_TSK_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
522 		TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT,	  \
523 		VMA_MAYEXEC_BIT)
524 #define VMA_DATA_FLAGS_NON_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
525 		VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
526 #define VMA_DATA_FLAGS_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
527 		VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT,	  \
528 		VMA_MAYEXEC_BIT)
529 
530 #ifndef VMA_DATA_DEFAULT_FLAGS		/* arch can override this */
531 #define VMA_DATA_DEFAULT_FLAGS  VMA_DATA_FLAGS_EXEC
532 #endif
533 
534 #ifndef VMA_STACK_DEFAULT_FLAGS		/* arch can override this */
535 #define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
536 #endif
537 
538 #define VMA_STACK_FLAGS	append_vma_flags(VMA_STACK_DEFAULT_FLAGS,	\
539 		VMA_STACK_BIT, VMA_ACCOUNT_BIT)
540 
541 /* Temporary until VMA flags conversion complete. */
542 #define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
543 
544 #ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
545 #define VM_SEALED_SYSMAP	VM_SEALED
546 #else
547 #define VM_SEALED_SYSMAP	VM_NONE
548 #endif
549 
550 /* VMA basic access permission flags */
551 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
552 #define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
553 
554 /*
555  * Special vmas that are non-mergable, non-mlock()able.
556  */
557 
558 #define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
559 				       VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
560 #define VM_SPECIAL vma_flags_to_legacy(VMA_SPECIAL_FLAGS)
561 
562 /*
563  * Physically remapped pages are special. Tell the
564  * rest of the world about it:
565  *   IO tells people not to look at these pages
566  *	(accesses can have side effects).
567  *   PFNMAP tells the core MM that the base pages are just
568  *	raw PFN mappings, and do not have a "struct page" associated
569  *	with them.
570  *   DONTEXPAND
571  *      Disable vma merging and expanding with mremap().
572  *   DONTDUMP
573  *      Omit vma from core dump, even when VM_IO turned off.
574  */
575 #define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,	\
576 				     VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
577 
578 /* This mask prevents VMA from being scanned with khugepaged */
579 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
580 
581 /* This mask defines which mm->def_flags a process can inherit its parent */
582 #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
583 
584 /* This mask represents all the VMA flag bits used by mlock */
585 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
586 
587 #define VMA_LOCKED_MASK	mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)
588 
589 /* These flags can be updated atomically via VMA/mmap read lock. */
590 #define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
591 
592 /* Arch-specific flags to clear when updating VM flags on protection change */
593 #ifndef VM_ARCH_CLEAR
594 #define VM_ARCH_CLEAR	VM_NONE
595 #endif
596 #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
597 
598 /*
599  * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
600  * possesses it but the other does not, the merged VMA should nonetheless have
601  * applied to it:
602  *
603  *   VMA_SOFTDIRTY_BIT - if a VMA is marked soft-dirty, that is has not had its
604  *                       references cleared via /proc/$pid/clear_refs, any
605  *                       merged VMA should be considered soft-dirty also as it
606  *                       operates at a VMA granularity.
607  *
608  * VMA_MAYBE_GUARD_BIT - If a VMA may have guard regions in place it implies
609  *                       that mapped page tables may contain metadata not
610  *                       described by the VMA and thus any merged VMA may also
611  *                       contain this metadata, and thus we must make this flag
612  *                       sticky.
613  */
614 #ifdef CONFIG_MEM_SOFT_DIRTY
615 #define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
616 #else
617 #define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
618 #endif
619 
620 /*
621  * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
622  * of these flags and the other not does not preclude a merge.
623  *
624  *    VMA_STICKY_FLAGS - When merging VMAs, VMA flags must match, unless they
625  *                       are 'sticky'. If any sticky flags exist in either VMA,
626  *                       we simply set all of them on the merged VMA.
627  */
628 #define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS
629 
630 /*
631  * Flags which should result in page tables being copied on fork. These are
632  * flags which indicate that the VMA maps page tables which cannot be
633  * reconsistuted upon page fault, so necessitate page table copying upon fork.
634  *
635  * Note that these flags should be compared with the DESTINATION VMA not the
636  * source, as VM_UFFD_WP may not be propagated to destination, while all other
637  * flags will be.
638  *
639  * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
640  *                           reasonably reconstructed on page fault.
641  *
642  *              VM_UFFD_WP - Encodes metadata about an installed uffd
643  *                           write protect handler, which cannot be
644  *                           reconstructed on page fault.
645  *
646  *                           We always copy pgtables when dst_vma has uffd-wp
647  *                           enabled even if it's file-backed
648  *                           (e.g. shmem). Because when uffd-wp is enabled,
649  *                           pgtable contains uffd-wp protection information,
650  *                           that's something we can't retrieve from page cache,
651  *                           and skip copying will lose those info.
652  *
653  *          VM_MAYBE_GUARD - Could contain page guard region markers which
654  *                           by design are a property of the page tables
655  *                           only and thus cannot be reconstructed on page
656  *                           fault.
657  */
658 #define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
659 
660 /*
661  * mapping from the currently active vm_flags protection bits (the
662  * low four bits) to a page protection mask..
663  */
664 
665 /*
666  * The default fault flags that should be used by most of the
667  * arch-specific page fault handlers.
668  */
669 #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
670 			     FAULT_FLAG_KILLABLE | \
671 			     FAULT_FLAG_INTERRUPTIBLE)
672 
673 /**
674  * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
675  * @flags: Fault flags.
676  *
677  * This is mostly used for places where we want to try to avoid taking
678  * the mmap_lock for too long a time when waiting for another condition
679  * to change, in which case we can try to be polite to release the
680  * mmap_lock in the first round to avoid potential starvation of other
681  * processes that would also want the mmap_lock.
682  *
683  * Return: true if the page fault allows retry and this is the first
684  * attempt of the fault handling; false otherwise.
685  */
686 static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
687 {
688 	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
689 	    (!(flags & FAULT_FLAG_TRIED));
690 }
691 
692 #define FAULT_FLAG_TRACE \
693 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
694 	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
695 	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
696 	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
697 	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
698 	{ FAULT_FLAG_TRIED,		"TRIED" }, \
699 	{ FAULT_FLAG_USER,		"USER" }, \
700 	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
701 	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
702 	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
703 	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }
704 
705 /*
706  * vm_fault is filled by the pagefault handler and passed to the vma's
707  * ->fault function. The vma's ->fault is responsible for returning a bitmask
708  * of VM_FAULT_xxx flags that give details about how the fault was handled.
709  *
710  * MM layer fills up gfp_mask for page allocations but fault handler might
711  * alter it if its implementation requires a different allocation context.
712  *
713  * pgoff should be used in favour of virtual_address, if possible.
714  */
715 struct vm_fault {
716 	const struct {
717 		struct vm_area_struct *vma;	/* Target VMA */
718 		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
719 		pgoff_t pgoff;			/* Logical page offset based on vma */
720 		unsigned long address;		/* Faulting virtual address - masked */
721 		unsigned long real_address;	/* Faulting virtual address - unmasked */
722 	};
723 	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
724 					 * XXX: should really be 'const' */
725 	pmd_t *pmd;			/* Pointer to pmd entry matching
726 					 * the 'address' */
727 	pud_t *pud;			/* Pointer to pud entry matching
728 					 * the 'address'
729 					 */
730 	union {
731 		pte_t orig_pte;		/* Value of PTE at the time of fault */
732 		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
733 					 * used by PMD fault only.
734 					 */
735 	};
736 
737 	struct page *cow_page;		/* Page handler may use for COW fault */
738 	struct page *page;		/* ->fault handlers should return a
739 					 * page here, unless VM_FAULT_NOPAGE
740 					 * is set (which is also implied by
741 					 * VM_FAULT_ERROR).
742 					 */
743 	/* These three entries are valid only while holding ptl lock */
744 	pte_t *pte;			/* Pointer to pte entry matching
745 					 * the 'address'. NULL if the page
746 					 * table hasn't been allocated.
747 					 */
748 	spinlock_t *ptl;		/* Page table lock.
749 					 * Protects pte page table if 'pte'
750 					 * is not NULL, otherwise pmd.
751 					 */
752 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
753 					 * vm_ops->map_pages() sets up a page
754 					 * table from atomic context.
755 					 * do_fault_around() pre-allocates
756 					 * page table to avoid allocation from
757 					 * atomic context.
758 					 */
759 };
760 
761 struct vm_uffd_ops;
762 
763 /*
764  * These are the virtual MM functions - opening of an area, closing and
765  * unmapping it (needed to keep files on disk up-to-date etc), pointer
766  * to the functions called when a no-page or a wp-page exception occurs.
767  */
768 struct vm_operations_struct {
769 	/**
770 	 * @open: Called when a VMA is remapped, split or forked. Not called
771 	 * upon first mapping a VMA.
772 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
773 	 */
774 	void (*open)(struct vm_area_struct *vma);
775 	/**
776 	 * @close: Called when the VMA is being removed from the MM.
777 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
778 	 */
779 	void (*close)(struct vm_area_struct *vma);
780 	/**
781 	 * @mapped: Called when the VMA is first mapped in the MM. Not called if
782 	 * the new VMA is merged with an adjacent VMA.
783 	 *
784 	 * The @vm_private_data field is an output field allowing the user to
785 	 * modify vma->vm_private_data as necessary.
786 	 *
787 	 * ONLY valid if set from f_op->mmap_prepare. Will result in an error if
788 	 * set from f_op->mmap.
789 	 *
790 	 * Returns %0 on success, or an error otherwise. On error, the VMA will
791 	 * be unmapped.
792 	 *
793 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
794 	 */
795 	int (*mapped)(unsigned long start, unsigned long end, pgoff_t pgoff,
796 		      const struct file *file, void **vm_private_data);
797 	/* Called any time before splitting to check if it's allowed */
798 	int (*may_split)(struct vm_area_struct *vma, unsigned long addr);
799 	int (*mremap)(struct vm_area_struct *vma);
800 	/*
801 	 * Called by mprotect() to make driver-specific permission
802 	 * checks before mprotect() is finalised.   The VMA must not
803 	 * be modified.  Returns 0 if mprotect() can proceed.
804 	 */
805 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
806 			unsigned long end, unsigned long newflags);
807 	vm_fault_t (*fault)(struct vm_fault *vmf);
808 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
809 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
810 			pgoff_t start_pgoff, pgoff_t end_pgoff);
811 	unsigned long (*pagesize)(struct vm_area_struct *vma);
812 
813 	/* notification that a previously read-only page is about to become
814 	 * writable, if an error is returned it will cause a SIGBUS */
815 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
816 
817 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
818 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
819 
820 	/* called by access_process_vm when get_user_pages() fails, typically
821 	 * for use by special VMAs. See also generic_access_phys() for a generic
822 	 * implementation useful for any iomem mapping.
823 	 */
824 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
825 		      void *buf, int len, int write);
826 
827 	/* Called by the /proc/PID/maps code to ask the vma whether it
828 	 * has a special name.  Returning non-NULL will also cause this
829 	 * vma to be dumped unconditionally. */
830 	const char *(*name)(struct vm_area_struct *vma);
831 
832 #ifdef CONFIG_NUMA
833 	/*
834 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
835 	 * to hold the policy upon return.  Caller should pass NULL @new to
836 	 * remove a policy and fall back to surrounding context--i.e. do not
837 	 * install a MPOL_DEFAULT policy, nor the task or system default
838 	 * mempolicy.
839 	 */
840 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
841 
842 	/*
843 	 * get_policy() op must add reference [mpol_get()] to any policy at
844 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
845 	 * in mm/mempolicy.c will do this automatically.
846 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
847 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
848 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
849 	 * must return NULL--i.e., do not "fallback" to task or system default
850 	 * policy.
851 	 */
852 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
853 					unsigned long addr, pgoff_t *ilx);
854 #endif
855 #ifdef CONFIG_FIND_NORMAL_PAGE
856 	/*
857 	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
858 	 * allows for returning a "normal" page from vm_normal_page() even
859 	 * though the PTE indicates that the "struct page" either does not exist
860 	 * or should not be touched: "special".
861 	 *
862 	 * Do not add new users: this really only works when a "normal" page
863 	 * was mapped, but then the PTE got changed to something weird (+
864 	 * marked special) that would not make pte_pfn() identify the originally
865 	 * inserted page.
866 	 */
867 	struct page *(*find_normal_page)(struct vm_area_struct *vma,
868 					 unsigned long addr);
869 #endif /* CONFIG_FIND_NORMAL_PAGE */
870 #ifdef CONFIG_USERFAULTFD
871 	const struct vm_uffd_ops *uffd_ops;
872 #endif
873 };
874 
875 #ifdef CONFIG_NUMA_BALANCING
876 static inline void vma_numab_state_init(struct vm_area_struct *vma)
877 {
878 	vma->numab_state = NULL;
879 }
880 static inline void vma_numab_state_free(struct vm_area_struct *vma)
881 {
882 	kfree(vma->numab_state);
883 }
884 #else
885 static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
886 static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
887 #endif /* CONFIG_NUMA_BALANCING */
888 
889 /*
890  * These must be here rather than mmap_lock.h as dependent on vm_fault type,
891  * declared in this header.
892  */
893 #ifdef CONFIG_PER_VMA_LOCK
894 static inline void release_fault_lock(struct vm_fault *vmf)
895 {
896 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
897 		vma_end_read(vmf->vma);
898 	else
899 		mmap_read_unlock(vmf->vma->vm_mm);
900 }
901 
902 static inline void assert_fault_locked(const struct vm_fault *vmf)
903 {
904 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
905 		vma_assert_locked(vmf->vma);
906 	else
907 		mmap_assert_locked(vmf->vma->vm_mm);
908 }
909 #else
910 static inline void release_fault_lock(struct vm_fault *vmf)
911 {
912 	mmap_read_unlock(vmf->vma->vm_mm);
913 }
914 
915 static inline void assert_fault_locked(const struct vm_fault *vmf)
916 {
917 	mmap_assert_locked(vmf->vma->vm_mm);
918 }
919 #endif /* CONFIG_PER_VMA_LOCK */
920 
921 static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
922 {
923 	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
924 }
925 
926 static inline bool mm_flags_test_and_set(int flag, struct mm_struct *mm)
927 {
928 	return test_and_set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
929 }
930 
931 static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
932 {
933 	return test_and_clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
934 }
935 
936 static inline void mm_flags_set(int flag, struct mm_struct *mm)
937 {
938 	set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
939 }
940 
941 static inline void mm_flags_clear(int flag, struct mm_struct *mm)
942 {
943 	clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
944 }
945 
946 static inline void mm_flags_clear_all(struct mm_struct *mm)
947 {
948 	bitmap_zero(ACCESS_PRIVATE(&mm->flags, __mm_flags), NUM_MM_FLAG_BITS);
949 }
950 
951 extern const struct vm_operations_struct vma_dummy_vm_ops;
952 
953 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
954 {
955 	memset(vma, 0, sizeof(*vma));
956 	vma->vm_mm = mm;
957 	vma->vm_ops = &vma_dummy_vm_ops;
958 	INIT_LIST_HEAD(&vma->anon_vma_chain);
959 	vma_lock_init(vma, false);
960 }
961 
962 /* Use when VMA is not part of the VMA tree and needs no locking */
963 static inline void vm_flags_init(struct vm_area_struct *vma,
964 				 vm_flags_t flags)
965 {
966 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
967 	vma_flags_clear_all(&vma->flags);
968 	vma_flags_overwrite_word(&vma->flags, flags);
969 }
970 
971 /*
972  * Use when VMA is part of the VMA tree and modifications need coordination
973  * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
974  * it should be locked explicitly beforehand.
975  */
976 static inline void vm_flags_reset(struct vm_area_struct *vma,
977 				  vm_flags_t flags)
978 {
979 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
980 	vma_assert_write_locked(vma);
981 	vm_flags_init(vma, flags);
982 }
983 
984 static inline void vma_flags_reset_once(struct vm_area_struct *vma,
985 					vma_flags_t *flags)
986 {
987 	const unsigned long word = flags->__vma_flags[0];
988 
989 	/* It is assumed only the first system word must be written once. */
990 	vma_flags_overwrite_word_once(&vma->flags, word);
991 	/* The remainder can be copied normally. */
992 	if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
993 		unsigned long *dst = &vma->flags.__vma_flags[1];
994 		const unsigned long *src = &flags->__vma_flags[1];
995 
996 		bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
997 	}
998 }
999 
1000 static inline void vm_flags_set(struct vm_area_struct *vma,
1001 				vm_flags_t flags)
1002 {
1003 	vma_start_write(vma);
1004 	vma_flags_set_word(&vma->flags, flags);
1005 }
1006 
1007 static inline void vm_flags_clear(struct vm_area_struct *vma,
1008 				  vm_flags_t flags)
1009 {
1010 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
1011 	vma_start_write(vma);
1012 	vma_flags_clear_word(&vma->flags, flags);
1013 }
1014 
1015 /*
1016  * Use only if VMA is not part of the VMA tree or has no other users and
1017  * therefore needs no locking.
1018  */
1019 static inline void __vm_flags_mod(struct vm_area_struct *vma,
1020 				  vm_flags_t set, vm_flags_t clear)
1021 {
1022 	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
1023 }
1024 
1025 /*
1026  * Use only when the order of set/clear operations is unimportant, otherwise
1027  * use vm_flags_{set|clear} explicitly.
1028  */
1029 static inline void vm_flags_mod(struct vm_area_struct *vma,
1030 				vm_flags_t set, vm_flags_t clear)
1031 {
1032 	vma_start_write(vma);
1033 	__vm_flags_mod(vma, set, clear);
1034 }
1035 
1036 static __always_inline bool __vma_atomic_valid_flag(struct vm_area_struct *vma,
1037 		vma_flag_t bit)
1038 {
1039 	const vm_flags_t mask = BIT((__force int)bit);
1040 
1041 	/* Only specific flags are permitted */
1042 	if (WARN_ON_ONCE(!(mask & VM_ATOMIC_SET_ALLOWED)))
1043 		return false;
1044 
1045 	return true;
1046 }
1047 
1048 /*
1049  * Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific
1050  * valid flags are allowed to do this.
1051  */
1052 static __always_inline void vma_set_atomic_flag(struct vm_area_struct *vma,
1053 		vma_flag_t bit)
1054 {
1055 	unsigned long *bitmap = vma->flags.__vma_flags;
1056 
1057 	vma_assert_stabilised(vma);
1058 	if (__vma_atomic_valid_flag(vma, bit))
1059 		set_bit((__force int)bit, bitmap);
1060 }
1061 
1062 /*
1063  * Test for VMA flag atomically. Requires no locks. Only specific valid flags
1064  * are allowed to do this.
1065  *
1066  * This is necessarily racey, so callers must ensure that serialisation is
1067  * achieved through some other means, or that races are permissible.
1068  */
1069 static __always_inline bool vma_test_atomic_flag(struct vm_area_struct *vma,
1070 		vma_flag_t bit)
1071 {
1072 	if (__vma_atomic_valid_flag(vma, bit))
1073 		return test_bit((__force int)bit, &vma->vm_flags);
1074 
1075 	return false;
1076 }
1077 
1078 /* Set an individual VMA flag in flags, non-atomically. */
1079 static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
1080 		vma_flag_t bit)
1081 {
1082 	unsigned long *bitmap = flags->__vma_flags;
1083 
1084 	__set_bit((__force int)bit, bitmap);
1085 }
1086 
1087 static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
1088 		size_t count, const vma_flag_t *bits)
1089 {
1090 	int i;
1091 
1092 	for (i = 0; i < count; i++)
1093 		vma_flags_set_flag(&flags, bits[i]);
1094 	return flags;
1095 }
1096 
1097 /*
1098  * Helper macro which bitwise-or combines the specified input flags into a
1099  * vma_flags_t bitmap value. E.g.:
1100  *
1101  * vma_flags_t flags = mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,
1102  *              VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT);
1103  *
1104  * The compiler cleverly optimises away all of the work and this ends up being
1105  * equivalent to aggregating the values manually.
1106  */
1107 #define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS,			\
1108 		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
1109 
1110 /*
1111  * Helper macro which acts like mk_vma_flags, only appending to a copy of the
1112  * specified flags rather than establishing new flags. E.g.:
1113  *
1114  * vma_flags_t flags = append_vma_flags(VMA_STACK_DEFAULT_FLAGS, VMA_STACK_BIT,
1115  *              VMA_ACCOUNT_BIT);
1116  */
1117 #define append_vma_flags(flags, ...) __mk_vma_flags(flags,			\
1118 		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
1119 
1120 /* Calculates the number of set bits in the specified VMA flags. */
1121 static __always_inline int vma_flags_count(const vma_flags_t *flags)
1122 {
1123 	const unsigned long *bitmap = flags->__vma_flags;
1124 
1125 	return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
1126 }
1127 
1128 /*
1129  * Test whether a specific VMA flag is set, e.g.:
1130  *
1131  * if (vma_flags_test(flags, VMA_READ_BIT)) { ... }
1132  */
1133 static __always_inline bool vma_flags_test(const vma_flags_t *flags,
1134 		vma_flag_t bit)
1135 {
1136 	const unsigned long *bitmap = flags->__vma_flags;
1137 
1138 	return test_bit((__force int)bit, bitmap);
1139 }
1140 
1141 /*
1142  * Obtain a set of VMA flags which contain the overlapping flags contained
1143  * within flags and to_and.
1144  */
1145 static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
1146 						      vma_flags_t to_and)
1147 {
1148 	vma_flags_t dst;
1149 	unsigned long *bitmap_dst = dst.__vma_flags;
1150 	const unsigned long *bitmap = flags->__vma_flags;
1151 	const unsigned long *bitmap_to_and = to_and.__vma_flags;
1152 
1153 	bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
1154 	return dst;
1155 }
1156 
1157 /*
1158  * Obtain a set of VMA flags which contains the specified overlapping flags,
1159  * e.g.:
1160  *
1161  * vma_flags_t read_flags = vma_flags_and(&flags, VMA_READ_BIT,
1162  *                                        VMA_MAY_READ_BIT);
1163  */
1164 #define vma_flags_and(flags, ...)				\
1165 	vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))
1166 
1167 /*  Test each of to_test flags in flags, non-atomically. */
1168 static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
1169 		vma_flags_t to_test)
1170 {
1171 	const unsigned long *bitmap = flags->__vma_flags;
1172 	const unsigned long *bitmap_to_test = to_test.__vma_flags;
1173 
1174 	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
1175 }
1176 
1177 /*
1178  * Test whether any specified VMA flag is set, e.g.:
1179  *
1180  * if (vma_flags_test_any(flags, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
1181  */
1182 #define vma_flags_test_any(flags, ...) \
1183 	vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))
1184 
1185 /* Test that ALL of the to_test flags are set, non-atomically. */
1186 static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
1187 		vma_flags_t to_test)
1188 {
1189 	const unsigned long *bitmap = flags->__vma_flags;
1190 	const unsigned long *bitmap_to_test = to_test.__vma_flags;
1191 
1192 	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
1193 }
1194 
1195 /*
1196  * Test whether ALL specified VMA flags are set, e.g.:
1197  *
1198  * if (vma_flags_test_all(flags, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
1199  */
1200 #define vma_flags_test_all(flags, ...) \
1201 	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
1202 
1203 /*
1204  * Helper to test that a flag mask of type vma_flags_t has a SINGLE flag set
1205  * (returning false if flagmask has no flags set).
1206  *
1207  * This is defined to make the semantics clearer when testing an optionally
1208  * defined VMA flags mask, e.g.:
1209  *
1210  * if (vma_flags_test_single_mask(&flags, VMA_DROPPABLE)) { ... }
1211  *
1212  * When VMA_DROPPABLE is defined if available, or set to EMPTY_VMA_FLAGS
1213  * otherwise.
1214  */
1215 static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
1216 		vma_flags_t flagmask)
1217 {
1218 	VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);
1219 
1220 	return vma_flags_test_any_mask(flags, flagmask);
1221 }
1222 
1223 /* Set each of the to_set flags in flags, non-atomically. */
1224 static __always_inline void vma_flags_set_mask(vma_flags_t *flags,
1225 		vma_flags_t to_set)
1226 {
1227 	unsigned long *bitmap = flags->__vma_flags;
1228 	const unsigned long *bitmap_to_set = to_set.__vma_flags;
1229 
1230 	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
1231 }
1232 
1233 /*
1234  * Set all specified VMA flags, e.g.:
1235  *
1236  * vma_flags_set(&flags, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
1237  */
1238 #define vma_flags_set(flags, ...) \
1239 	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))
1240 
1241 /* Clear all of the to-clear flags in flags, non-atomically. */
1242 static __always_inline void vma_flags_clear_mask(vma_flags_t *flags,
1243 		vma_flags_t to_clear)
1244 {
1245 	unsigned long *bitmap = flags->__vma_flags;
1246 	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;
1247 
1248 	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
1249 }
1250 
1251 /*
1252  * Clear all specified individual flags, e.g.:
1253  *
1254  * vma_flags_clear(&flags, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
1255  */
1256 #define vma_flags_clear(flags, ...) \
1257 	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
1258 
1259 /*
1260  * Obtain a VMA flags value containing those flags that are present in flags or
1261  * flags_other but not in both.
1262  */
1263 static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
1264 		const vma_flags_t *flags_other)
1265 {
1266 	vma_flags_t dst;
1267 	const unsigned long *bitmap_other = flags_other->__vma_flags;
1268 	const unsigned long *bitmap = flags->__vma_flags;
1269 	unsigned long *bitmap_dst = dst.__vma_flags;
1270 
1271 	bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
1272 	return dst;
1273 }
1274 
1275 /* Determine if flags and flags_other have precisely the same flags set. */
1276 static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
1277 						const vma_flags_t *flags_other)
1278 {
1279 	const unsigned long *bitmap = flags->__vma_flags;
1280 	const unsigned long *bitmap_other = flags_other->__vma_flags;
1281 
1282 	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
1283 }
1284 
1285 /* Determine if flags and flags_other have precisely the same flags set.  */
1286 static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
1287 						vma_flags_t flags_other)
1288 {
1289 	const unsigned long *bitmap = flags->__vma_flags;
1290 	const unsigned long *bitmap_other = flags_other.__vma_flags;
1291 
1292 	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
1293 }
1294 
1295 /*
1296  * Helper macro to determine if only the specific flags are set, e.g.:
1297  *
1298  * if (vma_flags_same(&flags, VMA_WRITE_BIT) { ... }
1299  */
1300 #define vma_flags_same(flags, ...) \
1301 	vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))
1302 
1303 /*
1304  * Test whether a specific flag in the VMA is set, e.g.:
1305  *
1306  * if (vma_test(vma, VMA_READ_BIT)) { ... }
1307  */
1308 static __always_inline bool vma_test(const struct vm_area_struct *vma,
1309 		vma_flag_t bit)
1310 {
1311 	return vma_flags_test(&vma->flags, bit);
1312 }
1313 
1314 /* Helper to test any VMA flags in a VMA . */
1315 static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
1316 		vma_flags_t flags)
1317 {
1318 	return vma_flags_test_any_mask(&vma->flags, flags);
1319 }
1320 
1321 /*
1322  * Helper macro for testing whether any VMA flags are set in a VMA,
1323  * e.g.:
1324  *
1325  * if (vma_test_any(vma, VMA_IO_BIT, VMA_PFNMAP_BIT,
1326  *		VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)) { ... }
1327  */
1328 #define vma_test_any(vma, ...) \
1329 	vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))
1330 
1331 /*
1332  * Helper to test that ALL specified flags are set in a VMA.
1333  *
1334  * Note: appropriate locks must be held, this function does not acquire them for
1335  * you.
1336  */
1337 static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
1338 		vma_flags_t flags)
1339 {
1340 	return vma_flags_test_all_mask(&vma->flags, flags);
1341 }
1342 
1343 /*
1344  * Helper macro for checking that ALL specified flags are set in a VMA, e.g.:
1345  *
1346  * if (vma_test_all(vma, VMA_READ_BIT, VMA_MAYREAD_BIT) { ... }
1347  */
1348 #define vma_test_all(vma, ...) \
1349 	vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))
1350 
1351 /*
1352  * Helper to test that a flag mask of type vma_flags_t has a SINGLE flag set
1353  * (returning false if flagmask has no flags set).
1354  *
1355  * This is useful when a flag needs to be either defined or not depending upon
1356  * kernel configuration, e.g.:
1357  *
1358  * if (vma_test_single_mask(vma, VMA_DROPPABLE)) { ... }
1359  *
1360  * When VMA_DROPPABLE is defined if available, or set to EMPTY_VMA_FLAGS
1361  * otherwise.
1362  */
1363 static __always_inline bool
1364 vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
1365 {
1366 	return vma_flags_test_single_mask(&vma->flags, flagmask);
1367 }
1368 
1369 /*
1370  * Helper to set all VMA flags in a VMA.
1371  *
1372  * Note: appropriate locks must be held, this function does not acquire them for
1373  * you.
1374  */
1375 static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
1376 		vma_flags_t flags)
1377 {
1378 	vma_flags_set_mask(&vma->flags, flags);
1379 }
1380 
1381 /*
1382  * Helper macro for specifying VMA flags in a VMA, e.g.:
1383  *
1384  * vma_set_flags(vma, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1385  * 		VMA_DONTDUMP_BIT);
1386  *
1387  * Note: appropriate locks must be held, this function does not acquire them for
1388  * you.
1389  */
1390 #define vma_set_flags(vma, ...) \
1391 	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
1392 
1393 /* Helper to clear all VMA flags in a VMA. */
1394 static __always_inline void vma_clear_flags_mask(struct vm_area_struct *vma,
1395 		vma_flags_t flags)
1396 {
1397 	vma_flags_clear_mask(&vma->flags, flags);
1398 }
1399 
1400 /*
1401  * Helper macro for clearing VMA flags, e.g.:
1402  *
1403  * vma_clear_flags(vma, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1404  * 		VMA_DONTDUMP_BIT);
1405  */
1406 #define vma_clear_flags(vma, ...) \
1407 	vma_clear_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
1408 
1409 /*
1410  * Test whether a specific VMA flag is set in a VMA descriptor, e.g.:
1411  *
1412  * if (vma_desc_test(desc, VMA_READ_BIT)) { ... }
1413  */
1414 static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
1415 		vma_flag_t bit)
1416 {
1417 	return vma_flags_test(&desc->vma_flags, bit);
1418 }
1419 
1420 /* Helper to test any VMA flags in a VMA descriptor. */
1421 static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
1422 		vma_flags_t flags)
1423 {
1424 	return vma_flags_test_any_mask(&desc->vma_flags, flags);
1425 }
1426 
1427 /*
1428  * Helper macro for testing whether any VMA flags are set in a VMA descriptor,
1429  * e.g.:
1430  *
1431  * if (vma_desc_test_any(desc, VMA_IO_BIT, VMA_PFNMAP_BIT,
1432  *		VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)) { ... }
1433  */
1434 #define vma_desc_test_any(desc, ...) \
1435 	vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))
1436 
1437 /* Helper to test all VMA flags in a VMA descriptor. */
1438 static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
1439 		vma_flags_t flags)
1440 {
1441 	return vma_flags_test_all_mask(&desc->vma_flags, flags);
1442 }
1443 
1444 /*
1445  * Helper macro for testing whether ALL VMA flags are set in a VMA descriptor,
1446  * e.g.:
1447  *
1448  * if (vma_desc_test_all(desc, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
1449  */
1450 #define vma_desc_test_all(desc, ...) \
1451 	vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))
1452 
1453 /* Helper to set all VMA flags in a VMA descriptor. */
1454 static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
1455 		vma_flags_t flags)
1456 {
1457 	vma_flags_set_mask(&desc->vma_flags, flags);
1458 }
1459 
1460 /*
1461  * Helper macro for specifying VMA flags for an input pointer to a struct
1462  * vm_area_desc object describing a proposed VMA, e.g.:
1463  *
1464  * vma_desc_set_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1465  * 		VMA_DONTDUMP_BIT);
1466  */
1467 #define vma_desc_set_flags(desc, ...) \
1468 	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
1469 
1470 /* Helper to clear all VMA flags in a VMA descriptor. */
1471 static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
1472 		vma_flags_t flags)
1473 {
1474 	vma_flags_clear_mask(&desc->vma_flags, flags);
1475 }
1476 
1477 /*
1478  * Helper macro for clearing VMA flags for an input pointer to a struct
1479  * vm_area_desc object describing a proposed VMA, e.g.:
1480  *
1481  * vma_desc_clear_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1482  * 		VMA_DONTDUMP_BIT);
1483  */
1484 #define vma_desc_clear_flags(desc, ...) \
1485 	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
1486 
1487 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1488 {
1489 	vma->vm_ops = NULL;
1490 }
1491 
1492 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1493 {
1494 	return !vma->vm_ops;
1495 }
1496 
1497 /*
1498  * Indicate if the VMA is a heap for the given task; for
1499  * /proc/PID/maps that is the heap of the main task.
1500  */
1501 static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
1502 {
1503 	return vma->vm_start < vma->vm_mm->brk &&
1504 		vma->vm_end > vma->vm_mm->start_brk;
1505 }
1506 
1507 /*
1508  * Indicate if the VMA is a stack for the given task; for
1509  * /proc/PID/maps that is the stack of the main task.
1510  */
1511 static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
1512 {
1513 	/*
1514 	 * We make no effort to guess what a given thread considers to be
1515 	 * its "stack".  It's not even well-defined for programs written
1516 	 * languages like Go.
1517 	 */
1518 	return vma->vm_start <= vma->vm_mm->start_stack &&
1519 		vma->vm_end >= vma->vm_mm->start_stack;
1520 }
1521 
1522 static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
1523 {
1524 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1525 
1526 	if (!maybe_stack)
1527 		return false;
1528 
1529 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1530 						VM_STACK_INCOMPLETE_SETUP)
1531 		return true;
1532 
1533 	return false;
1534 }
1535 
1536 static inline bool vma_is_foreign(const struct vm_area_struct *vma)
1537 {
1538 	if (!current->mm)
1539 		return true;
1540 
1541 	if (current->mm != vma->vm_mm)
1542 		return true;
1543 
1544 	return false;
1545 }
1546 
1547 static inline bool vma_is_accessible(const struct vm_area_struct *vma)
1548 {
1549 	return vma->vm_flags & VM_ACCESS_FLAGS;
1550 }
1551 
1552 static inline bool is_shared_maywrite(const vma_flags_t *flags)
1553 {
1554 	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
1555 }
1556 
1557 static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
1558 {
1559 	return is_shared_maywrite(&vma->flags);
1560 }
1561 
1562 /**
1563  * vma_kernel_pagesize - Default page size granularity for this VMA.
1564  * @vma: The user mapping.
1565  *
1566  * The kernel page size specifies in which granularity VMA modifications
1567  * can be performed. Folios in this VMA will be aligned to, and at least
1568  * the size of the number of bytes returned by this function.
1569  *
1570  * The default kernel page size is not affected by Transparent Huge Pages
1571  * being in effect.
1572  *
1573  * Return: The default page size granularity for this VMA.
1574  */
1575 static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1576 {
1577 	if (unlikely(vma->vm_ops && vma->vm_ops->pagesize))
1578 		return vma->vm_ops->pagesize(vma);
1579 	return PAGE_SIZE;
1580 }
1581 
1582 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
1583 
1584 static inline
1585 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
1586 {
1587 	return mas_find(&vmi->mas, max - 1);
1588 }
1589 
1590 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
1591 {
1592 	/*
1593 	 * Uses mas_find() to get the first VMA when the iterator starts.
1594 	 * Calling mas_next() could skip the first entry.
1595 	 */
1596 	return mas_find(&vmi->mas, ULONG_MAX);
1597 }
1598 
1599 static inline
1600 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
1601 {
1602 	return mas_next_range(&vmi->mas, ULONG_MAX);
1603 }
1604 
1605 
1606 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
1607 {
1608 	return mas_prev(&vmi->mas, 0);
1609 }
1610 
1611 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1612 			unsigned long start, unsigned long end, gfp_t gfp)
1613 {
1614 	__mas_set_range(&vmi->mas, start, end - 1);
1615 	mas_store_gfp(&vmi->mas, NULL, gfp);
1616 	if (unlikely(mas_is_err(&vmi->mas)))
1617 		return -ENOMEM;
1618 
1619 	return 0;
1620 }
1621 
1622 /* Free any unused preallocations */
1623 static inline void vma_iter_free(struct vma_iterator *vmi)
1624 {
1625 	mas_destroy(&vmi->mas);
1626 }
1627 
1628 static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
1629 				      struct vm_area_struct *vma)
1630 {
1631 	vmi->mas.index = vma->vm_start;
1632 	vmi->mas.last = vma->vm_end - 1;
1633 	mas_store(&vmi->mas, vma);
1634 	if (unlikely(mas_is_err(&vmi->mas)))
1635 		return -ENOMEM;
1636 
1637 	vma_mark_attached(vma);
1638 	return 0;
1639 }
1640 
1641 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
1642 {
1643 	mas_pause(&vmi->mas);
1644 }
1645 
1646 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1647 {
1648 	mas_set(&vmi->mas, addr);
1649 }
1650 
1651 #define for_each_vma(__vmi, __vma)					\
1652 	while (((__vma) = vma_next(&(__vmi))) != NULL)
1653 
1654 /* The MM code likes to work with exclusive end addresses */
1655 #define for_each_vma_range(__vmi, __vma, __end)				\
1656 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
1657 
1658 #ifdef CONFIG_SHMEM
1659 /*
1660  * The vma_is_shmem is not inline because it is used only by slow
1661  * paths in userfault.
1662  */
1663 bool vma_is_shmem(const struct vm_area_struct *vma);
1664 bool vma_is_anon_shmem(const struct vm_area_struct *vma);
1665 #else
1666 static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
1667 static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
1668 #endif
1669 
1670 int vma_is_stack_for_current(const struct vm_area_struct *vma);
1671 
1672 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1673 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1674 
1675 struct mmu_gather;
1676 struct inode;
1677 
1678 extern void prep_compound_page(struct page *page, unsigned int order);
1679 
1680 static inline unsigned int folio_large_order(const struct folio *folio)
1681 {
1682 	return folio->_flags_1 & 0xff;
1683 }
1684 
1685 #ifdef NR_PAGES_IN_LARGE_FOLIO
1686 static inline unsigned long folio_large_nr_pages(const struct folio *folio)
1687 {
1688 	return folio->_nr_pages;
1689 }
1690 #else
1691 static inline unsigned long folio_large_nr_pages(const struct folio *folio)
1692 {
1693 	return 1L << folio_large_order(folio);
1694 }
1695 #endif
1696 
1697 /*
1698  * compound_order() can be called without holding a reference, which means
1699  * that niceties like page_folio() don't work.  These callers should be
1700  * prepared to handle wild return values.  For example, PG_head may be
1701  * set before the order is initialised, or this may be a tail page.
1702  * See compaction.c for some good examples.
1703  */
1704 static inline unsigned int compound_order(const struct page *page)
1705 {
1706 	const struct folio *folio = (struct folio *)page;
1707 
1708 	if (!test_bit(PG_head, &folio->flags.f))
1709 		return 0;
1710 	return folio_large_order(folio);
1711 }
1712 
1713 /**
1714  * folio_order - The allocation order of a folio.
1715  * @folio: The folio.
1716  *
1717  * A folio is composed of 2^order pages.  See get_order() for the definition
1718  * of order.
1719  *
1720  * Return: The order of the folio.
1721  */
1722 static inline unsigned int folio_order(const struct folio *folio)
1723 {
1724 	if (!folio_test_large(folio))
1725 		return 0;
1726 	return folio_large_order(folio);
1727 }
1728 
1729 /**
1730  * folio_reset_order - Reset the folio order and derived _nr_pages
1731  * @folio: The folio.
1732  *
1733  * Reset the order and derived _nr_pages to 0. Must only be used in the
1734  * process of splitting large folios.
1735  */
1736 static inline void folio_reset_order(struct folio *folio)
1737 {
1738 	if (WARN_ON_ONCE(!folio_test_large(folio)))
1739 		return;
1740 	folio->_flags_1 &= ~0xffUL;
1741 #ifdef NR_PAGES_IN_LARGE_FOLIO
1742 	folio->_nr_pages = 0;
1743 #endif
1744 }
1745 
1746 #include <linux/huge_mm.h>
1747 
1748 /*
1749  * Methods to modify the page usage count.
1750  *
1751  * What counts for a page usage:
1752  * - cache mapping   (page->mapping)
1753  * - private data    (page->private)
1754  * - page mapped in a task's page tables, each mapping
1755  *   is counted separately
1756  *
1757  * Also, many kernel routines increase the page count before a critical
1758  * routine so they can be sure the page doesn't go away from under them.
1759  */
1760 
1761 /*
1762  * Drop a ref, return true if the refcount fell to zero (the page has no users)
1763  */
1764 static inline int put_page_testzero(struct page *page)
1765 {
1766 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
1767 	return page_ref_dec_and_test(page);
1768 }
1769 
1770 static inline int folio_put_testzero(struct folio *folio)
1771 {
1772 	return put_page_testzero(&folio->page);
1773 }
1774 
1775 /*
1776  * Try to grab a ref unless the page has a refcount of zero, return false if
1777  * that is the case.
1778  * This can be called when MMU is off so it must not access
1779  * any of the virtual mappings.
1780  */
1781 static inline bool get_page_unless_zero(struct page *page)
1782 {
1783 	return page_ref_add_unless_zero(page, 1);
1784 }
1785 
1786 static inline struct folio *folio_get_nontail_page(struct page *page)
1787 {
1788 	if (unlikely(!get_page_unless_zero(page)))
1789 		return NULL;
1790 	return (struct folio *)page;
1791 }
1792 
1793 extern int page_is_ram(unsigned long pfn);
1794 
1795 enum {
1796 	REGION_INTERSECTS,
1797 	REGION_DISJOINT,
1798 	REGION_MIXED,
1799 };
1800 
1801 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1802 		      unsigned long desc);
1803 
1804 /* Support for virtually mapped pages */
1805 struct page *vmalloc_to_page(const void *addr);
1806 unsigned long vmalloc_to_pfn(const void *addr);
1807 
1808 /*
1809  * Determine if an address is within the vmalloc range
1810  *
1811  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
1812  * is no special casing required.
1813  */
1814 #ifdef CONFIG_MMU
1815 extern bool is_vmalloc_addr(const void *x);
1816 extern int is_vmalloc_or_module_addr(const void *x);
1817 #else
1818 static inline bool is_vmalloc_addr(const void *x)
1819 {
1820 	return false;
1821 }
1822 static inline int is_vmalloc_or_module_addr(const void *x)
1823 {
1824 	return 0;
1825 }
1826 #endif
1827 
1828 /*
1829  * How many times the entire folio is mapped as a single unit (eg by a
1830  * PMD or PUD entry).  This is probably not what you want, except for
1831  * debugging purposes or implementation of other core folio_*() primitives.
1832  */
1833 static inline int folio_entire_mapcount(const struct folio *folio)
1834 {
1835 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1836 	if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
1837 		return 0;
1838 	return atomic_read(&folio->_entire_mapcount) + 1;
1839 }
1840 
1841 static inline int folio_large_mapcount(const struct folio *folio)
1842 {
1843 	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
1844 	return atomic_read(&folio->_large_mapcount) + 1;
1845 }
1846 
1847 /**
1848  * folio_mapcount() - Number of mappings of this folio.
1849  * @folio: The folio.
1850  *
1851  * The folio mapcount corresponds to the number of present user page table
1852  * entries that reference any part of a folio. Each such present user page
1853  * table entry must be paired with exactly on folio reference.
1854  *
1855  * For ordindary folios, each user page table entry (PTE/PMD/PUD/...) counts
1856  * exactly once.
1857  *
1858  * For hugetlb folios, each abstracted "hugetlb" user page table entry that
1859  * references the entire folio counts exactly once, even when such special
1860  * page table entries are comprised of multiple ordinary page table entries.
1861  *
1862  * Will report 0 for pages which cannot be mapped into userspace, such as
1863  * slab, page tables and similar.
1864  *
1865  * Return: The number of times this folio is mapped.
1866  */
1867 static inline int folio_mapcount(const struct folio *folio)
1868 {
1869 	int mapcount;
1870 
1871 	if (likely(!folio_test_large(folio))) {
1872 		mapcount = atomic_read(&folio->_mapcount) + 1;
1873 		if (page_mapcount_is_type(mapcount))
1874 			mapcount = 0;
1875 		return mapcount;
1876 	}
1877 	return folio_large_mapcount(folio);
1878 }
1879 
1880 /**
1881  * folio_mapped - Is this folio mapped into userspace?
1882  * @folio: The folio.
1883  *
1884  * Return: True if any page in this folio is referenced by user page tables.
1885  */
1886 static inline bool folio_mapped(const struct folio *folio)
1887 {
1888 	return folio_mapcount(folio) >= 1;
1889 }
1890 
1891 /*
1892  * Return true if this page is mapped into pagetables.
1893  * For compound page it returns true if any sub-page of compound page is mapped,
1894  * even if this particular sub-page is not itself mapped by any PTE or PMD.
1895  */
1896 static inline bool page_mapped(const struct page *page)
1897 {
1898 	return folio_mapped(page_folio(page));
1899 }
1900 
1901 static inline struct page *virt_to_head_page(const void *x)
1902 {
1903 	struct page *page = virt_to_page(x);
1904 
1905 	return compound_head(page);
1906 }
1907 
1908 static inline struct folio *virt_to_folio(const void *x)
1909 {
1910 	struct page *page = virt_to_page(x);
1911 
1912 	return page_folio(page);
1913 }
1914 
1915 void __folio_put(struct folio *folio);
1916 
1917 void split_page(struct page *page, unsigned int order);
1918 void folio_copy(struct folio *dst, struct folio *src);
1919 int folio_mc_copy(struct folio *dst, struct folio *src);
1920 
1921 unsigned long nr_free_buffer_pages(void);
1922 
1923 /* Returns the number of bytes in this potentially compound page. */
1924 static inline unsigned long page_size(const struct page *page)
1925 {
1926 	return PAGE_SIZE << compound_order(page);
1927 }
1928 
1929 /* Returns the number of bits needed for the number of bytes in a page */
1930 static inline unsigned int page_shift(struct page *page)
1931 {
1932 	return PAGE_SHIFT + compound_order(page);
1933 }
1934 
1935 /**
1936  * thp_order - Order of a transparent huge page.
1937  * @page: Head page of a transparent huge page.
1938  */
1939 static inline unsigned int thp_order(struct page *page)
1940 {
1941 	VM_BUG_ON_PGFLAGS(PageTail(page), page);
1942 	return compound_order(page);
1943 }
1944 
1945 /**
1946  * thp_size - Size of a transparent huge page.
1947  * @page: Head page of a transparent huge page.
1948  *
1949  * Return: Number of bytes in this page.
1950  */
1951 static inline unsigned long thp_size(struct page *page)
1952 {
1953 	return PAGE_SIZE << thp_order(page);
1954 }
1955 
1956 #ifdef CONFIG_MMU
1957 /*
1958  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1959  * servicing faults for write access.  In the normal case, do always want
1960  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1961  * that do not have writing enabled, when used by access_process_vm.
1962  */
1963 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1964 {
1965 	if (likely(vma->vm_flags & VM_WRITE))
1966 		pte = pte_mkwrite(pte, vma);
1967 	return pte;
1968 }
1969 
1970 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
1971 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1972 		struct page *page, unsigned int nr, unsigned long addr);
1973 
1974 vm_fault_t finish_fault(struct vm_fault *vmf);
1975 #endif
1976 
1977 /*
1978  * Multiple processes may "see" the same page. E.g. for untouched
1979  * mappings of /dev/null, all processes see the same page full of
1980  * zeroes, and text pages of executables and shared libraries have
1981  * only one copy in memory, at most, normally.
1982  *
1983  * For the non-reserved pages, page_count(page) denotes a reference count.
1984  *   page_count() == 0 means the page is free. page->lru is then used for
1985  *   freelist management in the buddy allocator.
1986  *   page_count() > 0  means the page has been allocated.
1987  *
1988  * Pages are allocated by the slab allocator in order to provide memory
1989  * to kmalloc and kmem_cache_alloc. In this case, the management of the
1990  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1991  * unless a particular usage is carefully commented. (the responsibility of
1992  * freeing the kmalloc memory is the caller's, of course).
1993  *
1994  * A page may be used by anyone else who does a __get_free_page().
1995  * In this case, page_count still tracks the references, and should only
1996  * be used through the normal accessor functions. The top bits of page->flags
1997  * and page->virtual store page management information, but all other fields
1998  * are unused and could be used privately, carefully. The management of this
1999  * page is the responsibility of the one who allocated it, and those who have
2000  * subsequently been given references to it.
2001  *
2002  * The other pages (we may call them "pagecache pages") are completely
2003  * managed by the Linux memory manager: I/O, buffers, swapping etc.
2004  * The following discussion applies only to them.
2005  *
2006  * A pagecache page contains an opaque `private' member, which belongs to the
2007  * page's address_space. Usually, this is the address of a circular list of
2008  * the page's disk buffers. PG_private must be set to tell the VM to call
2009  * into the filesystem to release these pages.
2010  *
2011  * A folio may belong to an inode's memory mapping. In this case,
2012  * folio->mapping points to the inode, and folio->index is the file
2013  * offset of the folio, in units of PAGE_SIZE.
2014  *
2015  * If pagecache pages are not associated with an inode, they are said to be
2016  * anonymous pages. These may become associated with the swapcache, and in that
2017  * case PG_swapcache is set, and page->private is an offset into the swapcache.
2018  *
2019  * In either case (swapcache or inode backed), the pagecache itself holds one
2020  * reference to the page. Setting PG_private should also increment the
2021  * refcount. The each user mapping also has a reference to the page.
2022  *
2023  * The pagecache pages are stored in a per-mapping radix tree, which is
2024  * rooted at mapping->i_pages, and indexed by offset.
2025  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
2026  * lists, we instead now tag pages as dirty/writeback in the radix tree.
2027  *
2028  * All pagecache pages may be subject to I/O:
2029  * - inode pages may need to be read from disk,
2030  * - inode pages which have been modified and are MAP_SHARED may need
2031  *   to be written back to the inode on disk,
2032  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
2033  *   modified may need to be swapped out to swap space and (later) to be read
2034  *   back into memory.
2035  */
2036 
2037 /* 127: arbitrary random number, small enough to assemble well */
2038 #define folio_ref_zero_or_close_to_overflow(folio) \
2039 	((unsigned int) folio_ref_count(folio) + 127u <= 127u)
2040 
2041 /**
2042  * folio_get - Increment the reference count on a folio.
2043  * @folio: The folio.
2044  *
2045  * Context: May be called in any context, as long as you know that
2046  * you have a refcount on the folio.  If you do not already have one,
2047  * folio_try_get() may be the right interface for you to use.
2048  */
2049 static inline void folio_get(struct folio *folio)
2050 {
2051 	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
2052 	folio_ref_inc(folio);
2053 }
2054 
2055 static inline void get_page(struct page *page)
2056 {
2057 	struct folio *folio = page_folio(page);
2058 	if (WARN_ON_ONCE(folio_test_slab(folio)))
2059 		return;
2060 	if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
2061 		return;
2062 	folio_get(folio);
2063 }
2064 
2065 static inline __must_check bool try_get_page(struct page *page)
2066 {
2067 	page = compound_head(page);
2068 	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
2069 		return false;
2070 	page_ref_inc(page);
2071 	return true;
2072 }
2073 
2074 /**
2075  * folio_put - Decrement the reference count on a folio.
2076  * @folio: The folio.
2077  *
2078  * If the folio's reference count reaches zero, the memory will be
2079  * released back to the page allocator and may be used by another
2080  * allocation immediately.  Do not access the memory or the struct folio
2081  * after calling folio_put() unless you can be sure that it wasn't the
2082  * last reference.
2083  *
2084  * Context: May be called in process or interrupt context, but not in NMI
2085  * context.  May be called while holding a spinlock.
2086  */
2087 static inline void folio_put(struct folio *folio)
2088 {
2089 	if (folio_put_testzero(folio))
2090 		__folio_put(folio);
2091 }
2092 
2093 /**
2094  * folio_put_refs - Reduce the reference count on a folio.
2095  * @folio: The folio.
2096  * @refs: The amount to subtract from the folio's reference count.
2097  *
2098  * If the folio's reference count reaches zero, the memory will be
2099  * released back to the page allocator and may be used by another
2100  * allocation immediately.  Do not access the memory or the struct folio
2101  * after calling folio_put_refs() unless you can be sure that these weren't
2102  * the last references.
2103  *
2104  * Context: May be called in process or interrupt context, but not in NMI
2105  * context.  May be called while holding a spinlock.
2106  */
2107 static inline void folio_put_refs(struct folio *folio, int refs)
2108 {
2109 	if (folio_ref_sub_and_test(folio, refs))
2110 		__folio_put(folio);
2111 }
2112 
2113 void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
2114 
2115 /*
2116  * union release_pages_arg - an array of pages or folios
2117  *
2118  * release_pages() releases a simple array of multiple pages, and
2119  * accepts various different forms of said page array: either
2120  * a regular old boring array of pages, an array of folios, or
2121  * an array of encoded page pointers.
2122  *
2123  * The transparent union syntax for this kind of "any of these
2124  * argument types" is all kinds of ugly, so look away.
2125  */
2126 typedef union {
2127 	struct page **pages;
2128 	struct folio **folios;
2129 	struct encoded_page **encoded_pages;
2130 } release_pages_arg __attribute__ ((__transparent_union__));
2131 
2132 void release_pages(release_pages_arg, int nr);
2133 
2134 /**
2135  * folios_put - Decrement the reference count on an array of folios.
2136  * @folios: The folios.
2137  *
2138  * Like folio_put(), but for a batch of folios.  This is more efficient
2139  * than writing the loop yourself as it will optimise the locks which need
2140  * to be taken if the folios are freed.  The folios batch is returned
2141  * empty and ready to be reused for another batch; there is no need to
2142  * reinitialise it.
2143  *
2144  * Context: May be called in process or interrupt context, but not in NMI
2145  * context.  May be called while holding a spinlock.
2146  */
2147 static inline void folios_put(struct folio_batch *folios)
2148 {
2149 	folios_put_refs(folios, NULL);
2150 }
2151 
2152 static inline void put_page(struct page *page)
2153 {
2154 	struct folio *folio = page_folio(page);
2155 
2156 	if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
2157 		return;
2158 
2159 	folio_put(folio);
2160 }
2161 
2162 /*
2163  * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
2164  * the page's refcount so that two separate items are tracked: the original page
2165  * reference count, and also a new count of how many pin_user_pages() calls were
2166  * made against the page. ("gup-pinned" is another term for the latter).
2167  *
2168  * With this scheme, pin_user_pages() becomes special: such pages are marked as
2169  * distinct from normal pages. As such, the unpin_user_page() call (and its
2170  * variants) must be used in order to release gup-pinned pages.
2171  *
2172  * Choice of value:
2173  *
2174  * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
2175  * counts with respect to pin_user_pages() and unpin_user_page() becomes
2176  * simpler, due to the fact that adding an even power of two to the page
2177  * refcount has the effect of using only the upper N bits, for the code that
2178  * counts up using the bias value. This means that the lower bits are left for
2179  * the exclusive use of the original code that increments and decrements by one
2180  * (or at least, by much smaller values than the bias value).
2181  *
2182  * Of course, once the lower bits overflow into the upper bits (and this is
2183  * OK, because subtraction recovers the original values), then visual inspection
2184  * no longer suffices to directly view the separate counts. However, for normal
2185  * applications that don't have huge page reference counts, this won't be an
2186  * issue.
2187  *
2188  * Locking: the lockless algorithm described in folio_try_get_rcu()
2189  * provides safe operation for get_user_pages(), folio_mkclean() and
2190  * other calls that race to set up page table entries.
2191  */
2192 #define GUP_PIN_COUNTING_BIAS (1U << 10)
2193 
2194 void unpin_user_page(struct page *page);
2195 void unpin_folio(struct folio *folio);
2196 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
2197 				 bool make_dirty);
2198 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
2199 				      bool make_dirty);
2200 void unpin_user_pages(struct page **pages, unsigned long npages);
2201 void unpin_user_folio(struct folio *folio, unsigned long npages);
2202 void unpin_folios(struct folio **folios, unsigned long nfolios);
2203 
2204 static inline bool is_cow_mapping(vm_flags_t flags)
2205 {
2206 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2207 }
2208 
2209 static inline bool vma_desc_is_cow_mapping(struct vm_area_desc *desc)
2210 {
2211 	const vma_flags_t *flags = &desc->vma_flags;
2212 
2213 	return vma_flags_test(flags, VMA_MAYWRITE_BIT) &&
2214 		!vma_flags_test(flags, VMA_SHARED_BIT);
2215 }
2216 
2217 #ifndef CONFIG_MMU
2218 static inline bool is_nommu_shared_mapping(vm_flags_t flags)
2219 {
2220 	/*
2221 	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
2222 	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
2223 	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
2224 	 * underlying memory if ptrace is active, so this is only possible if
2225 	 * ptrace does not apply. Note that there is no mprotect() to upgrade
2226 	 * write permissions later.
2227 	 */
2228 	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
2229 }
2230 
2231 static inline bool is_nommu_shared_vma_flags(const vma_flags_t *flags)
2232 {
2233 	return vma_flags_test_any(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT);
2234 }
2235 #endif
2236 
2237 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
2238 #define SECTION_IN_PAGE_FLAGS
2239 #endif
2240 
2241 /*
2242  * The identification function is mainly used by the buddy allocator for
2243  * determining if two pages could be buddies. We are not really identifying
2244  * the zone since we could be using the section number id if we do not have
2245  * node id available in page flags.
2246  * We only guarantee that it will return the same value for two combinable
2247  * pages in a zone.
2248  */
2249 static inline int page_zone_id(struct page *page)
2250 {
2251 	return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
2252 }
2253 
2254 #ifdef NODE_NOT_IN_PAGE_FLAGS
2255 int memdesc_nid(memdesc_flags_t mdf);
2256 #else
2257 static inline int memdesc_nid(memdesc_flags_t mdf)
2258 {
2259 	return (mdf.f >> NODES_PGSHIFT) & NODES_MASK;
2260 }
2261 #endif
2262 
2263 static inline int page_to_nid(const struct page *page)
2264 {
2265 	return memdesc_nid(PF_POISONED_CHECK(page)->flags);
2266 }
2267 
2268 static inline int folio_nid(const struct folio *folio)
2269 {
2270 	return memdesc_nid(folio->flags);
2271 }
2272 
2273 #ifdef CONFIG_NUMA_BALANCING
2274 /* page access time bits needs to hold at least 4 seconds */
2275 #define PAGE_ACCESS_TIME_MIN_BITS	12
2276 #if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
2277 #define PAGE_ACCESS_TIME_BUCKETS				\
2278 	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
2279 #else
2280 #define PAGE_ACCESS_TIME_BUCKETS	0
2281 #endif
2282 
2283 #define PAGE_ACCESS_TIME_MASK				\
2284 	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
2285 
2286 static inline int cpu_pid_to_cpupid(int cpu, int pid)
2287 {
2288 	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
2289 }
2290 
2291 static inline int cpupid_to_pid(int cpupid)
2292 {
2293 	return cpupid & LAST__PID_MASK;
2294 }
2295 
2296 static inline int cpupid_to_cpu(int cpupid)
2297 {
2298 	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
2299 }
2300 
2301 static inline int cpupid_to_nid(int cpupid)
2302 {
2303 	return cpu_to_node(cpupid_to_cpu(cpupid));
2304 }
2305 
2306 static inline bool cpupid_pid_unset(int cpupid)
2307 {
2308 	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
2309 }
2310 
2311 static inline bool cpupid_cpu_unset(int cpupid)
2312 {
2313 	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
2314 }
2315 
2316 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
2317 {
2318 	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
2319 }
2320 
2321 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
2322 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
2323 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
2324 {
2325 	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
2326 }
2327 
2328 static inline int folio_last_cpupid(struct folio *folio)
2329 {
2330 	return folio->_last_cpupid;
2331 }
2332 static inline void page_cpupid_reset_last(struct page *page)
2333 {
2334 	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
2335 }
2336 #else
2337 static inline int folio_last_cpupid(struct folio *folio)
2338 {
2339 	return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
2340 }
2341 
2342 int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
2343 
2344 static inline void page_cpupid_reset_last(struct page *page)
2345 {
2346 	page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
2347 }
2348 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
2349 
2350 static inline int folio_xchg_access_time(struct folio *folio, int time)
2351 {
2352 	int last_time;
2353 
2354 	last_time = folio_xchg_last_cpupid(folio,
2355 					   time >> PAGE_ACCESS_TIME_BUCKETS);
2356 	return last_time << PAGE_ACCESS_TIME_BUCKETS;
2357 }
2358 
2359 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
2360 {
2361 	unsigned int pid_bit;
2362 
2363 	pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
2364 	if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
2365 		__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
2366 	}
2367 }
2368 
2369 bool folio_use_access_time(struct folio *folio);
2370 #else /* !CONFIG_NUMA_BALANCING */
2371 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
2372 {
2373 	return folio_nid(folio); /* XXX */
2374 }
2375 
2376 static inline int folio_xchg_access_time(struct folio *folio, int time)
2377 {
2378 	return 0;
2379 }
2380 
2381 static inline int folio_last_cpupid(struct folio *folio)
2382 {
2383 	return folio_nid(folio); /* XXX */
2384 }
2385 
2386 static inline int cpupid_to_nid(int cpupid)
2387 {
2388 	return -1;
2389 }
2390 
2391 static inline int cpupid_to_pid(int cpupid)
2392 {
2393 	return -1;
2394 }
2395 
2396 static inline int cpupid_to_cpu(int cpupid)
2397 {
2398 	return -1;
2399 }
2400 
2401 static inline int cpu_pid_to_cpupid(int nid, int pid)
2402 {
2403 	return -1;
2404 }
2405 
2406 static inline bool cpupid_pid_unset(int cpupid)
2407 {
2408 	return true;
2409 }
2410 
2411 static inline void page_cpupid_reset_last(struct page *page)
2412 {
2413 }
2414 
2415 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
2416 {
2417 	return false;
2418 }
2419 
2420 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
2421 {
2422 }
2423 static inline bool folio_use_access_time(struct folio *folio)
2424 {
2425 	return false;
2426 }
2427 #endif /* CONFIG_NUMA_BALANCING */
2428 
2429 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
2430 
2431 /*
2432  * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
2433  * setting tags for all pages to native kernel tag value 0xff, as the default
2434  * value 0x00 maps to 0xff.
2435  */
2436 
2437 static inline u8 page_kasan_tag(const struct page *page)
2438 {
2439 	u8 tag = KASAN_TAG_KERNEL;
2440 
2441 	if (kasan_enabled()) {
2442 		tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
2443 		tag ^= 0xff;
2444 	}
2445 
2446 	return tag;
2447 }
2448 
2449 static inline void page_kasan_tag_set(struct page *page, u8 tag)
2450 {
2451 	unsigned long old_flags, flags;
2452 
2453 	if (!kasan_enabled())
2454 		return;
2455 
2456 	tag ^= 0xff;
2457 	old_flags = READ_ONCE(page->flags.f);
2458 	do {
2459 		flags = old_flags;
2460 		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
2461 		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
2462 	} while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
2463 }
2464 
2465 static inline void page_kasan_tag_reset(struct page *page)
2466 {
2467 	if (kasan_enabled())
2468 		page_kasan_tag_set(page, KASAN_TAG_KERNEL);
2469 }
2470 
2471 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
2472 
2473 static inline u8 page_kasan_tag(const struct page *page)
2474 {
2475 	return 0xff;
2476 }
2477 
2478 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
2479 static inline void page_kasan_tag_reset(struct page *page) { }
2480 
2481 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
2482 
2483 static inline struct zone *page_zone(const struct page *page)
2484 {
2485 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
2486 }
2487 
2488 static inline pg_data_t *page_pgdat(const struct page *page)
2489 {
2490 	return NODE_DATA(page_to_nid(page));
2491 }
2492 
2493 static inline pg_data_t *folio_pgdat(const struct folio *folio)
2494 {
2495 	return NODE_DATA(folio_nid(folio));
2496 }
2497 
2498 static inline struct zone *folio_zone(const struct folio *folio)
2499 {
2500 	return &folio_pgdat(folio)->node_zones[folio_zonenum(folio)];
2501 }
2502 
2503 #ifdef SECTION_IN_PAGE_FLAGS
2504 static inline void set_page_section(struct page *page, unsigned long section)
2505 {
2506 	page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
2507 	page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
2508 }
2509 
2510 static inline unsigned long memdesc_section(memdesc_flags_t mdf)
2511 {
2512 	return (mdf.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
2513 }
2514 #else /* !SECTION_IN_PAGE_FLAGS */
2515 static inline unsigned long memdesc_section(memdesc_flags_t mdf)
2516 {
2517 	return 0;
2518 }
2519 #endif /* SECTION_IN_PAGE_FLAGS */
2520 
2521 /**
2522  * folio_pfn - Return the Page Frame Number of a folio.
2523  * @folio: The folio.
2524  *
2525  * A folio may contain multiple pages.  The pages have consecutive
2526  * Page Frame Numbers.
2527  *
2528  * Return: The Page Frame Number of the first page in the folio.
2529  */
2530 static inline unsigned long folio_pfn(const struct folio *folio)
2531 {
2532 	return page_to_pfn(&folio->page);
2533 }
2534 
2535 static inline struct folio *pfn_folio(unsigned long pfn)
2536 {
2537 	return page_folio(pfn_to_page(pfn));
2538 }
2539 
2540 #ifdef CONFIG_MMU
2541 static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
2542 {
2543 	return pfn_pte(page_to_pfn(page), pgprot);
2544 }
2545 
2546 /**
2547  * folio_mk_pte - Create a PTE for this folio
2548  * @folio: The folio to create a PTE for
2549  * @pgprot: The page protection bits to use
2550  *
2551  * Create a page table entry for the first page of this folio.
2552  * This is suitable for passing to set_ptes().
2553  *
2554  * Return: A page table entry suitable for mapping this folio.
2555  */
2556 static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
2557 {
2558 	return pfn_pte(folio_pfn(folio), pgprot);
2559 }
2560 
2561 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2562 /**
2563  * folio_mk_pmd - Create a PMD for this folio
2564  * @folio: The folio to create a PMD for
2565  * @pgprot: The page protection bits to use
2566  *
2567  * Create a page table entry for the first page of this folio.
2568  * This is suitable for passing to set_pmd_at().
2569  *
2570  * Return: A page table entry suitable for mapping this folio.
2571  */
2572 static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
2573 {
2574 	return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
2575 }
2576 
2577 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2578 /**
2579  * folio_mk_pud - Create a PUD for this folio
2580  * @folio: The folio to create a PUD for
2581  * @pgprot: The page protection bits to use
2582  *
2583  * Create a page table entry for the first page of this folio.
2584  * This is suitable for passing to set_pud_at().
2585  *
2586  * Return: A page table entry suitable for mapping this folio.
2587  */
2588 static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
2589 {
2590 	return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
2591 }
2592 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2593 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2594 #endif /* CONFIG_MMU */
2595 
2596 static inline bool folio_has_pincount(const struct folio *folio)
2597 {
2598 	if (IS_ENABLED(CONFIG_64BIT))
2599 		return folio_test_large(folio);
2600 	return folio_order(folio) > 1;
2601 }
2602 
2603 /**
2604  * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
2605  * @folio: The folio.
2606  *
2607  * This function checks if a folio has been pinned via a call to
2608  * a function in the pin_user_pages() family.
2609  *
2610  * For small folios, the return value is partially fuzzy: false is not fuzzy,
2611  * because it means "definitely not pinned for DMA", but true means "probably
2612  * pinned for DMA, but possibly a false positive due to having at least
2613  * GUP_PIN_COUNTING_BIAS worth of normal folio references".
2614  *
2615  * False positives are OK, because: a) it's unlikely for a folio to
2616  * get that many refcounts, and b) all the callers of this routine are
2617  * expected to be able to deal gracefully with a false positive.
2618  *
2619  * For most large folios, the result will be exactly correct. That's because
2620  * we have more tracking data available: the _pincount field is used
2621  * instead of the GUP_PIN_COUNTING_BIAS scheme.
2622  *
2623  * For more information, please see Documentation/core-api/pin_user_pages.rst.
2624  *
2625  * Return: True, if it is likely that the folio has been "dma-pinned".
2626  * False, if the folio is definitely not dma-pinned.
2627  */
2628 static inline bool folio_maybe_dma_pinned(struct folio *folio)
2629 {
2630 	if (folio_has_pincount(folio))
2631 		return atomic_read(&folio->_pincount) > 0;
2632 
2633 	/*
2634 	 * folio_ref_count() is signed. If that refcount overflows, then
2635 	 * folio_ref_count() returns a negative value, and callers will avoid
2636 	 * further incrementing the refcount.
2637 	 *
2638 	 * Here, for that overflow case, use the sign bit to count a little
2639 	 * bit higher via unsigned math, and thus still get an accurate result.
2640 	 */
2641 	return ((unsigned int)folio_ref_count(folio)) >=
2642 		GUP_PIN_COUNTING_BIAS;
2643 }
2644 
2645 /*
2646  * This should most likely only be called during fork() to see whether we
2647  * should break the cow immediately for an anon page on the src mm.
2648  *
2649  * The caller has to hold the PT lock and the vma->vm_mm->->write_protect_seq.
2650  */
2651 static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
2652 					  struct folio *folio)
2653 {
2654 	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
2655 
2656 	if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
2657 		return false;
2658 
2659 	return folio_maybe_dma_pinned(folio);
2660 }
2661 
2662 /**
2663  * is_zero_page - Query if a page is a zero page
2664  * @page: The page to query
2665  *
2666  * This returns true if @page is one of the permanent zero pages.
2667  */
2668 static inline bool is_zero_page(const struct page *page)
2669 {
2670 	return is_zero_pfn(page_to_pfn(page));
2671 }
2672 
2673 /**
2674  * is_zero_folio - Query if a folio is a zero page
2675  * @folio: The folio to query
2676  *
2677  * This returns true if @folio is one of the permanent zero pages.
2678  */
2679 static inline bool is_zero_folio(const struct folio *folio)
2680 {
2681 	return is_zero_page(&folio->page);
2682 }
2683 
2684 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
2685 #ifdef CONFIG_MIGRATION
2686 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2687 {
2688 #ifdef CONFIG_CMA
2689 	int mt = folio_migratetype(folio);
2690 
2691 	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
2692 		return false;
2693 #endif
2694 	/* The zero page can be "pinned" but gets special handling. */
2695 	if (is_zero_folio(folio))
2696 		return true;
2697 
2698 	/* Coherent device memory must always allow eviction. */
2699 	if (folio_is_device_coherent(folio))
2700 		return false;
2701 
2702 	/*
2703 	 * Filesystems can only tolerate transient delays to truncate and
2704 	 * hole-punch operations
2705 	 */
2706 	if (folio_is_fsdax(folio))
2707 		return false;
2708 
2709 	/* Otherwise, non-movable zone folios can be pinned. */
2710 	return !folio_is_zone_movable(folio);
2711 
2712 }
2713 #else
2714 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2715 {
2716 	return true;
2717 }
2718 #endif
2719 
2720 static inline void set_page_zone(struct page *page, enum zone_type zone)
2721 {
2722 	page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
2723 	page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2724 }
2725 
2726 static inline void set_page_node(struct page *page, unsigned long node)
2727 {
2728 	page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
2729 	page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
2730 }
2731 
2732 static inline void set_page_links(struct page *page, enum zone_type zone,
2733 	unsigned long node, unsigned long pfn)
2734 {
2735 	set_page_zone(page, zone);
2736 	set_page_node(page, node);
2737 #ifdef SECTION_IN_PAGE_FLAGS
2738 	set_page_section(page, pfn_to_section_nr(pfn));
2739 #endif
2740 }
2741 
2742 /**
2743  * folio_nr_pages - The number of pages in the folio.
2744  * @folio: The folio.
2745  *
2746  * Return: A positive power of two.
2747  */
2748 static inline unsigned long folio_nr_pages(const struct folio *folio)
2749 {
2750 	if (!folio_test_large(folio))
2751 		return 1;
2752 	return folio_large_nr_pages(folio);
2753 }
2754 
2755 /*
2756  * compound_nr() returns the number of pages in this potentially compound
2757  * page.  compound_nr() can be called on a tail page, and is defined to
2758  * return 1 in that case.
2759  */
2760 static inline unsigned long compound_nr(const struct page *page)
2761 {
2762 	const struct folio *folio = (struct folio *)page;
2763 
2764 	if (!test_bit(PG_head, &folio->flags.f))
2765 		return 1;
2766 	return folio_large_nr_pages(folio);
2767 }
2768 
2769 /**
2770  * folio_next - Move to the next physical folio.
2771  * @folio: The folio we're currently operating on.
2772  *
2773  * If you have physically contiguous memory which may span more than
2774  * one folio (eg a &struct bio_vec), use this function to move from one
2775  * folio to the next.  Do not use it if the memory is only virtually
2776  * contiguous as the folios are almost certainly not adjacent to each
2777  * other.  This is the folio equivalent to writing ``page++``.
2778  *
2779  * Context: We assume that the folios are refcounted and/or locked at a
2780  * higher level and do not adjust the reference counts.
2781  * Return: The next struct folio.
2782  */
2783 static inline struct folio *folio_next(struct folio *folio)
2784 {
2785 	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2786 }
2787 
2788 /**
2789  * folio_shift - The size of the memory described by this folio.
2790  * @folio: The folio.
2791  *
2792  * A folio represents a number of bytes which is a power-of-two in size.
2793  * This function tells you which power-of-two the folio is.  See also
2794  * folio_size() and folio_order().
2795  *
2796  * Context: The caller should have a reference on the folio to prevent
2797  * it from being split.  It is not necessary for the folio to be locked.
2798  * Return: The base-2 logarithm of the size of this folio.
2799  */
2800 static inline unsigned int folio_shift(const struct folio *folio)
2801 {
2802 	return PAGE_SHIFT + folio_order(folio);
2803 }
2804 
2805 /**
2806  * folio_size - The number of bytes in a folio.
2807  * @folio: The folio.
2808  *
2809  * Context: The caller should have a reference on the folio to prevent
2810  * it from being split.  It is not necessary for the folio to be locked.
2811  * Return: The number of bytes in this folio.
2812  */
2813 static inline size_t folio_size(const struct folio *folio)
2814 {
2815 	return PAGE_SIZE << folio_order(folio);
2816 }
2817 
2818 /**
2819  * folio_maybe_mapped_shared - Whether the folio is mapped into the page
2820  *			       tables of more than one MM
2821  * @folio: The folio.
2822  *
2823  * This function checks if the folio maybe currently mapped into more than one
2824  * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
2825  * MM ("mapped exclusively").
2826  *
2827  * For KSM folios, this function also returns "mapped shared" when a folio is
2828  * mapped multiple times into the same MM, because the individual page mappings
2829  * are independent.
2830  *
2831  * For small anonymous folios and anonymous hugetlb folios, the return
2832  * value will be exactly correct: non-KSM folios can only be mapped at most once
2833  * into an MM, and they cannot be partially mapped. KSM folios are
2834  * considered shared even if mapped multiple times into the same MM.
2835  *
2836  * For other folios, the result can be fuzzy:
2837  *    #. For partially-mappable large folios (THP), the return value can wrongly
2838  *       indicate "mapped shared" (false positive) if a folio was mapped by
2839  *       more than two MMs at one point in time.
2840  *    #. For pagecache folios (including hugetlb), the return value can wrongly
2841  *       indicate "mapped shared" (false positive) when two VMAs in the same MM
2842  *       cover the same file range.
2843  *
2844  * Further, this function only considers current page table mappings that
2845  * are tracked using the folio mapcount(s).
2846  *
2847  * This function does not consider:
2848  *    #. If the folio might get mapped in the (near) future (e.g., swapcache,
2849  *       pagecache, temporary unmapping for migration).
2850  *    #. If the folio is mapped differently (VM_PFNMAP).
2851  *    #. If hugetlb page table sharing applies. Callers might want to check
2852  *       hugetlb_pmd_shared().
2853  *
2854  * Return: Whether the folio is estimated to be mapped into more than one MM.
2855  */
2856 static inline bool folio_maybe_mapped_shared(struct folio *folio)
2857 {
2858 	int mapcount = folio_mapcount(folio);
2859 
2860 	/* Only partially-mappable folios require more care. */
2861 	if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
2862 		return mapcount > 1;
2863 
2864 	/*
2865 	 * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ...
2866 	 * simply assume "mapped shared", nobody should really care
2867 	 * about this for arbitrary kernel allocations.
2868 	 */
2869 	if (!IS_ENABLED(CONFIG_MM_ID))
2870 		return true;
2871 
2872 	/*
2873 	 * A single mapping implies "mapped exclusively", even if the
2874 	 * folio flag says something different: it's easier to handle this
2875 	 * case here instead of on the RMAP hot path.
2876 	 */
2877 	if (mapcount <= 1)
2878 		return false;
2879 	return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
2880 }
2881 
2882 /**
2883  * folio_expected_ref_count - calculate the expected folio refcount
2884  * @folio: the folio
2885  *
2886  * Calculate the expected folio refcount, taking references from the pagecache,
2887  * swapcache, PG_private and page table mappings into account. Useful in
2888  * combination with folio_ref_count() to detect unexpected references (e.g.,
2889  * GUP or other temporary references).
2890  *
2891  * Does currently not consider references from the LRU cache. If the folio
2892  * was isolated from the LRU (which is the case during migration or split),
2893  * the LRU cache does not apply.
2894  *
2895  * Calling this function on an unmapped folio -- !folio_mapped() -- that is
2896  * locked will return a stable result.
2897  *
2898  * Calling this function on a mapped folio will not result in a stable result,
2899  * because nothing stops additional page table mappings from coming (e.g.,
2900  * fork()) or going (e.g., munmap()).
2901  *
2902  * Calling this function without the folio lock will also not result in a
2903  * stable result: for example, the folio might get dropped from the swapcache
2904  * concurrently.
2905  *
2906  * However, even when called without the folio lock or on a mapped folio,
2907  * this function can be used to detect unexpected references early (for example,
2908  * if it makes sense to even lock the folio and unmap it).
2909  *
2910  * The caller must add any reference (e.g., from folio_try_get()) it might be
2911  * holding itself to the result.
2912  *
2913  * Returns: the expected folio refcount.
2914  */
2915 static inline int folio_expected_ref_count(const struct folio *folio)
2916 {
2917 	const int order = folio_order(folio);
2918 	int ref_count = 0;
2919 
2920 	if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
2921 		return 0;
2922 
2923 	/* One reference per page from the swapcache. */
2924 	ref_count += folio_test_swapcache(folio) << order;
2925 
2926 	if (!folio_test_anon(folio)) {
2927 		/* One reference per page from the pagecache. */
2928 		ref_count += !!folio->mapping << order;
2929 		/* One reference from PG_private. */
2930 		ref_count += folio_test_private(folio);
2931 	}
2932 
2933 	/* One reference per page table mapping. */
2934 	return ref_count + folio_mapcount(folio);
2935 }
2936 
2937 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
2938 static inline int arch_make_folio_accessible(struct folio *folio)
2939 {
2940 	return 0;
2941 }
2942 #endif
2943 
2944 /*
2945  * Some inline functions in vmstat.h depend on page_zone()
2946  */
2947 #include <linux/vmstat.h>
2948 
2949 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2950 #define HASHED_PAGE_VIRTUAL
2951 #endif
2952 
2953 #if defined(WANT_PAGE_VIRTUAL)
2954 static inline void *page_address(const struct page *page)
2955 {
2956 	return page->virtual;
2957 }
2958 static inline void set_page_address(struct page *page, void *address)
2959 {
2960 	page->virtual = address;
2961 }
2962 #define page_address_init()  do { } while(0)
2963 #endif
2964 
2965 #if defined(HASHED_PAGE_VIRTUAL)
2966 void *page_address(const struct page *page);
2967 void set_page_address(struct page *page, void *virtual);
2968 void page_address_init(void);
2969 #endif
2970 
2971 static __always_inline void *lowmem_page_address(const struct page *page)
2972 {
2973 	return page_to_virt(page);
2974 }
2975 
2976 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2977 #define page_address(page) lowmem_page_address(page)
2978 #define set_page_address(page, address)  do { } while(0)
2979 #define page_address_init()  do { } while(0)
2980 #endif
2981 
2982 static inline void *folio_address(const struct folio *folio)
2983 {
2984 	return page_address(&folio->page);
2985 }
2986 
2987 /*
2988  * Return true only if the page has been allocated with
2989  * ALLOC_NO_WATERMARKS and the low watermark was not
2990  * met implying that the system is under some pressure.
2991  */
2992 static inline bool page_is_pfmemalloc(const struct page *page)
2993 {
2994 	/*
2995 	 * lru.next has bit 1 set if the page is allocated from the
2996 	 * pfmemalloc reserves.  Callers may simply overwrite it if
2997 	 * they do not need to preserve that information.
2998 	 */
2999 	return (uintptr_t)page->lru.next & BIT(1);
3000 }
3001 
3002 /*
3003  * Return true only if the folio has been allocated with
3004  * ALLOC_NO_WATERMARKS and the low watermark was not
3005  * met implying that the system is under some pressure.
3006  */
3007 static inline bool folio_is_pfmemalloc(const struct folio *folio)
3008 {
3009 	/*
3010 	 * lru.next has bit 1 set if the page is allocated from the
3011 	 * pfmemalloc reserves.  Callers may simply overwrite it if
3012 	 * they do not need to preserve that information.
3013 	 */
3014 	return (uintptr_t)folio->lru.next & BIT(1);
3015 }
3016 
3017 /*
3018  * Only to be called by the page allocator on a freshly allocated
3019  * page.
3020  */
3021 static inline void set_page_pfmemalloc(struct page *page)
3022 {
3023 	page->lru.next = (void *)BIT(1);
3024 }
3025 
3026 static inline void clear_page_pfmemalloc(struct page *page)
3027 {
3028 	page->lru.next = NULL;
3029 }
3030 
3031 /*
3032  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
3033  */
3034 extern void pagefault_out_of_memory(void);
3035 
3036 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
3037 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
3038 
3039 /*
3040  * Parameter block passed down to zap_pte_range in exceptional cases.
3041  */
3042 struct zap_details {
3043 	struct folio *single_folio;	/* Locked folio to be unmapped */
3044 	bool skip_cows;			/* Do not zap COWed private pages */
3045 	bool reclaim_pt;		/* Need reclaim page tables? */
3046 	bool reaping;			/* Reaping, do not block. */
3047 	zap_flags_t zap_flags;		/* Extra flags for zapping */
3048 };
3049 
3050 /*
3051  * Whether to drop the pte markers, for example, the uffd-wp information for
3052  * file-backed memory.  This should only be specified when we will completely
3053  * drop the page in the mm, either by truncation or unmapping of the vma.  By
3054  * default, the flag is not set.
3055  */
3056 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
3057 /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
3058 #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
3059 
3060 #ifdef CONFIG_MMU
3061 extern bool can_do_mlock(void);
3062 #else
3063 static inline bool can_do_mlock(void) { return false; }
3064 #endif
3065 extern int user_shm_lock(size_t, struct ucounts *);
3066 extern void user_shm_unlock(size_t, struct ucounts *);
3067 
3068 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
3069 			     pte_t pte);
3070 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
3071 			     pte_t pte);
3072 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
3073 				  unsigned long addr, pmd_t pmd);
3074 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
3075 				pmd_t pmd);
3076 struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
3077 		pud_t pud);
3078 
3079 void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
3080 		  unsigned long size);
3081 void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
3082 			   unsigned long size);
3083 /**
3084  * zap_vma - zap all page table entries in a vma
3085  * @vma: The vma to zap.
3086  */
3087 static inline void zap_vma(struct vm_area_struct *vma)
3088 {
3089 	zap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
3090 }
3091 struct mmu_notifier_range;
3092 
3093 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
3094 		unsigned long end, unsigned long floor, unsigned long ceiling);
3095 int
3096 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
3097 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
3098 			void *buf, int len, int write);
3099 
3100 struct follow_pfnmap_args {
3101 	/**
3102 	 * Inputs:
3103 	 * @vma: Pointer to @vm_area_struct struct
3104 	 * @address: the virtual address to walk
3105 	 */
3106 	struct vm_area_struct *vma;
3107 	unsigned long address;
3108 	/**
3109 	 * Internals:
3110 	 *
3111 	 * The caller shouldn't touch any of these.
3112 	 */
3113 	spinlock_t *lock;
3114 	pte_t *ptep;
3115 	/**
3116 	 * Outputs:
3117 	 *
3118 	 * @pfn: the PFN of the address
3119 	 * @addr_mask: address mask covering pfn
3120 	 * @pgprot: the pgprot_t of the mapping
3121 	 * @writable: whether the mapping is writable
3122 	 * @special: whether the mapping is a special mapping (real PFN maps)
3123 	 */
3124 	unsigned long pfn;
3125 	unsigned long addr_mask;
3126 	pgprot_t pgprot;
3127 	bool writable;
3128 	bool special;
3129 };
3130 int follow_pfnmap_start(struct follow_pfnmap_args *args);
3131 void follow_pfnmap_end(struct follow_pfnmap_args *args);
3132 
3133 extern void truncate_pagecache(struct inode *inode, loff_t new);
3134 extern void truncate_setsize(struct inode *inode, loff_t newsize);
3135 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
3136 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
3137 int generic_error_remove_folio(struct address_space *mapping,
3138 		struct folio *folio);
3139 
3140 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
3141 		unsigned long address, struct pt_regs *regs);
3142 
3143 #ifdef CONFIG_MMU
3144 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
3145 				  unsigned long address, unsigned int flags,
3146 				  struct pt_regs *regs);
3147 extern int fixup_user_fault(struct mm_struct *mm,
3148 			    unsigned long address, unsigned int fault_flags,
3149 			    bool *unlocked);
3150 void unmap_mapping_pages(struct address_space *mapping,
3151 		pgoff_t start, pgoff_t nr, bool even_cows);
3152 void unmap_mapping_range(struct address_space *mapping,
3153 		loff_t const holebegin, loff_t const holelen, int even_cows);
3154 #else
3155 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
3156 					 unsigned long address, unsigned int flags,
3157 					 struct pt_regs *regs)
3158 {
3159 	/* should never happen if there's no MMU */
3160 	BUG();
3161 	return VM_FAULT_SIGBUS;
3162 }
3163 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
3164 		unsigned int fault_flags, bool *unlocked)
3165 {
3166 	/* should never happen if there's no MMU */
3167 	BUG();
3168 	return -EFAULT;
3169 }
3170 static inline void unmap_mapping_pages(struct address_space *mapping,
3171 		pgoff_t start, pgoff_t nr, bool even_cows) { }
3172 static inline void unmap_mapping_range(struct address_space *mapping,
3173 		loff_t const holebegin, loff_t const holelen, int even_cows) { }
3174 #endif
3175 
3176 static inline void unmap_shared_mapping_range(struct address_space *mapping,
3177 		loff_t const holebegin, loff_t const holelen)
3178 {
3179 	unmap_mapping_range(mapping, holebegin, holelen, 0);
3180 }
3181 
3182 static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
3183 						unsigned long addr);
3184 
3185 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
3186 		void *buf, int len, unsigned int gup_flags);
3187 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3188 		void *buf, int len, unsigned int gup_flags);
3189 
3190 #ifdef CONFIG_BPF_SYSCALL
3191 extern int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
3192 			      void *buf, int len, unsigned int gup_flags);
3193 #endif
3194 
3195 long get_user_pages_remote(struct mm_struct *mm,
3196 			   unsigned long start, unsigned long nr_pages,
3197 			   unsigned int gup_flags, struct page **pages,
3198 			   int *locked);
3199 long pin_user_pages_remote(struct mm_struct *mm,
3200 			   unsigned long start, unsigned long nr_pages,
3201 			   unsigned int gup_flags, struct page **pages,
3202 			   int *locked);
3203 
3204 /*
3205  * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
3206  */
3207 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
3208 						    unsigned long addr,
3209 						    int gup_flags,
3210 						    struct vm_area_struct **vmap)
3211 {
3212 	struct page *page;
3213 	struct vm_area_struct *vma;
3214 	int got;
3215 
3216 	if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
3217 		return ERR_PTR(-EINVAL);
3218 
3219 	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
3220 
3221 	if (got < 0)
3222 		return ERR_PTR(got);
3223 
3224 	vma = vma_lookup(mm, addr);
3225 	if (WARN_ON_ONCE(!vma)) {
3226 		put_page(page);
3227 		return ERR_PTR(-EINVAL);
3228 	}
3229 
3230 	*vmap = vma;
3231 	return page;
3232 }
3233 
3234 long get_user_pages(unsigned long start, unsigned long nr_pages,
3235 		    unsigned int gup_flags, struct page **pages);
3236 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3237 		    unsigned int gup_flags, struct page **pages);
3238 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3239 		    struct page **pages, unsigned int gup_flags);
3240 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3241 		    struct page **pages, unsigned int gup_flags);
3242 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
3243 		      struct folio **folios, unsigned int max_folios,
3244 		      pgoff_t *offset);
3245 int folio_add_pins(struct folio *folio, unsigned int pins);
3246 
3247 int get_user_pages_fast(unsigned long start, int nr_pages,
3248 			unsigned int gup_flags, struct page **pages);
3249 int pin_user_pages_fast(unsigned long start, int nr_pages,
3250 			unsigned int gup_flags, struct page **pages);
3251 void folio_add_pin(struct folio *folio);
3252 
3253 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
3254 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
3255 			const struct task_struct *task, bool bypass_rlim);
3256 
3257 struct kvec;
3258 struct page *get_dump_page(unsigned long addr, int *locked);
3259 
3260 bool folio_mark_dirty(struct folio *folio);
3261 bool folio_mark_dirty_lock(struct folio *folio);
3262 bool set_page_dirty(struct page *page);
3263 int set_page_dirty_lock(struct page *page);
3264 
3265 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
3266 
3267 /*
3268  * Flags used by change_protection().  For now we make it a bitmap so
3269  * that we can pass in multiple flags just like parameters.  However
3270  * for now all the callers are only use one of the flags at the same
3271  * time.
3272  */
3273 /*
3274  * Whether we should manually check if we can map individual PTEs writable,
3275  * because something (e.g., COW, uffd-wp) blocks that from happening for all
3276  * PTEs automatically in a writable mapping.
3277  */
3278 #define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
3279 /* Whether this protection change is for NUMA hints */
3280 #define  MM_CP_PROT_NUMA                   (1UL << 1)
3281 /* Whether this change is for write protecting */
3282 #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
3283 #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
3284 #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
3285 					    MM_CP_UFFD_WP_RESOLVE)
3286 
3287 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
3288 			     pte_t pte);
3289 extern long change_protection(struct mmu_gather *tlb,
3290 			      struct vm_area_struct *vma, unsigned long start,
3291 			      unsigned long end, unsigned long cp_flags);
3292 extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
3293 	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
3294 	  unsigned long start, unsigned long end, vm_flags_t newflags);
3295 
3296 /*
3297  * doesn't attempt to fault and will return short.
3298  */
3299 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3300 			     unsigned int gup_flags, struct page **pages);
3301 
3302 static inline bool get_user_page_fast_only(unsigned long addr,
3303 			unsigned int gup_flags, struct page **pagep)
3304 {
3305 	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
3306 }
3307 /*
3308  * per-process(per-mm_struct) statistics.
3309  */
3310 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
3311 {
3312 	return percpu_counter_read_positive(&mm->rss_stat[member]);
3313 }
3314 
3315 static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
3316 {
3317 	return percpu_counter_sum_positive(&mm->rss_stat[member]);
3318 }
3319 
3320 void mm_trace_rss_stat(struct mm_struct *mm, int member);
3321 
3322 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
3323 {
3324 	percpu_counter_add(&mm->rss_stat[member], value);
3325 
3326 	mm_trace_rss_stat(mm, member);
3327 }
3328 
3329 static inline void inc_mm_counter(struct mm_struct *mm, int member)
3330 {
3331 	percpu_counter_inc(&mm->rss_stat[member]);
3332 
3333 	mm_trace_rss_stat(mm, member);
3334 }
3335 
3336 static inline void dec_mm_counter(struct mm_struct *mm, int member)
3337 {
3338 	percpu_counter_dec(&mm->rss_stat[member]);
3339 
3340 	mm_trace_rss_stat(mm, member);
3341 }
3342 
3343 /* Optimized variant when folio is already known not to be anon */
3344 static inline int mm_counter_file(struct folio *folio)
3345 {
3346 	if (folio_test_swapbacked(folio))
3347 		return MM_SHMEMPAGES;
3348 	return MM_FILEPAGES;
3349 }
3350 
3351 static inline int mm_counter(struct folio *folio)
3352 {
3353 	if (folio_test_anon(folio))
3354 		return MM_ANONPAGES;
3355 	return mm_counter_file(folio);
3356 }
3357 
3358 static inline unsigned long get_mm_rss(struct mm_struct *mm)
3359 {
3360 	return get_mm_counter(mm, MM_FILEPAGES) +
3361 		get_mm_counter(mm, MM_ANONPAGES) +
3362 		get_mm_counter(mm, MM_SHMEMPAGES);
3363 }
3364 
3365 static inline unsigned long get_mm_rss_sum(struct mm_struct *mm)
3366 {
3367 	return get_mm_counter_sum(mm, MM_FILEPAGES) +
3368 		get_mm_counter_sum(mm, MM_ANONPAGES) +
3369 		get_mm_counter_sum(mm, MM_SHMEMPAGES);
3370 }
3371 
3372 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
3373 {
3374 	return max(mm->hiwater_rss, get_mm_rss(mm));
3375 }
3376 
3377 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
3378 {
3379 	return max(mm->hiwater_vm, mm->total_vm);
3380 }
3381 
3382 static inline void update_hiwater_rss(struct mm_struct *mm)
3383 {
3384 	unsigned long _rss = get_mm_rss(mm);
3385 
3386 	if (data_race(mm->hiwater_rss) < _rss)
3387 		data_race(mm->hiwater_rss = _rss);
3388 }
3389 
3390 static inline void update_hiwater_vm(struct mm_struct *mm)
3391 {
3392 	if (mm->hiwater_vm < mm->total_vm)
3393 		mm->hiwater_vm = mm->total_vm;
3394 }
3395 
3396 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
3397 {
3398 	mm->hiwater_rss = get_mm_rss(mm);
3399 }
3400 
3401 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
3402 					 struct mm_struct *mm)
3403 {
3404 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
3405 
3406 	if (*maxrss < hiwater_rss)
3407 		*maxrss = hiwater_rss;
3408 }
3409 
3410 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
3411 static inline int pte_special(pte_t pte)
3412 {
3413 	return 0;
3414 }
3415 
3416 static inline pte_t pte_mkspecial(pte_t pte)
3417 {
3418 	return pte;
3419 }
3420 #endif
3421 
3422 #ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
3423 static inline bool pmd_special(pmd_t pmd)
3424 {
3425 	return false;
3426 }
3427 
3428 static inline pmd_t pmd_mkspecial(pmd_t pmd)
3429 {
3430 	return pmd;
3431 }
3432 #endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
3433 
3434 #ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
3435 static inline bool pud_special(pud_t pud)
3436 {
3437 	return false;
3438 }
3439 
3440 static inline pud_t pud_mkspecial(pud_t pud)
3441 {
3442 	return pud;
3443 }
3444 #endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
3445 
3446 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
3447 			     spinlock_t **ptl);
3448 
3449 #ifdef __PAGETABLE_P4D_FOLDED
3450 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
3451 						unsigned long address)
3452 {
3453 	return 0;
3454 }
3455 #else
3456 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
3457 #endif
3458 
3459 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
3460 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
3461 						unsigned long address)
3462 {
3463 	return 0;
3464 }
3465 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
3466 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
3467 
3468 #else
3469 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
3470 
3471 static inline void mm_inc_nr_puds(struct mm_struct *mm)
3472 {
3473 	if (mm_pud_folded(mm))
3474 		return;
3475 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
3476 }
3477 
3478 static inline void mm_dec_nr_puds(struct mm_struct *mm)
3479 {
3480 	if (mm_pud_folded(mm))
3481 		return;
3482 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
3483 }
3484 #endif
3485 
3486 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
3487 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
3488 						unsigned long address)
3489 {
3490 	return 0;
3491 }
3492 
3493 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
3494 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
3495 
3496 #else
3497 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
3498 
3499 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
3500 {
3501 	if (mm_pmd_folded(mm))
3502 		return;
3503 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
3504 }
3505 
3506 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
3507 {
3508 	if (mm_pmd_folded(mm))
3509 		return;
3510 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
3511 }
3512 #endif
3513 
3514 #ifdef CONFIG_MMU
3515 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
3516 {
3517 	atomic_long_set(&mm->pgtables_bytes, 0);
3518 }
3519 
3520 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
3521 {
3522 	return atomic_long_read(&mm->pgtables_bytes);
3523 }
3524 
3525 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
3526 {
3527 	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
3528 }
3529 
3530 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
3531 {
3532 	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
3533 }
3534 #else
3535 
3536 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
3537 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
3538 {
3539 	return 0;
3540 }
3541 
3542 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
3543 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
3544 #endif
3545 
3546 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
3547 int __pte_alloc_kernel(pmd_t *pmd);
3548 
3549 #if defined(CONFIG_MMU)
3550 
3551 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
3552 		unsigned long address)
3553 {
3554 	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
3555 		NULL : p4d_offset(pgd, address);
3556 }
3557 
3558 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
3559 		unsigned long address)
3560 {
3561 	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
3562 		NULL : pud_offset(p4d, address);
3563 }
3564 
3565 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3566 {
3567 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
3568 		NULL: pmd_offset(pud, address);
3569 }
3570 #endif /* CONFIG_MMU */
3571 
3572 enum pt_flags {
3573 	PT_kernel = PG_referenced,
3574 	PT_reserved = PG_reserved,
3575 	/* High bits are used for zone/node/section */
3576 };
3577 
3578 static inline struct ptdesc *virt_to_ptdesc(const void *x)
3579 {
3580 	return page_ptdesc(virt_to_page(x));
3581 }
3582 
3583 /**
3584  * ptdesc_address - Virtual address of page table.
3585  * @pt: Page table descriptor.
3586  *
3587  * Return: The first byte of the page table described by @pt.
3588  */
3589 static inline void *ptdesc_address(const struct ptdesc *pt)
3590 {
3591 	return folio_address(ptdesc_folio(pt));
3592 }
3593 
3594 static inline bool pagetable_is_reserved(struct ptdesc *pt)
3595 {
3596 	return test_bit(PT_reserved, &pt->pt_flags.f);
3597 }
3598 
3599 /**
3600  * ptdesc_set_kernel - Mark a ptdesc used to map the kernel
3601  * @ptdesc: The ptdesc to be marked
3602  *
3603  * Kernel page tables often need special handling. Set a flag so that
3604  * the handling code knows this ptdesc will not be used for userspace.
3605  */
3606 static inline void ptdesc_set_kernel(struct ptdesc *ptdesc)
3607 {
3608 	set_bit(PT_kernel, &ptdesc->pt_flags.f);
3609 }
3610 
3611 /**
3612  * ptdesc_clear_kernel - Mark a ptdesc as no longer used to map the kernel
3613  * @ptdesc: The ptdesc to be unmarked
3614  *
3615  * Use when the ptdesc is no longer used to map the kernel and no longer
3616  * needs special handling.
3617  */
3618 static inline void ptdesc_clear_kernel(struct ptdesc *ptdesc)
3619 {
3620 	/*
3621 	 * Note: the 'PG_referenced' bit does not strictly need to be
3622 	 * cleared before freeing the page. But this is nice for
3623 	 * symmetry.
3624 	 */
3625 	clear_bit(PT_kernel, &ptdesc->pt_flags.f);
3626 }
3627 
3628 /**
3629  * ptdesc_test_kernel - Check if a ptdesc is used to map the kernel
3630  * @ptdesc: The ptdesc being tested
3631  *
3632  * Call to tell if the ptdesc used to map the kernel.
3633  */
3634 static inline bool ptdesc_test_kernel(const struct ptdesc *ptdesc)
3635 {
3636 	return test_bit(PT_kernel, &ptdesc->pt_flags.f);
3637 }
3638 
3639 /**
3640  * pagetable_alloc - Allocate pagetables
3641  * @gfp:    GFP flags
3642  * @order:  desired pagetable order
3643  *
3644  * pagetable_alloc allocates memory for page tables as well as a page table
3645  * descriptor to describe that memory.
3646  *
3647  * Return: The ptdesc describing the allocated page tables.
3648  */
3649 static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
3650 {
3651 	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
3652 
3653 	return page_ptdesc(page);
3654 }
3655 #define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
3656 
3657 static inline void __pagetable_free(struct ptdesc *pt)
3658 {
3659 	struct page *page = ptdesc_page(pt);
3660 
3661 	__free_pages(page, compound_order(page));
3662 }
3663 
3664 #ifdef CONFIG_ASYNC_KERNEL_PGTABLE_FREE
3665 void pagetable_free_kernel(struct ptdesc *pt);
3666 #else
3667 static inline void pagetable_free_kernel(struct ptdesc *pt)
3668 {
3669 	__pagetable_free(pt);
3670 }
3671 #endif
3672 /**
3673  * pagetable_free - Free pagetables
3674  * @pt:	The page table descriptor
3675  *
3676  * pagetable_free frees the memory of all page tables described by a page
3677  * table descriptor and the memory for the descriptor itself.
3678  */
3679 static inline void pagetable_free(struct ptdesc *pt)
3680 {
3681 	if (ptdesc_test_kernel(pt)) {
3682 		ptdesc_clear_kernel(pt);
3683 		pagetable_free_kernel(pt);
3684 	} else {
3685 		__pagetable_free(pt);
3686 	}
3687 }
3688 
3689 #if defined(CONFIG_SPLIT_PTE_PTLOCKS)
3690 #if ALLOC_SPLIT_PTLOCKS
3691 void __init ptlock_cache_init(void);
3692 bool ptlock_alloc(struct ptdesc *ptdesc);
3693 void ptlock_free(struct ptdesc *ptdesc);
3694 
3695 static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
3696 {
3697 	return ptdesc->ptl;
3698 }
3699 #else /* ALLOC_SPLIT_PTLOCKS */
3700 static inline void ptlock_cache_init(void)
3701 {
3702 }
3703 
3704 static inline bool ptlock_alloc(struct ptdesc *ptdesc)
3705 {
3706 	return true;
3707 }
3708 
3709 static inline void ptlock_free(struct ptdesc *ptdesc)
3710 {
3711 }
3712 
3713 static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
3714 {
3715 	return &ptdesc->ptl;
3716 }
3717 #endif /* ALLOC_SPLIT_PTLOCKS */
3718 
3719 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3720 {
3721 	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
3722 }
3723 
3724 static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3725 {
3726 	BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
3727 	BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
3728 	return ptlock_ptr(virt_to_ptdesc(pte));
3729 }
3730 
3731 static inline bool ptlock_init(struct ptdesc *ptdesc)
3732 {
3733 	/*
3734 	 * prep_new_page() initialize page->private (and therefore page->ptl)
3735 	 * with 0. Make sure nobody took it in use in between.
3736 	 *
3737 	 * It can happen if arch try to use slab for page table allocation:
3738 	 * slab code uses page->slab_cache, which share storage with page->ptl.
3739 	 */
3740 	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
3741 	if (!ptlock_alloc(ptdesc))
3742 		return false;
3743 	spin_lock_init(ptlock_ptr(ptdesc));
3744 	return true;
3745 }
3746 
3747 #else	/* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
3748 /*
3749  * We use mm->page_table_lock to guard all pagetable pages of the mm.
3750  */
3751 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3752 {
3753 	return &mm->page_table_lock;
3754 }
3755 static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3756 {
3757 	return &mm->page_table_lock;
3758 }
3759 static inline void ptlock_cache_init(void) {}
3760 static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
3761 static inline void ptlock_free(struct ptdesc *ptdesc) {}
3762 #endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
3763 
3764 static inline void __pagetable_ctor(struct ptdesc *ptdesc)
3765 {
3766 	struct folio *folio = ptdesc_folio(ptdesc);
3767 
3768 	__folio_set_pgtable(folio);
3769 	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3770 }
3771 
3772 static inline void pagetable_dtor(struct ptdesc *ptdesc)
3773 {
3774 	struct folio *folio = ptdesc_folio(ptdesc);
3775 
3776 	ptlock_free(ptdesc);
3777 	__folio_clear_pgtable(folio);
3778 	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3779 }
3780 
3781 static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
3782 {
3783 	pagetable_dtor(ptdesc);
3784 	pagetable_free(ptdesc);
3785 }
3786 
3787 static inline bool pagetable_pte_ctor(struct mm_struct *mm,
3788 				      struct ptdesc *ptdesc)
3789 {
3790 	if (mm != &init_mm && !ptlock_init(ptdesc))
3791 		return false;
3792 	__pagetable_ctor(ptdesc);
3793 	return true;
3794 }
3795 
3796 pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
3797 
3798 static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
3799 {
3800 	return __pte_offset_map(pmd, addr, NULL);
3801 }
3802 
3803 pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3804 			   unsigned long addr, spinlock_t **ptlp);
3805 
3806 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
3807 				unsigned long addr, spinlock_t **ptlp);
3808 pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
3809 				unsigned long addr, pmd_t *pmdvalp,
3810 				spinlock_t **ptlp);
3811 
3812 #define pte_unmap_unlock(pte, ptl)	do {		\
3813 	spin_unlock(ptl);				\
3814 	pte_unmap(pte);					\
3815 } while (0)
3816 
3817 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
3818 
3819 #define pte_alloc_map(mm, pmd, address)			\
3820 	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
3821 
3822 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
3823 	(pte_alloc(mm, pmd) ?			\
3824 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
3825 
3826 #define pte_alloc_kernel(pmd, address)			\
3827 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
3828 		NULL: pte_offset_kernel(pmd, address))
3829 
3830 #if defined(CONFIG_SPLIT_PMD_PTLOCKS)
3831 
3832 static inline struct page *pmd_pgtable_page(pmd_t *pmd)
3833 {
3834 	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
3835 	return virt_to_page((void *)((unsigned long) pmd & mask));
3836 }
3837 
3838 static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
3839 {
3840 	return page_ptdesc(pmd_pgtable_page(pmd));
3841 }
3842 
3843 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3844 {
3845 	return ptlock_ptr(pmd_ptdesc(pmd));
3846 }
3847 
3848 static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
3849 {
3850 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3851 	ptdesc->pmd_huge_pte = NULL;
3852 #endif
3853 	return ptlock_init(ptdesc);
3854 }
3855 
3856 #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3857 
3858 #else
3859 
3860 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3861 {
3862 	return &mm->page_table_lock;
3863 }
3864 
3865 static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
3866 
3867 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3868 
3869 #endif
3870 
3871 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3872 {
3873 	spinlock_t *ptl = pmd_lockptr(mm, pmd);
3874 	spin_lock(ptl);
3875 	return ptl;
3876 }
3877 
3878 static inline bool pagetable_pmd_ctor(struct mm_struct *mm,
3879 				      struct ptdesc *ptdesc)
3880 {
3881 	if (mm != &init_mm && !pmd_ptlock_init(ptdesc))
3882 		return false;
3883 	ptdesc_pmd_pts_init(ptdesc);
3884 	__pagetable_ctor(ptdesc);
3885 	return true;
3886 }
3887 
3888 /*
3889  * No scalability reason to split PUD locks yet, but follow the same pattern
3890  * as the PMD locks to make it easier if we decide to.  The VM should not be
3891  * considered ready to switch to split PUD locks yet; there may be places
3892  * which need to be converted from page_table_lock.
3893  */
3894 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3895 {
3896 	return &mm->page_table_lock;
3897 }
3898 
3899 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3900 {
3901 	spinlock_t *ptl = pud_lockptr(mm, pud);
3902 
3903 	spin_lock(ptl);
3904 	return ptl;
3905 }
3906 
3907 static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
3908 {
3909 	__pagetable_ctor(ptdesc);
3910 }
3911 
3912 static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
3913 {
3914 	__pagetable_ctor(ptdesc);
3915 }
3916 
3917 static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
3918 {
3919 	__pagetable_ctor(ptdesc);
3920 }
3921 
3922 extern void __init pagecache_init(void);
3923 extern void free_initmem(void);
3924 
3925 /*
3926  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
3927  * into the buddy system. The freed pages will be poisoned with pattern
3928  * "poison" if it's within range [0, UCHAR_MAX].
3929  * Return pages freed into the buddy system.
3930  */
3931 extern unsigned long free_reserved_area(void *start, void *end,
3932 					int poison, const char *s);
3933 
3934 extern void adjust_managed_page_count(struct page *page, long count);
3935 
3936 /* Free the reserved page into the buddy system, so it gets managed. */
3937 void free_reserved_page(struct page *page);
3938 
3939 static inline void mark_page_reserved(struct page *page)
3940 {
3941 	SetPageReserved(page);
3942 	adjust_managed_page_count(page, -1);
3943 }
3944 
3945 static inline void free_reserved_ptdesc(struct ptdesc *pt)
3946 {
3947 	free_reserved_page(ptdesc_page(pt));
3948 }
3949 
3950 /*
3951  * Default method to free all the __init memory into the buddy system.
3952  * The freed pages will be poisoned with pattern "poison" if it's within
3953  * range [0, UCHAR_MAX].
3954  * Return pages freed into the buddy system.
3955  */
3956 static inline unsigned long free_initmem_default(int poison)
3957 {
3958 	extern char __init_begin[], __init_end[];
3959 
3960 	return free_reserved_area(&__init_begin, &__init_end,
3961 				  poison, "unused kernel image (initmem)");
3962 }
3963 
3964 static inline unsigned long get_num_physpages(void)
3965 {
3966 	int nid;
3967 	unsigned long phys_pages = 0;
3968 
3969 	for_each_online_node(nid)
3970 		phys_pages += node_present_pages(nid);
3971 
3972 	return phys_pages;
3973 }
3974 
3975 /*
3976  * FIXME: Using memblock node mappings, an architecture may initialise its
3977  * zones, allocate the backing mem_map and account for memory holes in an
3978  * architecture independent manner.
3979  *
3980  * An architecture is expected to register range of page frames backed by
3981  * physical memory with memblock_add[_node]() before calling
3982  * free_area_init() passing in the PFN each zone ends at. At a basic
3983  * usage, an architecture is expected to do something like
3984  *
3985  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
3986  * 							 max_highmem_pfn};
3987  * for_each_valid_physical_page_range()
3988  *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
3989  * free_area_init(max_zone_pfns);
3990  */
3991 void arch_zone_limits_init(unsigned long *max_zone_pfn);
3992 unsigned long node_map_pfn_alignment(void);
3993 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3994 						unsigned long end_pfn);
3995 extern void get_pfn_range_for_nid(unsigned int nid,
3996 			unsigned long *start_pfn, unsigned long *end_pfn);
3997 
3998 #ifndef CONFIG_NUMA
3999 static inline int early_pfn_to_nid(unsigned long pfn)
4000 {
4001 	return 0;
4002 }
4003 #else
4004 /* please see mm/page_alloc.c */
4005 extern int __meminit early_pfn_to_nid(unsigned long pfn);
4006 #endif
4007 
4008 extern void mem_init(void);
4009 extern void __init mmap_init(void);
4010 
4011 extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
4012 static inline void show_mem(void)
4013 {
4014 	__show_mem(0, NULL, MAX_NR_ZONES - 1);
4015 }
4016 extern long si_mem_available(void);
4017 extern void si_meminfo(struct sysinfo * val);
4018 extern void si_meminfo_node(struct sysinfo *val, int nid);
4019 
4020 extern __printf(3, 4)
4021 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
4022 
4023 extern void setup_per_cpu_pageset(void);
4024 
4025 /* nommu.c */
4026 extern atomic_long_t mmap_pages_allocated;
4027 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
4028 
4029 /* interval_tree.c */
4030 void vma_interval_tree_insert(struct vm_area_struct *node,
4031 			      struct rb_root_cached *root);
4032 void vma_interval_tree_insert_after(struct vm_area_struct *node,
4033 				    struct vm_area_struct *prev,
4034 				    struct rb_root_cached *root);
4035 void vma_interval_tree_remove(struct vm_area_struct *node,
4036 			      struct rb_root_cached *root);
4037 struct vm_area_struct *vma_interval_tree_subtree_search(struct vm_area_struct *node,
4038 				unsigned long start, unsigned long last);
4039 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
4040 				unsigned long start, unsigned long last);
4041 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
4042 				unsigned long start, unsigned long last);
4043 
4044 #define vma_interval_tree_foreach(vma, root, start, last)		\
4045 	for (vma = vma_interval_tree_iter_first(root, start, last);	\
4046 	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
4047 
4048 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
4049 				   struct rb_root_cached *root);
4050 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
4051 				   struct rb_root_cached *root);
4052 struct anon_vma_chain *
4053 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
4054 				  unsigned long start, unsigned long last);
4055 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
4056 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
4057 #ifdef CONFIG_DEBUG_VM_RB
4058 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
4059 #endif
4060 
4061 #define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
4062 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
4063 	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
4064 
4065 /* mmap.c */
4066 extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
4067 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
4068 extern void exit_mmap(struct mm_struct *);
4069 bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
4070 				 unsigned long addr, bool write);
4071 
4072 static inline int check_data_rlimit(unsigned long rlim,
4073 				    unsigned long new,
4074 				    unsigned long start,
4075 				    unsigned long end_data,
4076 				    unsigned long start_data)
4077 {
4078 	if (rlim < RLIM_INFINITY) {
4079 		if (((new - start) + (end_data - start_data)) > rlim)
4080 			return -ENOSPC;
4081 	}
4082 
4083 	return 0;
4084 }
4085 
4086 extern int mm_take_all_locks(struct mm_struct *mm);
4087 extern void mm_drop_all_locks(struct mm_struct *mm);
4088 
4089 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
4090 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
4091 extern struct file *get_mm_exe_file(struct mm_struct *mm);
4092 extern struct file *get_task_exe_file(struct task_struct *task);
4093 
4094 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
4095 
4096 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
4097 				   const struct vm_special_mapping *sm);
4098 struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
4099 				   unsigned long addr, unsigned long len,
4100 				   vm_flags_t vm_flags,
4101 				   const struct vm_special_mapping *spec);
4102 
4103 unsigned long randomize_stack_top(unsigned long stack_top);
4104 unsigned long randomize_page(unsigned long start, unsigned long range);
4105 
4106 unsigned long
4107 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
4108 		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
4109 
4110 static inline unsigned long
4111 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
4112 		  unsigned long pgoff, unsigned long flags)
4113 {
4114 	return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
4115 }
4116 
4117 extern unsigned long do_mmap(struct file *file, unsigned long addr,
4118 	unsigned long len, unsigned long prot, unsigned long flags,
4119 	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
4120 	struct list_head *uf);
4121 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
4122 			 unsigned long start, size_t len, struct list_head *uf,
4123 			 bool unlock);
4124 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
4125 		    struct mm_struct *mm, unsigned long start,
4126 		    unsigned long end, struct list_head *uf, bool unlock);
4127 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
4128 		     struct list_head *uf);
4129 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
4130 
4131 #ifdef CONFIG_MMU
4132 extern int __mm_populate(unsigned long addr, unsigned long len,
4133 			 int ignore_errors);
4134 static inline void mm_populate(unsigned long addr, unsigned long len)
4135 {
4136 	/* Ignore errors */
4137 	(void) __mm_populate(addr, len, 1);
4138 }
4139 #else
4140 static inline void mm_populate(unsigned long addr, unsigned long len) {}
4141 #endif
4142 
4143 /* This takes the mm semaphore itself */
4144 int __must_check vm_brk_flags(unsigned long addr, unsigned long request, bool is_exec);
4145 int vm_munmap(unsigned long start, size_t len);
4146 unsigned long __must_check vm_mmap(struct file *file, unsigned long addr,
4147 		unsigned long len, unsigned long prot,
4148 		unsigned long flag, unsigned long offset);
4149 unsigned long __must_check vm_mmap_shadow_stack(unsigned long addr,
4150 		unsigned long len, unsigned long flags);
4151 
4152 struct vm_unmapped_area_info {
4153 #define VM_UNMAPPED_AREA_TOPDOWN 1
4154 	unsigned long flags;
4155 	unsigned long length;
4156 	unsigned long low_limit;
4157 	unsigned long high_limit;
4158 	unsigned long align_mask;
4159 	unsigned long align_offset;
4160 	unsigned long start_gap;
4161 };
4162 
4163 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
4164 
4165 /* truncate.c */
4166 void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
4167 void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
4168 		uoff_t lend);
4169 void truncate_inode_pages_final(struct address_space *mapping);
4170 
4171 /* generic vm_area_ops exported for stackable file systems */
4172 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
4173 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
4174 		pgoff_t start_pgoff, pgoff_t end_pgoff);
4175 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
4176 
4177 extern unsigned long stack_guard_gap;
4178 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
4179 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
4180 struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
4181 
4182 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
4183 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
4184 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
4185 					     struct vm_area_struct **pprev);
4186 
4187 /*
4188  * Look up the first VMA which intersects the interval [start_addr, end_addr)
4189  * NULL if none.  Assume start_addr < end_addr.
4190  */
4191 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
4192 			unsigned long start_addr, unsigned long end_addr);
4193 
4194 /**
4195  * vma_lookup() - Find a VMA at a specific address
4196  * @mm: The process address space.
4197  * @addr: The user address.
4198  *
4199  * Return: The vm_area_struct at the given address, %NULL otherwise.
4200  */
4201 static inline
4202 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
4203 {
4204 	return mtree_load(&mm->mm_mt, addr);
4205 }
4206 
4207 static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
4208 {
4209 	if (vma->vm_flags & VM_GROWSDOWN)
4210 		return stack_guard_gap;
4211 
4212 	/* See reasoning around the VM_SHADOW_STACK definition */
4213 	if (vma->vm_flags & VM_SHADOW_STACK)
4214 		return PAGE_SIZE;
4215 
4216 	return 0;
4217 }
4218 
4219 static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
4220 {
4221 	unsigned long gap = stack_guard_start_gap(vma);
4222 	unsigned long vm_start = vma->vm_start;
4223 
4224 	vm_start -= gap;
4225 	if (vm_start > vma->vm_start)
4226 		vm_start = 0;
4227 	return vm_start;
4228 }
4229 
4230 static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
4231 {
4232 	unsigned long vm_end = vma->vm_end;
4233 
4234 	if (vma->vm_flags & VM_GROWSUP) {
4235 		vm_end += stack_guard_gap;
4236 		if (vm_end < vma->vm_end)
4237 			vm_end = -PAGE_SIZE;
4238 	}
4239 	return vm_end;
4240 }
4241 
4242 static inline unsigned long vma_pages(const struct vm_area_struct *vma)
4243 {
4244 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
4245 }
4246 
4247 static inline unsigned long vma_last_pgoff(struct vm_area_struct *vma)
4248 {
4249 	return vma->vm_pgoff + vma_pages(vma) - 1;
4250 }
4251 
4252 static inline unsigned long vma_desc_size(const struct vm_area_desc *desc)
4253 {
4254 	return desc->end - desc->start;
4255 }
4256 
4257 static inline unsigned long vma_desc_pages(const struct vm_area_desc *desc)
4258 {
4259 	return vma_desc_size(desc) >> PAGE_SHIFT;
4260 }
4261 
4262 /**
4263  * mmap_action_remap - helper for mmap_prepare hook to specify that a pure PFN
4264  * remap is required.
4265  * @desc: The VMA descriptor for the VMA requiring remap.
4266  * @start: The virtual address to start the remap from, must be within the VMA.
4267  * @start_pfn: The first PFN in the range to remap.
4268  * @size: The size of the range to remap, in bytes, at most spanning to the end
4269  * of the VMA.
4270  */
4271 static inline void mmap_action_remap(struct vm_area_desc *desc,
4272 				     unsigned long start,
4273 				     unsigned long start_pfn,
4274 				     unsigned long size)
4275 {
4276 	struct mmap_action *action = &desc->action;
4277 
4278 	/* [start, start + size) must be within the VMA. */
4279 	WARN_ON_ONCE(start < desc->start || start >= desc->end);
4280 	WARN_ON_ONCE(start + size > desc->end);
4281 
4282 	action->type = MMAP_REMAP_PFN;
4283 	action->remap.start = start;
4284 	action->remap.start_pfn = start_pfn;
4285 	action->remap.size = size;
4286 	action->remap.pgprot = desc->page_prot;
4287 }
4288 
4289 /**
4290  * mmap_action_remap_full - helper for mmap_prepare hook to specify that the
4291  * entirety of a VMA should be PFN remapped.
4292  * @desc: The VMA descriptor for the VMA requiring remap.
4293  * @start_pfn: The first PFN in the range to remap.
4294  */
4295 static inline void mmap_action_remap_full(struct vm_area_desc *desc,
4296 					  unsigned long start_pfn)
4297 {
4298 	mmap_action_remap(desc, desc->start, start_pfn, vma_desc_size(desc));
4299 }
4300 
4301 /**
4302  * mmap_action_ioremap - helper for mmap_prepare hook to specify that a pure PFN
4303  * I/O remap is required.
4304  * @desc: The VMA descriptor for the VMA requiring remap.
4305  * @start: The virtual address to start the remap from, must be within the VMA.
4306  * @start_pfn: The first PFN in the range to remap.
4307  * @size: The size of the range to remap, in bytes, at most spanning to the end
4308  * of the VMA.
4309  */
4310 static inline void mmap_action_ioremap(struct vm_area_desc *desc,
4311 				       unsigned long start,
4312 				       unsigned long start_pfn,
4313 				       unsigned long size)
4314 {
4315 	mmap_action_remap(desc, start, start_pfn, size);
4316 	desc->action.type = MMAP_IO_REMAP_PFN;
4317 }
4318 
4319 /**
4320  * mmap_action_ioremap_full - helper for mmap_prepare hook to specify that the
4321  * entirety of a VMA should be PFN I/O remapped.
4322  * @desc: The VMA descriptor for the VMA requiring remap.
4323  * @start_pfn: The first PFN in the range to remap.
4324  */
4325 static inline void mmap_action_ioremap_full(struct vm_area_desc *desc,
4326 					    unsigned long start_pfn)
4327 {
4328 	mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc));
4329 }
4330 
4331 /**
4332  * mmap_action_simple_ioremap - helper for mmap_prepare hook to specify that the
4333  * physical range in [start_phys_addr, start_phys_addr + size) should be I/O
4334  * remapped.
4335  * @desc: The VMA descriptor for the VMA requiring remap.
4336  * @start_phys_addr: Start of the physical memory to be mapped.
4337  * @size: Size of the area to map.
4338  *
4339  * NOTE: Some drivers might want to tweak desc->page_prot for purposes of
4340  * write-combine or similar.
4341  */
4342 static inline void mmap_action_simple_ioremap(struct vm_area_desc *desc,
4343 					      phys_addr_t start_phys_addr,
4344 					      unsigned long size)
4345 {
4346 	struct mmap_action *action = &desc->action;
4347 
4348 	action->simple_ioremap.start_phys_addr = start_phys_addr;
4349 	action->simple_ioremap.size = size;
4350 	action->type = MMAP_SIMPLE_IO_REMAP;
4351 }
4352 
4353 /**
4354  * mmap_action_map_kernel_pages - helper for mmap_prepare hook to specify that
4355  * @num kernel pages contained in the @pages array should be mapped to userland
4356  * starting at virtual address @start.
4357  * @desc: The VMA descriptor for the VMA requiring kernel pags to be mapped.
4358  * @start: The virtual address from which to map them.
4359  * @pages: An array of struct page pointers describing the memory to map.
4360  * @nr_pages: The number of entries in the @pages aray.
4361  */
4362 static inline void mmap_action_map_kernel_pages(struct vm_area_desc *desc,
4363 		unsigned long start, struct page **pages,
4364 		unsigned long nr_pages)
4365 {
4366 	struct mmap_action *action = &desc->action;
4367 
4368 	action->type = MMAP_MAP_KERNEL_PAGES;
4369 	action->map_kernel.start = start;
4370 	action->map_kernel.pages = pages;
4371 	action->map_kernel.nr_pages = nr_pages;
4372 	action->map_kernel.pgoff = desc->pgoff;
4373 }
4374 
4375 /**
4376  * mmap_action_map_kernel_pages_full - helper for mmap_prepare hook to specify that
4377  * kernel pages contained in the @pages array should be mapped to userland
4378  * from @desc->start to @desc->end.
4379  * @desc: The VMA descriptor for the VMA requiring kernel pags to be mapped.
4380  * @pages: An array of struct page pointers describing the memory to map.
4381  *
4382  * The caller must ensure that @pages contains sufficient entries to cover the
4383  * entire range described by @desc.
4384  */
4385 static inline void mmap_action_map_kernel_pages_full(struct vm_area_desc *desc,
4386 		struct page **pages)
4387 {
4388 	mmap_action_map_kernel_pages(desc, desc->start, pages,
4389 				     vma_desc_pages(desc));
4390 }
4391 
4392 int mmap_action_prepare(struct vm_area_desc *desc);
4393 int mmap_action_complete(struct vm_area_struct *vma,
4394 			 struct mmap_action *action);
4395 
4396 /* Look up the first VMA which exactly match the interval vm_start ... vm_end */
4397 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
4398 				unsigned long vm_start, unsigned long vm_end)
4399 {
4400 	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
4401 
4402 	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
4403 		vma = NULL;
4404 
4405 	return vma;
4406 }
4407 
4408 /**
4409  * range_is_subset - Is the specified inner range a subset of the outer range?
4410  * @outer_start: The start of the outer range.
4411  * @outer_end: The exclusive end of the outer range.
4412  * @inner_start: The start of the inner range.
4413  * @inner_end: The exclusive end of the inner range.
4414  *
4415  * Returns: %true if [inner_start, inner_end) is a subset of [outer_start,
4416  * outer_end), otherwise %false.
4417  */
4418 static inline bool range_is_subset(unsigned long outer_start,
4419 				   unsigned long outer_end,
4420 				   unsigned long inner_start,
4421 				   unsigned long inner_end)
4422 {
4423 	return outer_start <= inner_start && inner_end <= outer_end;
4424 }
4425 
4426 /**
4427  * range_in_vma - is the specified [@start, @end) range a subset of the VMA?
4428  * @vma: The VMA against which we want to check [@start, @end).
4429  * @start: The start of the range we wish to check.
4430  * @end: The exclusive end of the range we wish to check.
4431  *
4432  * Returns: %true if [@start, @end) is a subset of [@vma->vm_start,
4433  * @vma->vm_end), %false otherwise.
4434  */
4435 static inline bool range_in_vma(const struct vm_area_struct *vma,
4436 				unsigned long start, unsigned long end)
4437 {
4438 	if (!vma)
4439 		return false;
4440 
4441 	return range_is_subset(vma->vm_start, vma->vm_end, start, end);
4442 }
4443 
4444 /**
4445  * range_in_vma_desc - is the specified [@start, @end) range a subset of the VMA
4446  * described by @desc, a VMA descriptor?
4447  * @desc: The VMA descriptor against which we want to check [@start, @end).
4448  * @start: The start of the range we wish to check.
4449  * @end: The exclusive end of the range we wish to check.
4450  *
4451  * Returns: %true if [@start, @end) is a subset of [@desc->start, @desc->end),
4452  * %false otherwise.
4453  */
4454 static inline bool range_in_vma_desc(const struct vm_area_desc *desc,
4455 				     unsigned long start, unsigned long end)
4456 {
4457 	if (!desc)
4458 		return false;
4459 
4460 	return range_is_subset(desc->start, desc->end, start, end);
4461 }
4462 
4463 #ifdef CONFIG_MMU
4464 pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
4465 
4466 static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
4467 {
4468 	const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);
4469 
4470 	return vm_get_page_prot(vm_flags);
4471 }
4472 
4473 void vma_set_page_prot(struct vm_area_struct *vma);
4474 #else
4475 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
4476 {
4477 	return __pgprot(0);
4478 }
4479 static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
4480 {
4481 	return __pgprot(0);
4482 }
4483 static inline void vma_set_page_prot(struct vm_area_struct *vma)
4484 {
4485 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4486 }
4487 #endif
4488 
4489 void vma_set_file(struct vm_area_struct *vma, struct file *file);
4490 
4491 #ifdef CONFIG_NUMA_BALANCING
4492 unsigned long change_prot_numa(struct vm_area_struct *vma,
4493 			unsigned long start, unsigned long end);
4494 #endif
4495 
4496 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
4497 		unsigned long addr);
4498 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
4499 		    unsigned long pfn, unsigned long size, pgprot_t pgprot);
4500 
4501 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
4502 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
4503 			struct page **pages, unsigned long *num);
4504 int map_kernel_pages_prepare(struct vm_area_desc *desc);
4505 int map_kernel_pages_complete(struct vm_area_struct *vma,
4506 			      struct mmap_action *action);
4507 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
4508 				unsigned long num);
4509 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
4510 				unsigned long num);
4511 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
4512 			bool write);
4513 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
4514 			unsigned long pfn);
4515 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
4516 			unsigned long pfn, pgprot_t pgprot);
4517 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
4518 			unsigned long pfn);
4519 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
4520 		unsigned long addr, unsigned long pfn);
4521 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
4522 
4523 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
4524 				unsigned long addr, struct page *page)
4525 {
4526 	int err = vm_insert_page(vma, addr, page);
4527 
4528 	if (err == -ENOMEM)
4529 		return VM_FAULT_OOM;
4530 	if (err < 0 && err != -EBUSY)
4531 		return VM_FAULT_SIGBUS;
4532 
4533 	return VM_FAULT_NOPAGE;
4534 }
4535 
4536 #ifndef io_remap_pfn_range_pfn
4537 static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
4538 		unsigned long size)
4539 {
4540 	return pfn;
4541 }
4542 #endif
4543 
4544 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
4545 				     unsigned long addr, unsigned long orig_pfn,
4546 				     unsigned long size, pgprot_t orig_prot)
4547 {
4548 	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
4549 	const pgprot_t prot = pgprot_decrypted(orig_prot);
4550 
4551 	return remap_pfn_range(vma, addr, pfn, size, prot);
4552 }
4553 
4554 static inline vm_fault_t vmf_error(int err)
4555 {
4556 	if (err == -ENOMEM)
4557 		return VM_FAULT_OOM;
4558 	else if (err == -EHWPOISON)
4559 		return VM_FAULT_HWPOISON;
4560 	return VM_FAULT_SIGBUS;
4561 }
4562 
4563 /*
4564  * Convert errno to return value for ->page_mkwrite() calls.
4565  *
4566  * This should eventually be merged with vmf_error() above, but will need a
4567  * careful audit of all vmf_error() callers.
4568  */
4569 static inline vm_fault_t vmf_fs_error(int err)
4570 {
4571 	if (err == 0)
4572 		return VM_FAULT_LOCKED;
4573 	if (err == -EFAULT || err == -EAGAIN)
4574 		return VM_FAULT_NOPAGE;
4575 	if (err == -ENOMEM)
4576 		return VM_FAULT_OOM;
4577 	/* -ENOSPC, -EDQUOT, -EIO ... */
4578 	return VM_FAULT_SIGBUS;
4579 }
4580 
4581 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
4582 {
4583 	if (vm_fault & VM_FAULT_OOM)
4584 		return -ENOMEM;
4585 	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
4586 		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
4587 	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
4588 		return -EFAULT;
4589 	return 0;
4590 }
4591 
4592 /*
4593  * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
4594  * a (NUMA hinting) fault is required.
4595  */
4596 static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
4597 					   unsigned int flags)
4598 {
4599 	/*
4600 	 * If callers don't want to honor NUMA hinting faults, no need to
4601 	 * determine if we would actually have to trigger a NUMA hinting fault.
4602 	 */
4603 	if (!(flags & FOLL_HONOR_NUMA_FAULT))
4604 		return true;
4605 
4606 	/*
4607 	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
4608 	 *
4609 	 * Requiring a fault here even for inaccessible VMAs would mean that
4610 	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
4611 	 * refuses to process NUMA hinting faults in inaccessible VMAs.
4612 	 */
4613 	return !vma_is_accessible(vma);
4614 }
4615 
4616 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
4617 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
4618 			       unsigned long size, pte_fn_t fn, void *data);
4619 extern int apply_to_existing_page_range(struct mm_struct *mm,
4620 				   unsigned long address, unsigned long size,
4621 				   pte_fn_t fn, void *data);
4622 
4623 #ifdef CONFIG_PAGE_POISONING
4624 extern void __kernel_poison_pages(struct page *page, int numpages);
4625 extern void __kernel_unpoison_pages(struct page *page, int numpages);
4626 extern bool _page_poisoning_enabled_early;
4627 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
4628 static inline bool page_poisoning_enabled(void)
4629 {
4630 	return _page_poisoning_enabled_early;
4631 }
4632 /*
4633  * For use in fast paths after init_mem_debugging() has run, or when a
4634  * false negative result is not harmful when called too early.
4635  */
4636 static inline bool page_poisoning_enabled_static(void)
4637 {
4638 	return static_branch_unlikely(&_page_poisoning_enabled);
4639 }
4640 static inline void kernel_poison_pages(struct page *page, int numpages)
4641 {
4642 	if (page_poisoning_enabled_static())
4643 		__kernel_poison_pages(page, numpages);
4644 }
4645 static inline void kernel_unpoison_pages(struct page *page, int numpages)
4646 {
4647 	if (page_poisoning_enabled_static())
4648 		__kernel_unpoison_pages(page, numpages);
4649 }
4650 #else
4651 static inline bool page_poisoning_enabled(void) { return false; }
4652 static inline bool page_poisoning_enabled_static(void) { return false; }
4653 static inline void __kernel_poison_pages(struct page *page, int nunmpages) { }
4654 static inline void kernel_poison_pages(struct page *page, int numpages) { }
4655 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
4656 #endif
4657 
4658 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
4659 static inline bool want_init_on_alloc(gfp_t flags)
4660 {
4661 	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
4662 				&init_on_alloc))
4663 		return true;
4664 	return flags & __GFP_ZERO;
4665 }
4666 
4667 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
4668 static inline bool want_init_on_free(void)
4669 {
4670 	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
4671 				   &init_on_free);
4672 }
4673 
4674 extern bool _debug_pagealloc_enabled_early;
4675 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
4676 
4677 static inline bool debug_pagealloc_enabled(void)
4678 {
4679 	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
4680 		_debug_pagealloc_enabled_early;
4681 }
4682 
4683 /*
4684  * For use in fast paths after mem_debugging_and_hardening_init() has run,
4685  * or when a false negative result is not harmful when called too early.
4686  */
4687 static inline bool debug_pagealloc_enabled_static(void)
4688 {
4689 	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
4690 		return false;
4691 
4692 	return static_branch_unlikely(&_debug_pagealloc_enabled);
4693 }
4694 
4695 /*
4696  * To support DEBUG_PAGEALLOC architecture must ensure that
4697  * __kernel_map_pages() never fails
4698  */
4699 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
4700 #ifdef CONFIG_DEBUG_PAGEALLOC
4701 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
4702 {
4703 	iommu_debug_check_unmapped(page, numpages);
4704 
4705 	if (debug_pagealloc_enabled_static())
4706 		__kernel_map_pages(page, numpages, 1);
4707 }
4708 
4709 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
4710 {
4711 	iommu_debug_check_unmapped(page, numpages);
4712 
4713 	if (debug_pagealloc_enabled_static())
4714 		__kernel_map_pages(page, numpages, 0);
4715 }
4716 
4717 extern unsigned int _debug_guardpage_minorder;
4718 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
4719 
4720 static inline unsigned int debug_guardpage_minorder(void)
4721 {
4722 	return _debug_guardpage_minorder;
4723 }
4724 
4725 static inline bool debug_guardpage_enabled(void)
4726 {
4727 	return static_branch_unlikely(&_debug_guardpage_enabled);
4728 }
4729 
4730 static inline bool page_is_guard(const struct page *page)
4731 {
4732 	if (!debug_guardpage_enabled())
4733 		return false;
4734 
4735 	return PageGuard(page);
4736 }
4737 
4738 bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
4739 static inline bool set_page_guard(struct zone *zone, struct page *page,
4740 				  unsigned int order)
4741 {
4742 	if (!debug_guardpage_enabled())
4743 		return false;
4744 	return __set_page_guard(zone, page, order);
4745 }
4746 
4747 void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
4748 static inline void clear_page_guard(struct zone *zone, struct page *page,
4749 				    unsigned int order)
4750 {
4751 	if (!debug_guardpage_enabled())
4752 		return;
4753 	__clear_page_guard(zone, page, order);
4754 }
4755 
4756 #else	/* CONFIG_DEBUG_PAGEALLOC */
4757 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
4758 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
4759 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
4760 static inline bool debug_guardpage_enabled(void) { return false; }
4761 static inline bool page_is_guard(const struct page *page) { return false; }
4762 static inline bool set_page_guard(struct zone *zone, struct page *page,
4763 			unsigned int order) { return false; }
4764 static inline void clear_page_guard(struct zone *zone, struct page *page,
4765 				unsigned int order) {}
4766 #endif	/* CONFIG_DEBUG_PAGEALLOC */
4767 
4768 #ifndef clear_pages
4769 /**
4770  * clear_pages() - clear a page range for kernel-internal use.
4771  * @addr: start address
4772  * @npages: number of pages
4773  *
4774  * Use clear_user_pages() instead when clearing a page range to be
4775  * mapped to user space.
4776  *
4777  * Does absolutely no exception handling.
4778  *
4779  * Note that even though the clearing operation is preemptible, clear_pages()
4780  * does not (and on architectures where it reduces to a few long-running
4781  * instructions, might not be able to) call cond_resched() to check if
4782  * rescheduling is required.
4783  *
4784  * When running under preemptible models this is not a problem. Under
4785  * cooperatively scheduled models, however, the caller is expected to
4786  * limit @npages to no more than PROCESS_PAGES_NON_PREEMPT_BATCH.
4787  */
4788 static inline void clear_pages(void *addr, unsigned int npages)
4789 {
4790 	do {
4791 		clear_page(addr);
4792 		addr += PAGE_SIZE;
4793 	} while (--npages);
4794 }
4795 #endif
4796 
4797 #ifndef PROCESS_PAGES_NON_PREEMPT_BATCH
4798 #ifdef clear_pages
4799 /*
4800  * The architecture defines clear_pages(), and we assume that it is
4801  * generally "fast". So choose a batch size large enough to allow the processor
4802  * headroom for optimizing the operation and yet small enough that we see
4803  * reasonable preemption latency for when this optimization is not possible
4804  * (ex. slow microarchitectures, memory bandwidth saturation.)
4805  *
4806  * With a value of 32MB and assuming a memory bandwidth of ~10GBps, this should
4807  * result in worst case preemption latency of around 3ms when clearing pages.
4808  *
4809  * (See comment above clear_pages() for why preemption latency is a concern
4810  * here.)
4811  */
4812 #define PROCESS_PAGES_NON_PREEMPT_BATCH		(SZ_32M >> PAGE_SHIFT)
4813 #else /* !clear_pages */
4814 /*
4815  * The architecture does not provide a clear_pages() implementation. Assume
4816  * that clear_page() -- which clear_pages() will fallback to -- is relatively
4817  * slow and choose a small value for PROCESS_PAGES_NON_PREEMPT_BATCH.
4818  */
4819 #define PROCESS_PAGES_NON_PREEMPT_BATCH		1
4820 #endif
4821 #endif
4822 
4823 #ifdef __HAVE_ARCH_GATE_AREA
4824 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
4825 extern int in_gate_area_no_mm(unsigned long addr);
4826 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
4827 #else
4828 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
4829 {
4830 	return NULL;
4831 }
4832 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
4833 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
4834 {
4835 	return 0;
4836 }
4837 #endif	/* __HAVE_ARCH_GATE_AREA */
4838 
4839 bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm);
4840 
4841 void drop_slab(void);
4842 
4843 #ifndef CONFIG_MMU
4844 #define randomize_va_space 0
4845 #else
4846 extern int randomize_va_space;
4847 #endif
4848 
4849 const char * arch_vma_name(struct vm_area_struct *vma);
4850 #ifdef CONFIG_MMU
4851 void print_vma_addr(char *prefix, unsigned long rip);
4852 #else
4853 static inline void print_vma_addr(char *prefix, unsigned long rip)
4854 {
4855 }
4856 #endif
4857 
4858 void *sparse_buffer_alloc(unsigned long size);
4859 unsigned long section_map_size(void);
4860 struct page * __populate_section_memmap(unsigned long pfn,
4861 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
4862 		struct dev_pagemap *pgmap);
4863 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
4864 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
4865 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
4866 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
4867 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
4868 			    struct vmem_altmap *altmap, unsigned long ptpfn,
4869 			    unsigned long flags);
4870 void *vmemmap_alloc_block(unsigned long size, int node);
4871 struct vmem_altmap;
4872 void *vmemmap_alloc_block_buf(unsigned long size, int node,
4873 			      struct vmem_altmap *altmap);
4874 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
4875 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
4876 		     unsigned long addr, unsigned long next);
4877 int vmemmap_check_pmd(pmd_t *pmd, int node,
4878 		      unsigned long addr, unsigned long next);
4879 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
4880 			       int node, struct vmem_altmap *altmap);
4881 int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
4882 			       int node, struct vmem_altmap *altmap);
4883 int vmemmap_populate(unsigned long start, unsigned long end, int node,
4884 		struct vmem_altmap *altmap);
4885 int vmemmap_populate_hvo(unsigned long start, unsigned long end,
4886 			 unsigned int order, struct zone *zone,
4887 			 unsigned long headsize);
4888 void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
4889 			  unsigned long headsize);
4890 void vmemmap_populate_print_last(void);
4891 #ifdef CONFIG_MEMORY_HOTPLUG
4892 void vmemmap_free(unsigned long start, unsigned long end,
4893 		struct vmem_altmap *altmap);
4894 #endif
4895 
4896 #ifdef CONFIG_SPARSEMEM_VMEMMAP
4897 static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
4898 {
4899 	/* number of pfns from base where pfn_to_page() is valid */
4900 	if (altmap)
4901 		return altmap->reserve + altmap->free;
4902 	return 0;
4903 }
4904 
4905 static inline void vmem_altmap_free(struct vmem_altmap *altmap,
4906 				    unsigned long nr_pfns)
4907 {
4908 	altmap->alloc -= nr_pfns;
4909 }
4910 #else
4911 static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
4912 {
4913 	return 0;
4914 }
4915 
4916 static inline void vmem_altmap_free(struct vmem_altmap *altmap,
4917 				    unsigned long nr_pfns)
4918 {
4919 }
4920 #endif
4921 
4922 #define VMEMMAP_RESERVE_NR	2
4923 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
4924 static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
4925 					  struct dev_pagemap *pgmap)
4926 {
4927 	unsigned long nr_pages;
4928 	unsigned long nr_vmemmap_pages;
4929 
4930 	if (!pgmap || !is_power_of_2(sizeof(struct page)))
4931 		return false;
4932 
4933 	nr_pages = pgmap_vmemmap_nr(pgmap);
4934 	nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
4935 	/*
4936 	 * For vmemmap optimization with DAX we need minimum 2 vmemmap
4937 	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
4938 	 */
4939 	return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
4940 }
4941 /*
4942  * If we don't have an architecture override, use the generic rule
4943  */
4944 #ifndef vmemmap_can_optimize
4945 #define vmemmap_can_optimize __vmemmap_can_optimize
4946 #endif
4947 
4948 #else
4949 static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
4950 					   struct dev_pagemap *pgmap)
4951 {
4952 	return false;
4953 }
4954 #endif
4955 
4956 enum mf_flags {
4957 	MF_COUNT_INCREASED = 1 << 0,
4958 	MF_ACTION_REQUIRED = 1 << 1,
4959 	MF_MUST_KILL = 1 << 2,
4960 	MF_SOFT_OFFLINE = 1 << 3,
4961 	MF_UNPOISON = 1 << 4,
4962 	MF_SW_SIMULATED = 1 << 5,
4963 	MF_NO_RETRY = 1 << 6,
4964 	MF_MEM_PRE_REMOVE = 1 << 7,
4965 };
4966 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
4967 		      unsigned long count, int mf_flags);
4968 extern int memory_failure(unsigned long pfn, int flags);
4969 extern int unpoison_memory(unsigned long pfn);
4970 extern atomic_long_t num_poisoned_pages __read_mostly;
4971 extern int soft_offline_page(unsigned long pfn, int flags);
4972 #ifdef CONFIG_MEMORY_FAILURE
4973 /*
4974  * Sysfs entries for memory failure handling statistics.
4975  */
4976 extern const struct attribute_group memory_failure_attr_group;
4977 extern void memory_failure_queue(unsigned long pfn, int flags);
4978 extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
4979 					bool *migratable_cleared);
4980 void num_poisoned_pages_inc(unsigned long pfn);
4981 void num_poisoned_pages_sub(unsigned long pfn, long i);
4982 #else
4983 static inline void memory_failure_queue(unsigned long pfn, int flags)
4984 {
4985 }
4986 
4987 static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
4988 					bool *migratable_cleared)
4989 {
4990 	return 0;
4991 }
4992 
4993 static inline void num_poisoned_pages_inc(unsigned long pfn)
4994 {
4995 }
4996 
4997 static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
4998 {
4999 }
5000 #endif
5001 
5002 #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
5003 extern void memblk_nr_poison_inc(unsigned long pfn);
5004 extern void memblk_nr_poison_sub(unsigned long pfn, long i);
5005 #else
5006 static inline void memblk_nr_poison_inc(unsigned long pfn)
5007 {
5008 }
5009 
5010 static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
5011 {
5012 }
5013 #endif
5014 
5015 #ifndef arch_memory_failure
5016 static inline int arch_memory_failure(unsigned long pfn, int flags)
5017 {
5018 	return -ENXIO;
5019 }
5020 #endif
5021 
5022 #ifndef arch_is_platform_page
5023 static inline bool arch_is_platform_page(u64 paddr)
5024 {
5025 	return false;
5026 }
5027 #endif
5028 
5029 /*
5030  * Error handlers for various types of pages.
5031  */
5032 enum mf_result {
5033 	MF_IGNORED,	/* Error: cannot be handled */
5034 	MF_FAILED,	/* Error: handling failed */
5035 	MF_DELAYED,	/* Will be handled later */
5036 	MF_RECOVERED,	/* Successfully recovered */
5037 };
5038 
5039 enum mf_action_page_type {
5040 	MF_MSG_KERNEL,
5041 	MF_MSG_KERNEL_HIGH_ORDER,
5042 	MF_MSG_DIFFERENT_COMPOUND,
5043 	MF_MSG_HUGE,
5044 	MF_MSG_FREE_HUGE,
5045 	MF_MSG_GET_HWPOISON,
5046 	MF_MSG_UNMAP_FAILED,
5047 	MF_MSG_DIRTY_SWAPCACHE,
5048 	MF_MSG_CLEAN_SWAPCACHE,
5049 	MF_MSG_DIRTY_MLOCKED_LRU,
5050 	MF_MSG_CLEAN_MLOCKED_LRU,
5051 	MF_MSG_DIRTY_UNEVICTABLE_LRU,
5052 	MF_MSG_CLEAN_UNEVICTABLE_LRU,
5053 	MF_MSG_DIRTY_LRU,
5054 	MF_MSG_CLEAN_LRU,
5055 	MF_MSG_TRUNCATED_LRU,
5056 	MF_MSG_BUDDY,
5057 	MF_MSG_DAX,
5058 	MF_MSG_UNSPLIT_THP,
5059 	MF_MSG_ALREADY_POISONED,
5060 	MF_MSG_PFN_MAP,
5061 	MF_MSG_UNKNOWN,
5062 };
5063 
5064 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5065 void folio_zero_user(struct folio *folio, unsigned long addr_hint);
5066 int copy_user_large_folio(struct folio *dst, struct folio *src,
5067 			  unsigned long addr_hint,
5068 			  struct vm_area_struct *vma);
5069 long copy_folio_from_user(struct folio *dst_folio,
5070 			   const void __user *usr_src,
5071 			   bool allow_pagefault);
5072 
5073 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5074 
5075 #if MAX_NUMNODES > 1
5076 void __init setup_nr_node_ids(void);
5077 #else
5078 static inline void setup_nr_node_ids(void) {}
5079 #endif
5080 
5081 extern int memcmp_pages(struct page *page1, struct page *page2);
5082 
5083 static inline int pages_identical(struct page *page1, struct page *page2)
5084 {
5085 	return !memcmp_pages(page1, page2);
5086 }
5087 
5088 #ifdef CONFIG_MAPPING_DIRTY_HELPERS
5089 unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
5090 						pgoff_t first_index, pgoff_t nr,
5091 						pgoff_t bitmap_pgoff,
5092 						unsigned long *bitmap,
5093 						pgoff_t *start,
5094 						pgoff_t *end);
5095 
5096 unsigned long wp_shared_mapping_range(struct address_space *mapping,
5097 				      pgoff_t first_index, pgoff_t nr);
5098 #endif
5099 
5100 #ifdef CONFIG_ANON_VMA_NAME
5101 int set_anon_vma_name(unsigned long addr, unsigned long size,
5102 		      const char __user *uname);
5103 #else
5104 static inline
5105 int set_anon_vma_name(unsigned long addr, unsigned long size,
5106 		      const char __user *uname)
5107 {
5108 	return -EINVAL;
5109 }
5110 #endif
5111 
5112 #ifdef CONFIG_UNACCEPTED_MEMORY
5113 
5114 bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
5115 void accept_memory(phys_addr_t start, unsigned long size);
5116 
5117 #else
5118 
5119 static inline bool range_contains_unaccepted_memory(phys_addr_t start,
5120 						    unsigned long size)
5121 {
5122 	return false;
5123 }
5124 
5125 static inline void accept_memory(phys_addr_t start, unsigned long size)
5126 {
5127 }
5128 
5129 #endif
5130 
5131 static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
5132 {
5133 	return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
5134 }
5135 
5136 void vma_pgtable_walk_begin(struct vm_area_struct *vma);
5137 void vma_pgtable_walk_end(struct vm_area_struct *vma);
5138 
5139 int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
5140 int reserve_mem_release_by_name(const char *name);
5141 
5142 #ifdef CONFIG_64BIT
5143 int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
5144 #else
5145 static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
5146 {
5147 	/* noop on 32 bit */
5148 	return 0;
5149 }
5150 #endif
5151 
5152 /*
5153  * user_alloc_needs_zeroing checks if a user folio from page allocator needs to
5154  * be zeroed or not.
5155  */
5156 static inline bool user_alloc_needs_zeroing(void)
5157 {
5158 	/*
5159 	 * for user folios, arch with cache aliasing requires cache flush and
5160 	 * arc changes folio->flags to make icache coherent with dcache, so
5161 	 * always return false to make caller use
5162 	 * clear_user_page()/clear_user_highpage().
5163 	 */
5164 	return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
5165 	       !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
5166 				   &init_on_alloc);
5167 }
5168 
5169 int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
5170 int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
5171 int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
5172 
5173 /*
5174  * DMA mapping IDs for page_pool
5175  *
5176  * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
5177  * stashes it in the upper bits of page->pp_magic. Non-PP pages can have
5178  * arbitrary kernel pointers stored in the same field as pp_magic (since
5179  * it overlaps with page->lru.next), so we must ensure that we cannot
5180  * mistake a valid kernel pointer with any of the values we write into this
5181  * field.
5182  *
5183  * On architectures that set POISON_POINTER_DELTA, this is already ensured,
5184  * since this value becomes part of PP_SIGNATURE; meaning we can just use the
5185  * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
5186  * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
5187  * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
5188  * known at compile-time.
5189  *
5190  * If the value of PAGE_OFFSET is not known at compile time, or if it is too
5191  * small to leave at least 8 bits available above PP_SIGNATURE, we define the
5192  * number of bits to be 0, which turns off the DMA index tracking altogether
5193  * (see page_pool_register_dma_index()).
5194  */
5195 #define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
5196 #if POISON_POINTER_DELTA > 0
5197 /* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
5198  * index to not overlap with that if set
5199  */
5200 #define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
5201 #else
5202 /* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */
5203 #define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
5204 #define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
5205 			    PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
5206 			    !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
5207 			      MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)
5208 
5209 #endif
5210 
5211 #define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
5212 				  PP_DMA_INDEX_SHIFT)
5213 
5214 #define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
5215 #define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
5216 #define PAGE_SNAPSHOT_PG_IDLE  (1 << 2)
5217 
5218 struct page_snapshot {
5219 	struct folio folio_snapshot;
5220 	struct page page_snapshot;
5221 	unsigned long pfn;
5222 	unsigned long idx;
5223 	unsigned long flags;
5224 };
5225 
5226 static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
5227 {
5228 	return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
5229 }
5230 
5231 void snapshot_page(struct page_snapshot *ps, const struct page *page);
5232 
5233 void map_anon_folio_pte_nopf(struct folio *folio, pte_t *pte,
5234 		struct vm_area_struct *vma, unsigned long addr,
5235 		bool uffd_wp);
5236 
5237 #endif /* _LINUX_MM_H */
5238