xref: /linux/include/linux/mm.h (revision 4cff5c05e076d2ee4e34122aa956b84a2eaac587)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_H
3 #define _LINUX_MM_H
4 
5 #include <linux/errno.h>
6 #include <linux/mmdebug.h>
7 #include <linux/gfp.h>
8 #include <linux/pgalloc_tag.h>
9 #include <linux/bug.h>
10 #include <linux/list.h>
11 #include <linux/mmzone.h>
12 #include <linux/rbtree.h>
13 #include <linux/atomic.h>
14 #include <linux/debug_locks.h>
15 #include <linux/compiler.h>
16 #include <linux/mm_types.h>
17 #include <linux/mmap_lock.h>
18 #include <linux/range.h>
19 #include <linux/pfn.h>
20 #include <linux/percpu-refcount.h>
21 #include <linux/bit_spinlock.h>
22 #include <linux/shrinker.h>
23 #include <linux/resource.h>
24 #include <linux/page_ext.h>
25 #include <linux/err.h>
26 #include <linux/page-flags.h>
27 #include <linux/page_ref.h>
28 #include <linux/overflow.h>
29 #include <linux/sizes.h>
30 #include <linux/sched.h>
31 #include <linux/pgtable.h>
32 #include <linux/kasan.h>
33 #include <linux/memremap.h>
34 #include <linux/slab.h>
35 #include <linux/cacheinfo.h>
36 #include <linux/rcuwait.h>
37 #include <linux/bitmap.h>
38 #include <linux/bitops.h>
39 #include <linux/iommu-debug-pagealloc.h>
40 
41 struct mempolicy;
42 struct anon_vma;
43 struct anon_vma_chain;
44 struct user_struct;
45 struct pt_regs;
46 struct folio_batch;
47 
48 void arch_mm_preinit(void);
49 void mm_core_init_early(void);
50 void mm_core_init(void);
51 void init_mm_internals(void);
52 
53 extern atomic_long_t _totalram_pages;
54 static inline unsigned long totalram_pages(void)
55 {
56 	return (unsigned long)atomic_long_read(&_totalram_pages);
57 }
58 
59 static inline void totalram_pages_inc(void)
60 {
61 	atomic_long_inc(&_totalram_pages);
62 }
63 
64 static inline void totalram_pages_dec(void)
65 {
66 	atomic_long_dec(&_totalram_pages);
67 }
68 
69 static inline void totalram_pages_add(long count)
70 {
71 	atomic_long_add(count, &_totalram_pages);
72 }
73 
74 extern void * high_memory;
75 
76 /*
77  * Convert between pages and MB
78  * 20 is the shift for 1MB (2^20 = 1MB)
79  * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
80  * So (20 - PAGE_SHIFT) converts between pages and MB
81  */
82 #define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
83 #define MB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))
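
/*
 * A minimal usage sketch; ram_size_mb() is a hypothetical helper, not part of
 * this header. With 4 KiB pages (PAGE_SHIFT == 12), PAGES_TO_MB(512) == 2 and
 * MB_TO_PAGES(2) == 512.
 */
static inline unsigned long ram_size_mb(void)
{
	/* Convert the page count tracked above into whole megabytes. */
	return PAGES_TO_MB(totalram_pages());
}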
84 
85 #ifdef CONFIG_SYSCTL
86 extern int sysctl_legacy_va_layout;
87 #else
88 #define sysctl_legacy_va_layout 0
89 #endif
90 
91 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
92 extern const int mmap_rnd_bits_min;
93 extern int mmap_rnd_bits_max __ro_after_init;
94 extern int mmap_rnd_bits __read_mostly;
95 #endif
96 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
97 extern const int mmap_rnd_compat_bits_min;
98 extern const int mmap_rnd_compat_bits_max;
99 extern int mmap_rnd_compat_bits __read_mostly;
100 #endif
101 
102 #ifndef DIRECT_MAP_PHYSMEM_END
103 # ifdef MAX_PHYSMEM_BITS
104 # define DIRECT_MAP_PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
105 # else
106 # define DIRECT_MAP_PHYSMEM_END	(((phys_addr_t)-1)&~(1ULL<<63))
107 # endif
108 #endif
109 
110 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
111 
112 #include <asm/page.h>
113 #include <asm/processor.h>
114 
115 #ifndef __pa_symbol
116 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
117 #endif
118 
119 #ifndef page_to_virt
120 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
121 #endif
122 
123 #ifndef lm_alias
124 #define lm_alias(x)	__va(__pa_symbol(x))
125 #endif
126 
127 /*
128  * To prevent common memory management code from establishing
129  * a zero page mapping on a read fault.
130  * This macro should be defined within <asm/pgtable.h>.
131  * s390 does this to prevent multiplexing of hardware bits
132  * related to the physical page in case of virtualization.
133  */
134 #ifndef mm_forbids_zeropage
135 #define mm_forbids_zeropage(X)	(0)
136 #endif
137 
138 /*
139  * On some architectures it is expensive to call memset() for small sizes.
140  * If an architecture decides to implement their own version of
141  * mm_zero_struct_page they should wrap the defines below in a #ifndef and
142  * define their own version of this macro in <asm/pgtable.h>
143  */
144 #if BITS_PER_LONG == 64
145 /* This function must be updated when the size of struct page grows above 96
146  * or shrinks below 56. The idea is that the compiler optimizes out the
147  * switch() statement and leaves only move/store instructions. The compiler
148  * can also combine write statements if they are both assignments and can be
149  * reordered, which can result in several of the writes here being dropped.
150  */
151 #define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
152 static inline void __mm_zero_struct_page(struct page *page)
153 {
154 	unsigned long *_pp = (void *)page;
155 
156 	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
157 	BUILD_BUG_ON(sizeof(struct page) & 7);
158 	BUILD_BUG_ON(sizeof(struct page) < 56);
159 	BUILD_BUG_ON(sizeof(struct page) > 96);
160 
161 	switch (sizeof(struct page)) {
162 	case 96:
163 		_pp[11] = 0;
164 		fallthrough;
165 	case 88:
166 		_pp[10] = 0;
167 		fallthrough;
168 	case 80:
169 		_pp[9] = 0;
170 		fallthrough;
171 	case 72:
172 		_pp[8] = 0;
173 		fallthrough;
174 	case 64:
175 		_pp[7] = 0;
176 		fallthrough;
177 	case 56:
178 		_pp[6] = 0;
179 		_pp[5] = 0;
180 		_pp[4] = 0;
181 		_pp[3] = 0;
182 		_pp[2] = 0;
183 		_pp[1] = 0;
184 		_pp[0] = 0;
185 	}
186 }
187 #else
188 #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
189 #endif
190 
191 /*
192  * Default maximum number of active map areas; this limits the number of VMAs
193  * per mm struct. Users can override this number via sysctl, but there is a
194  * problem.
195  *
196  * When a program's coredump is generated in ELF format, a section is created
197  * per VMA. In ELF, the number of sections is represented as an unsigned short,
198  * so it must be smaller than 65535 when a coredump is written. Because the
199  * kernel adds some informative sections to the program image when generating
200  * the coredump, we need some margin. The number of extra sections is currently
201  * 1-3 and depends on the arch, so we use "5" as a safe margin here.
202  *
203  * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
204  * is no longer a hard limit, although some userspace tools can be surprised
205  * by that.
206  */
207 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
208 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
209 
210 extern int sysctl_max_map_count;
211 
212 extern unsigned long sysctl_user_reserve_kbytes;
213 extern unsigned long sysctl_admin_reserve_kbytes;
214 
215 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
216 bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
217 #else
218 static inline bool page_range_contiguous(const struct page *page,
219 		unsigned long nr_pages)
220 {
221 	return true;
222 }
223 #endif
224 
225 /* to align the pointer to the (next) page boundary */
226 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
227 
228 /* to align the pointer to the (prev) page boundary */
229 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
230 
231 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
232 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
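
/*
 * A small sketch of how the alignment helpers above combine; range_page_span()
 * is a hypothetical helper, not part of this header. It computes how many
 * whole pages are touched by the byte range [addr, addr + len).
 */
static inline unsigned long range_page_span(unsigned long addr, unsigned long len)
{
	unsigned long start = PAGE_ALIGN_DOWN(addr);	/* round start down */
	unsigned long end = PAGE_ALIGN(addr + len);	/* round end up */

	return (end - start) >> PAGE_SHIFT;
}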
233 
234 /**
235  * folio_page_idx - Return the number of a page in a folio.
236  * @folio: The folio.
237  * @page: The folio page.
238  *
239  * This function expects that the page is actually part of the folio.
240  * The returned number is relative to the start of the folio.
241  */
242 static inline unsigned long folio_page_idx(const struct folio *folio,
243 		const struct page *page)
244 {
245 	return page - &folio->page;
246 }
247 
248 static inline struct folio *lru_to_folio(struct list_head *head)
249 {
250 	return list_entry((head)->prev, struct folio, lru);
251 }
252 
253 void setup_initial_init_mm(void *start_code, void *end_code,
254 			   void *end_data, void *brk);
255 
256 /*
257  * Linux kernel virtual memory manager primitives.
258  * The idea being to have a "virtual" mm in the same way
259  * we have a virtual fs - giving a cleaner interface to the
260  * mm details, and allowing different kinds of memory mappings
261  * (from shared memory to executable loading to arbitrary
262  * mmap() functions).
263  */
264 
265 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
266 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
267 void vm_area_free(struct vm_area_struct *);
268 
269 #ifndef CONFIG_MMU
270 extern struct rb_root nommu_region_tree;
271 extern struct rw_semaphore nommu_region_sem;
272 
273 extern unsigned int kobjsize(const void *objp);
274 #endif
275 
276 /*
277  * vm_flags in vm_area_struct, see mm_types.h.
278  * When changing, update also include/trace/events/mmflags.h
279  */
280 
281 #define VM_NONE		0x00000000
282 
283 /**
284  * typedef vma_flag_t - specifies an individual VMA flag by bit number.
285  *
286  * This value is made type safe by sparse to avoid passing invalid flag values
287  * around.
288  */
289 typedef int __bitwise vma_flag_t;
290 
291 #define DECLARE_VMA_BIT(name, bitnum) \
292 	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
293 #define DECLARE_VMA_BIT_ALIAS(name, aliased) \
294 	VMA_ ## name ## _BIT = (VMA_ ## aliased ## _BIT)
295 enum {
296 	DECLARE_VMA_BIT(READ, 0),
297 	DECLARE_VMA_BIT(WRITE, 1),
298 	DECLARE_VMA_BIT(EXEC, 2),
299 	DECLARE_VMA_BIT(SHARED, 3),
300 	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and likewise for the w/x bits. */
301 	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
302 	DECLARE_VMA_BIT(MAYWRITE, 5),
303 	DECLARE_VMA_BIT(MAYEXEC, 6),
304 	DECLARE_VMA_BIT(MAYSHARE, 7),
305 	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
306 #ifdef CONFIG_MMU
307 	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
308 #else
309 	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
310 	DECLARE_VMA_BIT(MAYOVERLAY, 9),
311 #endif /* CONFIG_MMU */
312 	/* Page-ranges managed without "struct page", just pure PFN */
313 	DECLARE_VMA_BIT(PFNMAP, 10),
314 	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
315 	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
316 	DECLARE_VMA_BIT(LOCKED, 13),
317 	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
318 	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
319 	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
320 	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
321 	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
322 	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
323 	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
324 	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
325 	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
326 	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
327 	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
328 	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
329 	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
330 	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
331 	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
332 	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
333 	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
334 	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
335 	/* These bits are reused, we define specific uses below. */
336 	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
337 	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
338 	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
339 	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
340 	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
341 	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
342 	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
343 	/*
344 	 * This flag is used to connect VFIO to arch specific KVM code. It
345 	 * indicates that the memory under this VMA is safe for use with any
346 	 * non-cacheable memory type inside KVM. Some VFIO devices, on some
347 	 * platforms, are thought to be unsafe and can cause machine crashes
348 	 * if KVM does not lock down the memory type.
349 	 */
350 	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
351 #ifdef CONFIG_PPC32
352 	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
353 #else
354 	DECLARE_VMA_BIT(DROPPABLE, 40),
355 #endif
356 	DECLARE_VMA_BIT(UFFD_MINOR, 41),
357 	DECLARE_VMA_BIT(SEALED, 42),
358 	/* Flags that reuse flags above. */
359 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
360 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
361 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
362 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
363 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
364 #if defined(CONFIG_X86_USER_SHADOW_STACK)
365 	/*
366 	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
367 	 * support in core mm.
368 	 *
369 	 * These VMAs will get a single end guard page. This helps userspace
370 	 * protect itself from attacks. A single page is enough for current
371 	 * shadow stack archs (x86). See the comments near alloc_shstk() in
372 	 * arch/x86/kernel/shstk.c for more details on the guard size.
373 	 */
374 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
375 #elif defined(CONFIG_ARM64_GCS)
376 	/*
377 	 * arm64's Guarded Control Stack implements similar functionality and
378 	 * has similar constraints to shadow stacks.
379 	 */
380 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
381 #endif
382 	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),		/* Strong Access Ordering (powerpc) */
383 	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),		/* parisc */
384 	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
385 	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
386 	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
387 	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
388 	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
389 	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
390 #ifdef CONFIG_STACK_GROWSUP
391 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
392 	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
393 #else
394 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
395 #endif
396 };
397 #undef DECLARE_VMA_BIT
398 #undef DECLARE_VMA_BIT_ALIAS
399 
400 #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
401 #define VM_READ		INIT_VM_FLAG(READ)
402 #define VM_WRITE	INIT_VM_FLAG(WRITE)
403 #define VM_EXEC		INIT_VM_FLAG(EXEC)
404 #define VM_SHARED	INIT_VM_FLAG(SHARED)
405 #define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
406 #define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
407 #define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
408 #define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
409 #define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
410 #ifdef CONFIG_MMU
411 #define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
412 #else
413 #define VM_UFFD_MISSING	VM_NONE
414 #define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
415 #endif
416 #define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
417 #define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
418 #define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
419 #define VM_LOCKED	INIT_VM_FLAG(LOCKED)
420 #define VM_IO		INIT_VM_FLAG(IO)
421 #define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
422 #define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
423 #define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
424 #define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
425 #define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
426 #define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
427 #define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
428 #define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
429 #define VM_SYNC		INIT_VM_FLAG(SYNC)
430 #define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
431 #define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
432 #define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
433 #ifdef CONFIG_MEM_SOFT_DIRTY
434 #define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
435 #else
436 #define VM_SOFTDIRTY	VM_NONE
437 #endif
438 #define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
439 #define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
440 #define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
441 #define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
442 #define VM_STACK	INIT_VM_FLAG(STACK)
443 #ifdef CONFIG_STACK_GROWSUP
444 #define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
445 #else
446 #define VM_STACK_EARLY	VM_NONE
447 #endif
448 #ifdef CONFIG_ARCH_HAS_PKEYS
449 #define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
450 /* Despite the naming, these are FLAGS not bits. */
451 #define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
452 #define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
453 #define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
454 #if CONFIG_ARCH_PKEY_BITS > 3
455 #define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
456 #else
457 #define VM_PKEY_BIT3  VM_NONE
458 #endif /* CONFIG_ARCH_PKEY_BITS > 3 */
459 #if CONFIG_ARCH_PKEY_BITS > 4
460 #define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
461 #else
462 #define VM_PKEY_BIT4  VM_NONE
463 #endif /* CONFIG_ARCH_PKEY_BITS > 4 */
464 #endif /* CONFIG_ARCH_HAS_PKEYS */
465 #if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
466 #define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
467 #else
468 #define VM_SHADOW_STACK	VM_NONE
469 #endif
470 #if defined(CONFIG_PPC64)
471 #define VM_SAO		INIT_VM_FLAG(SAO)
472 #elif defined(CONFIG_PARISC)
473 #define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
474 #elif defined(CONFIG_SPARC64)
475 #define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
476 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
477 #elif defined(CONFIG_ARM64)
478 #define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
479 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
480 #elif !defined(CONFIG_MMU)
481 #define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
482 #endif
483 #ifndef VM_GROWSUP
484 #define VM_GROWSUP	VM_NONE
485 #endif
486 #ifdef CONFIG_ARM64_MTE
487 #define VM_MTE		INIT_VM_FLAG(MTE)
488 #define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
489 #else
490 #define VM_MTE		VM_NONE
491 #define VM_MTE_ALLOWED	VM_NONE
492 #endif
493 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
494 #define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
495 #else
496 #define VM_UFFD_MINOR	VM_NONE
497 #endif
498 #ifdef CONFIG_64BIT
499 #define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
500 #define VM_SEALED		INIT_VM_FLAG(SEALED)
501 #else
502 #define VM_ALLOW_ANY_UNCACHED	VM_NONE
503 #define VM_SEALED		VM_NONE
504 #endif
505 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
506 #define VM_DROPPABLE		INIT_VM_FLAG(DROPPABLE)
507 #else
508 #define VM_DROPPABLE		VM_NONE
509 #endif
510 
511 /* Bits set in the VMA until the stack is in its final location */
512 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
513 
514 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
515 
516 /* Common data flag combinations */
517 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
518 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
519 #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
520 				 VM_MAYWRITE | VM_MAYEXEC)
521 #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
522 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
523 
524 #ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
525 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
526 #endif
527 
528 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
529 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
530 #endif
531 
532 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
533 
534 #ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
535 #define VM_SEALED_SYSMAP	VM_SEALED
536 #else
537 #define VM_SEALED_SYSMAP	VM_NONE
538 #endif
539 
540 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
541 
542 /* VMA basic access permission flags */
543 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
544 
545 /*
546  * Special vmas that are non-mergable, non-mlock()able.
547  */
548 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
549 
550 /*
551  * Physically remapped pages are special. Tell the
552  * rest of the world about it:
553  *   VM_IO tells people not to look at these pages
554  *	(accesses can have side effects).
555  *   VM_PFNMAP tells the core MM that the base pages are just
556  *	raw PFN mappings, and do not have a "struct page" associated
557  *	with them.
558  *   VM_DONTEXPAND
559  *      Disable vma merging and expanding with mremap().
560  *   VM_DONTDUMP
561  *      Omit vma from core dump, even when VM_IO turned off.
562  */
563 #define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
564 
565 /* This mask prevents a VMA from being scanned by khugepaged */
566 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
567 
568 /* This mask defines which mm->def_flags a process can inherit its parent */
569 #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
570 
571 /* This mask represents all the VMA flag bits used by mlock */
572 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
573 
574 /* These flags can be updated atomically via VMA/mmap read lock. */
575 #define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
576 
577 /* Arch-specific flags to clear when updating VM flags on protection change */
578 #ifndef VM_ARCH_CLEAR
579 #define VM_ARCH_CLEAR	VM_NONE
580 #endif
581 #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
582 
583 /*
584  * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
585  * possesses them but the other does not, should nonetheless be applied to the
586  * merged VMA:
587  *
588  *   VM_SOFTDIRTY - If a VMA is marked soft-dirty, that is, has not had its
589  *                  references cleared via /proc/$pid/clear_refs, any merged VMA
590  *                  should be considered soft-dirty too, as the flag operates at
591  *                  VMA granularity.
592  *
593  * VM_MAYBE_GUARD - If a VMA may have guard regions in place, the mapped page
594  *                  tables may contain metadata not described by the VMA. Any
595  *                  merged VMA may therefore also contain this metadata, so we
596  *                  must make this flag sticky.
597  */
598 #define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)
599 
600 /*
601  * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
602  * of these flags and the other not does not preclude a merge.
603  *
604  *    VM_STICKY - When merging VMAs, VMA flags must match, unless they are
605  *                'sticky'. If any sticky flags exist in either VMA, we simply
606  *                set all of them on the merged VMA.
607  */
608 #define VM_IGNORE_MERGE VM_STICKY
609 
610 /*
611  * Flags which should result in page tables being copied on fork. These are
612  * flags which indicate that the VMA's page tables contain state which cannot
613  * be reconstituted upon page fault, and so necessitate page table copying on fork.
614  *
615  * Note that these flags should be compared with the DESTINATION VMA not the
616  * source, as VM_UFFD_WP may not be propagated to destination, while all other
617  * flags will be.
618  *
619  * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
620  *                           reasonably reconstructed on page fault.
621  *
622  *              VM_UFFD_WP - Encodes metadata about an installed uffd
623  *                           write protect handler, which cannot be
624  *                           reconstructed on page fault.
625  *
626  *                           We always copy page tables when dst_vma has
627  *                           uffd-wp enabled, even if it is file-backed
628  *                           (e.g. shmem), because when uffd-wp is enabled
629  *                           the page table contains uffd-wp protection
630  *                           information that cannot be retrieved from the
631  *                           page cache; skipping the copy would lose it.
632  *
633  *          VM_MAYBE_GUARD - Could contain page guard region markers which
634  *                           by design are a property of the page tables
635  *                           only and thus cannot be reconstructed on page
636  *                           fault.
637  */
638 #define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
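
/*
 * A simplified sketch of the fork-time decision this mask feeds into; the
 * helper name is hypothetical and the real logic also considers things such
 * as anon_vma. Per the comment above, the check is made against the
 * destination VMA.
 */
static inline bool example_needs_pgtable_copy(const struct vm_area_struct *dst_vma)
{
	return !!(dst_vma->vm_flags & VM_COPY_ON_FORK);
}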
639 
640 /*
641  * mapping from the currently active vm_flags protection bits (the
642  * low four bits) to a page protection mask..
643  */
644 
645 /*
646  * The default fault flags that should be used by most of the
647  * arch-specific page fault handlers.
648  */
649 #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
650 			     FAULT_FLAG_KILLABLE | \
651 			     FAULT_FLAG_INTERRUPTIBLE)
652 
653 /**
654  * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
655  * @flags: Fault flags.
656  *
657  * This is mostly used for places where we want to try to avoid taking
658  * the mmap_lock for too long a time when waiting for another condition
659  * to change, in which case we can try to be polite and release the
660  * mmap_lock in the first round to avoid potential starvation of other
661  * processes that would also want the mmap_lock.
662  *
663  * Return: true if the page fault allows retry and this is the first
664  * attempt of the fault handling; false otherwise.
665  */
666 static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
667 {
668 	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
669 	    (!(flags & FAULT_FLAG_TRIED));
670 }
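
/*
 * A sketch of the retry pattern this helper supports in arch fault handlers;
 * the function below is hypothetical and only illustrates the logic: only the
 * first attempt may drop the lock and retry, and only if the caller did not
 * ask for NOWAIT behaviour.
 */
static inline bool example_should_retry_fault(enum fault_flag flags,
					      vm_fault_t fault)
{
	if (!(fault & VM_FAULT_RETRY))
		return false;

	/* Second and later attempts must not retry again. */
	if (!fault_flag_allow_retry_first(flags))
		return false;

	/* Respect callers that cannot wait for the fault to be retried. */
	return !(flags & FAULT_FLAG_RETRY_NOWAIT);
}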
671 
672 #define FAULT_FLAG_TRACE \
673 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
674 	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
675 	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
676 	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
677 	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
678 	{ FAULT_FLAG_TRIED,		"TRIED" }, \
679 	{ FAULT_FLAG_USER,		"USER" }, \
680 	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
681 	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
682 	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
683 	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }
684 
685 /*
686  * vm_fault is filled by the pagefault handler and passed to the vma's
687  * ->fault function. The vma's ->fault is responsible for returning a bitmask
688  * of VM_FAULT_xxx flags that give details about how the fault was handled.
689  *
690  * The MM layer fills in gfp_mask for page allocations, but the fault handler
691  * alter it if its implementation requires a different allocation context.
692  *
693  * pgoff should be used in favour of virtual_address, if possible.
694  */
695 struct vm_fault {
696 	const struct {
697 		struct vm_area_struct *vma;	/* Target VMA */
698 		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
699 		pgoff_t pgoff;			/* Logical page offset based on vma */
700 		unsigned long address;		/* Faulting virtual address - masked */
701 		unsigned long real_address;	/* Faulting virtual address - unmasked */
702 	};
703 	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
704 					 * XXX: should really be 'const' */
705 	pmd_t *pmd;			/* Pointer to pmd entry matching
706 					 * the 'address' */
707 	pud_t *pud;			/* Pointer to pud entry matching
708 					 * the 'address'
709 					 */
710 	union {
711 		pte_t orig_pte;		/* Value of PTE at the time of fault */
712 		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
713 					 * used by PMD fault only.
714 					 */
715 	};
716 
717 	struct page *cow_page;		/* Page handler may use for COW fault */
718 	struct page *page;		/* ->fault handlers should return a
719 					 * page here, unless VM_FAULT_NOPAGE
720 					 * is set (which is also implied by
721 					 * VM_FAULT_ERROR).
722 					 */
723 	/* These three entries are valid only while holding ptl lock */
724 	pte_t *pte;			/* Pointer to pte entry matching
725 					 * the 'address'. NULL if the page
726 					 * table hasn't been allocated.
727 					 */
728 	spinlock_t *ptl;		/* Page table lock.
729 					 * Protects pte page table if 'pte'
730 					 * is not NULL, otherwise pmd.
731 					 */
732 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
733 					 * vm_ops->map_pages() sets up a page
734 					 * table from atomic context.
735 					 * do_fault_around() pre-allocates
736 					 * page table to avoid allocation from
737 					 * atomic context.
738 					 */
739 };
740 
741 /*
742  * These are the virtual MM functions - opening of an area, closing and
743  * unmapping it (needed to keep files on disk up-to-date etc), pointer
744  * to the functions called when a no-page or a wp-page exception occurs.
745  */
746 struct vm_operations_struct {
747 	void (*open)(struct vm_area_struct * area);
748 	/**
749 	 * @close: Called when the VMA is being removed from the MM.
750 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
751 	 */
752 	void (*close)(struct vm_area_struct * area);
753 	/* Called any time before splitting to check if it's allowed */
754 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
755 	int (*mremap)(struct vm_area_struct *area);
756 	/*
757 	 * Called by mprotect() to make driver-specific permission
758 	 * checks before mprotect() is finalised.   The VMA must not
759 	 * be modified.  Returns 0 if mprotect() can proceed.
760 	 */
761 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
762 			unsigned long end, unsigned long newflags);
763 	vm_fault_t (*fault)(struct vm_fault *vmf);
764 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
765 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
766 			pgoff_t start_pgoff, pgoff_t end_pgoff);
767 	unsigned long (*pagesize)(struct vm_area_struct * area);
768 
769 	/* notification that a previously read-only page is about to become
770 	 * writable; if an error is returned it will cause a SIGBUS */
771 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
772 
773 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
774 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
775 
776 	/* called by access_process_vm when get_user_pages() fails, typically
777 	 * for use by special VMAs. See also generic_access_phys() for a generic
778 	 * implementation useful for any iomem mapping.
779 	 */
780 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
781 		      void *buf, int len, int write);
782 
783 	/* Called by the /proc/PID/maps code to ask the vma whether it
784 	 * has a special name.  Returning non-NULL will also cause this
785 	 * vma to be dumped unconditionally. */
786 	const char *(*name)(struct vm_area_struct *vma);
787 
788 #ifdef CONFIG_NUMA
789 	/*
790 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
791 	 * to hold the policy upon return.  Caller should pass NULL @new to
792 	 * remove a policy and fall back to surrounding context--i.e. do not
793 	 * install a MPOL_DEFAULT policy, nor the task or system default
794 	 * mempolicy.
795 	 */
796 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
797 
798 	/*
799 	 * get_policy() op must add reference [mpol_get()] to any policy at
800 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
801 	 * in mm/mempolicy.c will do this automatically.
802 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
803 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
804 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
805 	 * must return NULL--i.e., do not "fallback" to task or system default
806 	 * policy.
807 	 */
808 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
809 					unsigned long addr, pgoff_t *ilx);
810 #endif
811 #ifdef CONFIG_FIND_NORMAL_PAGE
812 	/*
813 	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
814 	 * allows for returning a "normal" page from vm_normal_page() even
815 	 * though the PTE indicates that the "struct page" either does not exist
816 	 * or should not be touched: "special".
817 	 *
818 	 * Do not add new users: this really only works when a "normal" page
819 	 * was mapped, but then the PTE got changed to something weird (+
820 	 * marked special) that would not make pte_pfn() identify the originally
821 	 * inserted page.
822 	 */
823 	struct page *(*find_normal_page)(struct vm_area_struct *vma,
824 					 unsigned long addr);
825 #endif /* CONFIG_FIND_NORMAL_PAGE */
826 };
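
/*
 * A minimal sketch of a driver-style vm_operations_struct; the identifiers
 * example_vm_fault and example_vm_ops are hypothetical. This variant maps
 * everything up front and simply refuses demand faults.
 */
static inline vm_fault_t example_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;	/* no pages are populated on demand */
}

static const struct vm_operations_struct example_vm_ops __maybe_unused = {
	.fault	= example_vm_fault,
};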
827 
828 #ifdef CONFIG_NUMA_BALANCING
829 static inline void vma_numab_state_init(struct vm_area_struct *vma)
830 {
831 	vma->numab_state = NULL;
832 }
833 static inline void vma_numab_state_free(struct vm_area_struct *vma)
834 {
835 	kfree(vma->numab_state);
836 }
837 #else
838 static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
839 static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
840 #endif /* CONFIG_NUMA_BALANCING */
841 
842 /*
843  * These must be here rather than mmap_lock.h as dependent on vm_fault type,
844  * declared in this header.
845  */
846 #ifdef CONFIG_PER_VMA_LOCK
847 static inline void release_fault_lock(struct vm_fault *vmf)
848 {
849 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
850 		vma_end_read(vmf->vma);
851 	else
852 		mmap_read_unlock(vmf->vma->vm_mm);
853 }
854 
855 static inline void assert_fault_locked(const struct vm_fault *vmf)
856 {
857 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
858 		vma_assert_locked(vmf->vma);
859 	else
860 		mmap_assert_locked(vmf->vma->vm_mm);
861 }
862 #else
863 static inline void release_fault_lock(struct vm_fault *vmf)
864 {
865 	mmap_read_unlock(vmf->vma->vm_mm);
866 }
867 
868 static inline void assert_fault_locked(const struct vm_fault *vmf)
869 {
870 	mmap_assert_locked(vmf->vma->vm_mm);
871 }
872 #endif /* CONFIG_PER_VMA_LOCK */
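
/*
 * Sketch of the intended call pattern; example_finish_fault() is hypothetical.
 * Whoever resolves the fault (under either the per-VMA lock or the mmap_lock)
 * releases whichever lock was actually taken via release_fault_lock().
 */
static inline void example_finish_fault(struct vm_fault *vmf)
{
	assert_fault_locked(vmf);	/* must still hold the fault lock here */
	/* ... fault-handling work would go here ... */
	release_fault_lock(vmf);	/* drops the VMA or mmap read lock */
}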
873 
874 static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
875 {
876 	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
877 }
878 
879 static inline bool mm_flags_test_and_set(int flag, struct mm_struct *mm)
880 {
881 	return test_and_set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
882 }
883 
884 static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
885 {
886 	return test_and_clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
887 }
888 
889 static inline void mm_flags_set(int flag, struct mm_struct *mm)
890 {
891 	set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
892 }
893 
894 static inline void mm_flags_clear(int flag, struct mm_struct *mm)
895 {
896 	clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
897 }
898 
899 static inline void mm_flags_clear_all(struct mm_struct *mm)
900 {
901 	bitmap_zero(ACCESS_PRIVATE(&mm->flags, __mm_flags), NUM_MM_FLAG_BITS);
902 }
903 
904 extern const struct vm_operations_struct vma_dummy_vm_ops;
905 
906 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
907 {
908 	memset(vma, 0, sizeof(*vma));
909 	vma->vm_mm = mm;
910 	vma->vm_ops = &vma_dummy_vm_ops;
911 	INIT_LIST_HEAD(&vma->anon_vma_chain);
912 	vma_lock_init(vma, false);
913 }
914 
915 /* Use when VMA is not part of the VMA tree and needs no locking */
916 static inline void vm_flags_init(struct vm_area_struct *vma,
917 				 vm_flags_t flags)
918 {
919 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
920 	vma_flags_clear_all(&vma->flags);
921 	vma_flags_overwrite_word(&vma->flags, flags);
922 }
923 
924 /*
925  * Use when VMA is part of the VMA tree and modifications need coordination
926  * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
927  * it should be locked explicitly beforehand.
928  */
929 static inline void vm_flags_reset(struct vm_area_struct *vma,
930 				  vm_flags_t flags)
931 {
932 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
933 	vma_assert_write_locked(vma);
934 	vm_flags_init(vma, flags);
935 }
936 
937 static inline void vm_flags_reset_once(struct vm_area_struct *vma,
938 				       vm_flags_t flags)
939 {
940 	vma_assert_write_locked(vma);
941 	/*
942 	 * If VMA flags exist beyond the first system word, also clear these. It
943 	 * is assumed the write once behaviour is required only for the first
944 	 * system word.
945 	 */
946 	if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
947 		unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
948 
949 		bitmap_zero(&bitmap[1], NUM_VMA_FLAG_BITS - BITS_PER_LONG);
950 	}
951 
952 	vma_flags_overwrite_word_once(&vma->flags, flags);
953 }
954 
955 static inline void vm_flags_set(struct vm_area_struct *vma,
956 				vm_flags_t flags)
957 {
958 	vma_start_write(vma);
959 	vma_flags_set_word(&vma->flags, flags);
960 }
961 
962 static inline void vm_flags_clear(struct vm_area_struct *vma,
963 				  vm_flags_t flags)
964 {
965 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
966 	vma_start_write(vma);
967 	vma_flags_clear_word(&vma->flags, flags);
968 }
969 
970 /*
971  * Use only if VMA is not part of the VMA tree or has no other users and
972  * therefore needs no locking.
973  */
974 static inline void __vm_flags_mod(struct vm_area_struct *vma,
975 				  vm_flags_t set, vm_flags_t clear)
976 {
977 	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
978 }
979 
980 /*
981  * Use only when the order of set/clear operations is unimportant, otherwise
982  * use vm_flags_{set|clear} explicitly.
983  */
984 static inline void vm_flags_mod(struct vm_area_struct *vma,
985 				vm_flags_t set, vm_flags_t clear)
986 {
987 	vma_start_write(vma);
988 	__vm_flags_mod(vma, set, clear);
989 }
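
/*
 * A sketch of how the helpers above compose; example_set_locked() is
 * hypothetical and assumes the caller already holds the mmap_lock for writing
 * (vm_flags_reset() additionally requires the VMA to be write-locked).
 */
static inline void example_set_locked(struct vm_area_struct *vma, bool on_fault)
{
	vm_flags_t flags = vma->vm_flags & ~VM_LOCKED_MASK;

	flags |= on_fault ? (VM_LOCKED | VM_LOCKONFAULT) : VM_LOCKED;
	vm_flags_reset(vma, flags);
}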
990 
991 static inline bool __vma_flag_atomic_valid(struct vm_area_struct *vma,
992 					   vma_flag_t bit)
993 {
994 	const vm_flags_t mask = BIT((__force int)bit);
995 
996 	/* Only specific flags are permitted */
997 	if (WARN_ON_ONCE(!(mask & VM_ATOMIC_SET_ALLOWED)))
998 		return false;
999 
1000 	return true;
1001 }
1002 
1003 /*
1004  * Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific
1005  * valid flags are allowed to do this.
1006  */
1007 static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
1008 				       vma_flag_t bit)
1009 {
1010 	unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
1011 
1012 	vma_assert_stabilised(vma);
1013 	if (__vma_flag_atomic_valid(vma, bit))
1014 		set_bit((__force int)bit, bitmap);
1015 }
1016 
1017 /*
1018  * Test for VMA flag atomically. Requires no locks. Only specific valid flags
1019  * are allowed to do this.
1020  *
1021  * This is necessarily racy, so callers must ensure that serialisation is
1022  * achieved through some other means, or that races are permissible.
1023  */
1024 static inline bool vma_flag_test_atomic(struct vm_area_struct *vma,
1025 					vma_flag_t bit)
1026 {
1027 	if (__vma_flag_atomic_valid(vma, bit))
1028 		return test_bit((__force int)bit, &vma->vm_flags);
1029 
1030 	return false;
1031 }
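
/*
 * Sketch of the intended pairing of the two helpers above, using the only
 * currently permitted bit (VMA_MAYBE_GUARD_BIT); the function name is
 * hypothetical. The test tolerates races, the set requires the VMA to be
 * stabilised by a VMA or mmap read lock.
 */
static inline void example_mark_maybe_guard(struct vm_area_struct *vma)
{
	if (!vma_flag_test_atomic(vma, VMA_MAYBE_GUARD_BIT))
		vma_flag_set_atomic(vma, VMA_MAYBE_GUARD_BIT);
}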
1032 
1033 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1034 {
1035 	vma->vm_ops = NULL;
1036 }
1037 
1038 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1039 {
1040 	return !vma->vm_ops;
1041 }
1042 
1043 /*
1044  * Indicate if the VMA is a heap for the given task; for
1045  * /proc/PID/maps that is the heap of the main task.
1046  */
1047 static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
1048 {
1049 	return vma->vm_start < vma->vm_mm->brk &&
1050 		vma->vm_end > vma->vm_mm->start_brk;
1051 }
1052 
1053 /*
1054  * Indicate if the VMA is a stack for the given task; for
1055  * /proc/PID/maps that is the stack of the main task.
1056  */
1057 static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
1058 {
1059 	/*
1060 	 * We make no effort to guess what a given thread considers to be
1061 	 * its "stack".  It's not even well-defined for programs written in
1062 	 * languages like Go.
1063 	 */
1064 	return vma->vm_start <= vma->vm_mm->start_stack &&
1065 		vma->vm_end >= vma->vm_mm->start_stack;
1066 }
1067 
1068 static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
1069 {
1070 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1071 
1072 	if (!maybe_stack)
1073 		return false;
1074 
1075 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1076 						VM_STACK_INCOMPLETE_SETUP)
1077 		return true;
1078 
1079 	return false;
1080 }
1081 
1082 static inline bool vma_is_foreign(const struct vm_area_struct *vma)
1083 {
1084 	if (!current->mm)
1085 		return true;
1086 
1087 	if (current->mm != vma->vm_mm)
1088 		return true;
1089 
1090 	return false;
1091 }
1092 
1093 static inline bool vma_is_accessible(const struct vm_area_struct *vma)
1094 {
1095 	return vma->vm_flags & VM_ACCESS_FLAGS;
1096 }
1097 
1098 static inline bool is_shared_maywrite(vm_flags_t vm_flags)
1099 {
1100 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
1101 		(VM_SHARED | VM_MAYWRITE);
1102 }
1103 
1104 static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
1105 {
1106 	return is_shared_maywrite(vma->vm_flags);
1107 }
1108 
1109 static inline
1110 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
1111 {
1112 	return mas_find(&vmi->mas, max - 1);
1113 }
1114 
1115 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
1116 {
1117 	/*
1118 	 * Uses mas_find() to get the first VMA when the iterator starts.
1119 	 * Calling mas_next() could skip the first entry.
1120 	 */
1121 	return mas_find(&vmi->mas, ULONG_MAX);
1122 }
1123 
1124 static inline
1125 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
1126 {
1127 	return mas_next_range(&vmi->mas, ULONG_MAX);
1128 }
1129 
1130 
1131 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
1132 {
1133 	return mas_prev(&vmi->mas, 0);
1134 }
1135 
1136 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1137 			unsigned long start, unsigned long end, gfp_t gfp)
1138 {
1139 	__mas_set_range(&vmi->mas, start, end - 1);
1140 	mas_store_gfp(&vmi->mas, NULL, gfp);
1141 	if (unlikely(mas_is_err(&vmi->mas)))
1142 		return -ENOMEM;
1143 
1144 	return 0;
1145 }
1146 
1147 /* Free any unused preallocations */
1148 static inline void vma_iter_free(struct vma_iterator *vmi)
1149 {
1150 	mas_destroy(&vmi->mas);
1151 }
1152 
1153 static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
1154 				      struct vm_area_struct *vma)
1155 {
1156 	vmi->mas.index = vma->vm_start;
1157 	vmi->mas.last = vma->vm_end - 1;
1158 	mas_store(&vmi->mas, vma);
1159 	if (unlikely(mas_is_err(&vmi->mas)))
1160 		return -ENOMEM;
1161 
1162 	vma_mark_attached(vma);
1163 	return 0;
1164 }
1165 
1166 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
1167 {
1168 	mas_pause(&vmi->mas);
1169 }
1170 
1171 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1172 {
1173 	mas_set(&vmi->mas, addr);
1174 }
1175 
1176 #define for_each_vma(__vmi, __vma)					\
1177 	while (((__vma) = vma_next(&(__vmi))) != NULL)
1178 
1179 /* The MM code likes to work with exclusive end addresses */
1180 #define for_each_vma_range(__vmi, __vma, __end)				\
1181 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
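
/*
 * A sketch of typical iteration; count_vmas_below() is hypothetical and
 * assumes the caller holds the mmap_lock so the tree is stable.
 * VMA_ITERATOR() is declared in mm_types.h.
 */
static inline unsigned long count_vmas_below(struct mm_struct *mm,
					     unsigned long end)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	for_each_vma_range(vmi, vma, end)
		nr++;

	return nr;
}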
1182 
1183 #ifdef CONFIG_SHMEM
1184 /*
1185  * The vma_is_shmem is not inline because it is used only by slow
1186  * paths in userfault.
1187  */
1188 bool vma_is_shmem(const struct vm_area_struct *vma);
1189 bool vma_is_anon_shmem(const struct vm_area_struct *vma);
1190 #else
1191 static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
1192 static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
1193 #endif
1194 
1195 int vma_is_stack_for_current(const struct vm_area_struct *vma);
1196 
1197 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1198 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1199 
1200 struct mmu_gather;
1201 struct inode;
1202 
1203 extern void prep_compound_page(struct page *page, unsigned int order);
1204 
1205 static inline unsigned int folio_large_order(const struct folio *folio)
1206 {
1207 	return folio->_flags_1 & 0xff;
1208 }
1209 
1210 #ifdef NR_PAGES_IN_LARGE_FOLIO
1211 static inline unsigned long folio_large_nr_pages(const struct folio *folio)
1212 {
1213 	return folio->_nr_pages;
1214 }
1215 #else
1216 static inline unsigned long folio_large_nr_pages(const struct folio *folio)
1217 {
1218 	return 1L << folio_large_order(folio);
1219 }
1220 #endif
1221 
1222 /*
1223  * compound_order() can be called without holding a reference, which means
1224  * that niceties like page_folio() don't work.  These callers should be
1225  * prepared to handle wild return values.  For example, PG_head may be
1226  * set before the order is initialised, or this may be a tail page.
1227  * See compaction.c for some good examples.
1228  */
1229 static inline unsigned int compound_order(const struct page *page)
1230 {
1231 	const struct folio *folio = (struct folio *)page;
1232 
1233 	if (!test_bit(PG_head, &folio->flags.f))
1234 		return 0;
1235 	return folio_large_order(folio);
1236 }
1237 
1238 /**
1239  * folio_order - The allocation order of a folio.
1240  * @folio: The folio.
1241  *
1242  * A folio is composed of 2^order pages.  See get_order() for the definition
1243  * of order.
1244  *
1245  * Return: The order of the folio.
1246  */
1247 static inline unsigned int folio_order(const struct folio *folio)
1248 {
1249 	if (!folio_test_large(folio))
1250 		return 0;
1251 	return folio_large_order(folio);
1252 }
1253 
1254 /**
1255  * folio_reset_order - Reset the folio order and derived _nr_pages
1256  * @folio: The folio.
1257  *
1258  * Reset the order and derived _nr_pages to 0. Must only be used in the
1259  * process of splitting large folios.
1260  */
1261 static inline void folio_reset_order(struct folio *folio)
1262 {
1263 	if (WARN_ON_ONCE(!folio_test_large(folio)))
1264 		return;
1265 	folio->_flags_1 &= ~0xffUL;
1266 #ifdef NR_PAGES_IN_LARGE_FOLIO
1267 	folio->_nr_pages = 0;
1268 #endif
1269 }
1270 
1271 #include <linux/huge_mm.h>
1272 
1273 /*
1274  * Methods to modify the page usage count.
1275  *
1276  * What counts for a page usage:
1277  * - cache mapping   (page->mapping)
1278  * - private data    (page->private)
1279  * - page mapped in a task's page tables, each mapping
1280  *   is counted separately
1281  *
1282  * Also, many kernel routines increase the page count before a critical
1283  * routine so they can be sure the page doesn't go away from under them.
1284  */
1285 
1286 /*
1287  * Drop a ref, return true if the refcount fell to zero (the page has no users)
1288  */
1289 static inline int put_page_testzero(struct page *page)
1290 {
1291 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
1292 	return page_ref_dec_and_test(page);
1293 }
1294 
1295 static inline int folio_put_testzero(struct folio *folio)
1296 {
1297 	return put_page_testzero(&folio->page);
1298 }
1299 
1300 /*
1301  * Try to grab a ref unless the page has a refcount of zero, return false if
1302  * that is the case.
1303  * This can be called when the MMU is off, so it must not access
1304  * any of the virtual mappings.
1305  */
1306 static inline bool get_page_unless_zero(struct page *page)
1307 {
1308 	return page_ref_add_unless(page, 1, 0);
1309 }
1310 
1311 static inline struct folio *folio_get_nontail_page(struct page *page)
1312 {
1313 	if (unlikely(!get_page_unless_zero(page)))
1314 		return NULL;
1315 	return (struct folio *)page;
1316 }
1317 
1318 extern int page_is_ram(unsigned long pfn);
1319 
1320 enum {
1321 	REGION_INTERSECTS,
1322 	REGION_DISJOINT,
1323 	REGION_MIXED,
1324 };
1325 
1326 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1327 		      unsigned long desc);
1328 
1329 /* Support for virtually mapped pages */
1330 struct page *vmalloc_to_page(const void *addr);
1331 unsigned long vmalloc_to_pfn(const void *addr);
1332 
1333 /*
1334  * Determine if an address is within the vmalloc range
1335  *
1336  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
1337  * is no special casing required.
1338  */
1339 #ifdef CONFIG_MMU
1340 extern bool is_vmalloc_addr(const void *x);
1341 extern int is_vmalloc_or_module_addr(const void *x);
1342 #else
1343 static inline bool is_vmalloc_addr(const void *x)
1344 {
1345 	return false;
1346 }
1347 static inline int is_vmalloc_or_module_addr(const void *x)
1348 {
1349 	return 0;
1350 }
1351 #endif
1352 
1353 /*
1354  * How many times the entire folio is mapped as a single unit (e.g. by a
1355  * PMD or PUD entry).  This is probably not what you want, except for
1356  * debugging purposes or implementation of other core folio_*() primitives.
1357  */
1358 static inline int folio_entire_mapcount(const struct folio *folio)
1359 {
1360 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1361 	if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
1362 		return 0;
1363 	return atomic_read(&folio->_entire_mapcount) + 1;
1364 }
1365 
1366 static inline int folio_large_mapcount(const struct folio *folio)
1367 {
1368 	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
1369 	return atomic_read(&folio->_large_mapcount) + 1;
1370 }
1371 
1372 /**
1373  * folio_mapcount() - Number of mappings of this folio.
1374  * @folio: The folio.
1375  *
1376  * The folio mapcount corresponds to the number of present user page table
1377  * entries that reference any part of a folio. Each such present user page
1378  * table entry must be paired with exactly on folio reference.
1379  *
1380  * For ordindary folios, each user page table entry (PTE/PMD/PUD/...) counts
1381  * exactly once.
1382  *
1383  * For hugetlb folios, each abstracted "hugetlb" user page table entry that
1384  * references the entire folio counts exactly once, even when such special
1385  * page table entries are comprised of multiple ordinary page table entries.
1386  *
1387  * Will report 0 for pages which cannot be mapped into userspace, such as
1388  * slab, page tables and similar.
1389  *
1390  * Return: The number of times this folio is mapped.
1391  */
1392 static inline int folio_mapcount(const struct folio *folio)
1393 {
1394 	int mapcount;
1395 
1396 	if (likely(!folio_test_large(folio))) {
1397 		mapcount = atomic_read(&folio->_mapcount) + 1;
1398 		if (page_mapcount_is_type(mapcount))
1399 			mapcount = 0;
1400 		return mapcount;
1401 	}
1402 	return folio_large_mapcount(folio);
1403 }
1404 
1405 /**
1406  * folio_mapped - Is this folio mapped into userspace?
1407  * @folio: The folio.
1408  *
1409  * Return: True if any page in this folio is referenced by user page tables.
1410  */
1411 static inline bool folio_mapped(const struct folio *folio)
1412 {
1413 	return folio_mapcount(folio) >= 1;
1414 }
1415 
1416 /*
1417  * Return true if this page is mapped into pagetables.
1418  * For compound page it returns true if any sub-page of compound page is mapped,
1419  * even if this particular sub-page is not itself mapped by any PTE or PMD.
1420  */
1421 static inline bool page_mapped(const struct page *page)
1422 {
1423 	return folio_mapped(page_folio(page));
1424 }
1425 
1426 static inline struct page *virt_to_head_page(const void *x)
1427 {
1428 	struct page *page = virt_to_page(x);
1429 
1430 	return compound_head(page);
1431 }
1432 
1433 static inline struct folio *virt_to_folio(const void *x)
1434 {
1435 	struct page *page = virt_to_page(x);
1436 
1437 	return page_folio(page);
1438 }
1439 
1440 void __folio_put(struct folio *folio);
1441 
1442 void split_page(struct page *page, unsigned int order);
1443 void folio_copy(struct folio *dst, struct folio *src);
1444 int folio_mc_copy(struct folio *dst, struct folio *src);
1445 
1446 unsigned long nr_free_buffer_pages(void);
1447 
1448 /* Returns the number of bytes in this potentially compound page. */
1449 static inline unsigned long page_size(const struct page *page)
1450 {
1451 	return PAGE_SIZE << compound_order(page);
1452 }
1453 
1454 /* Returns the number of bits needed for the number of bytes in a page */
1455 static inline unsigned int page_shift(struct page *page)
1456 {
1457 	return PAGE_SHIFT + compound_order(page);
1458 }
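
/*
 * The two helpers above are related by construction: for example, assuming
 * 4 KiB base pages, an order-2 compound page has page_size() == 16384 and
 * page_shift() == 14, so page_size(page) == 1UL << page_shift(page).
 */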
1459 
1460 /**
1461  * thp_order - Order of a transparent huge page.
1462  * @page: Head page of a transparent huge page.
1463  */
1464 static inline unsigned int thp_order(struct page *page)
1465 {
1466 	VM_BUG_ON_PGFLAGS(PageTail(page), page);
1467 	return compound_order(page);
1468 }
1469 
1470 /**
1471  * thp_size - Size of a transparent huge page.
1472  * @page: Head page of a transparent huge page.
1473  *
1474  * Return: Number of bytes in this page.
1475  */
1476 static inline unsigned long thp_size(struct page *page)
1477 {
1478 	return PAGE_SIZE << thp_order(page);
1479 }
1480 
1481 #ifdef CONFIG_MMU
1482 /*
1483  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1484  * servicing faults for write access.  In the normal case we always want
1485  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1486  * that do not have writing enabled, when used by access_process_vm.
1487  */
1488 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1489 {
1490 	if (likely(vma->vm_flags & VM_WRITE))
1491 		pte = pte_mkwrite(pte, vma);
1492 	return pte;
1493 }
1494 
1495 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
1496 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1497 		struct page *page, unsigned int nr, unsigned long addr);
1498 
1499 vm_fault_t finish_fault(struct vm_fault *vmf);
1500 #endif
1501 
1502 /*
1503  * Multiple processes may "see" the same page. E.g. for untouched
1504  * mappings of /dev/null, all processes see the same page full of
1505  * zeroes, and text pages of executables and shared libraries have
1506  * only one copy in memory, at most, normally.
1507  *
1508  * For the non-reserved pages, page_count(page) denotes a reference count.
1509  *   page_count() == 0 means the page is free. page->lru is then used for
1510  *   freelist management in the buddy allocator.
1511  *   page_count() > 0  means the page has been allocated.
1512  *
1513  * Pages are allocated by the slab allocator in order to provide memory
1514  * to kmalloc and kmem_cache_alloc. In this case, the management of the
1515  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1516  * unless a particular usage is carefully commented. (the responsibility of
1517  * freeing the kmalloc memory is the caller's, of course).
1518  *
1519  * A page may be used by anyone else who does a __get_free_page().
1520  * In this case, page_count still tracks the references, and should only
1521  * be used through the normal accessor functions. The top bits of page->flags
1522  * and page->virtual store page management information, but all other fields
1523  * are unused and could be used privately, carefully. The management of this
1524  * page is the responsibility of the one who allocated it, and those who have
1525  * subsequently been given references to it.
1526  *
1527  * The other pages (we may call them "pagecache pages") are completely
1528  * managed by the Linux memory manager: I/O, buffers, swapping etc.
1529  * The following discussion applies only to them.
1530  *
1531  * A pagecache page contains an opaque `private' member, which belongs to the
1532  * page's address_space. Usually, this is the address of a circular list of
1533  * the page's disk buffers. PG_private must be set to tell the VM to call
1534  * into the filesystem to release these pages.
1535  *
1536  * A folio may belong to an inode's memory mapping. In this case,
1537  * folio->mapping points to the inode, and folio->index is the file
1538  * offset of the folio, in units of PAGE_SIZE.
1539  *
1540  * If pagecache pages are not associated with an inode, they are said to be
1541  * anonymous pages. These may become associated with the swapcache, and in that
1542  * case PG_swapcache is set, and page->private is an offset into the swapcache.
1543  *
1544  * In either case (swapcache or inode backed), the pagecache itself holds one
1545  * reference to the page. Setting PG_private should also increment the
1546  * refcount. Each user mapping also has a reference to the page.
1547  *
1548  * The pagecache pages are stored in a per-mapping radix tree, which is
1549  * rooted at mapping->i_pages, and indexed by offset.
1550  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
1551  * lists, we instead now tag pages as dirty/writeback in the radix tree.
1552  *
1553  * All pagecache pages may be subject to I/O:
1554  * - inode pages may need to be read from disk,
1555  * - inode pages which have been modified and are MAP_SHARED may need
1556  *   to be written back to the inode on disk,
1557  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
1558  *   modified may need to be swapped out to swap space and (later) to be read
1559  *   back into memory.
1560  */
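
/*
 * Example (an illustrative sketch, not part of the comment above): for a
 * pagecache folio, the inode and the byte offset in the file can be
 * recovered from the mapping and index described above.
 *
 *	struct inode *inode = folio->mapping->host;
 *	loff_t pos = (loff_t)folio->index << PAGE_SHIFT;
 */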
1561 
1562 /* 127: arbitrary random number, small enough to assemble well */
1563 #define folio_ref_zero_or_close_to_overflow(folio) \
1564 	((unsigned int) folio_ref_count(folio) + 127u <= 127u)
1565 
1566 /**
1567  * folio_get - Increment the reference count on a folio.
1568  * @folio: The folio.
1569  *
1570  * Context: May be called in any context, as long as you know that
1571  * you have a refcount on the folio.  If you do not already have one,
1572  * folio_try_get() may be the right interface for you to use.
1573  */
1574 static inline void folio_get(struct folio *folio)
1575 {
1576 	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
1577 	folio_ref_inc(folio);
1578 }
1579 
1580 static inline void get_page(struct page *page)
1581 {
1582 	struct folio *folio = page_folio(page);
1583 	if (WARN_ON_ONCE(folio_test_slab(folio)))
1584 		return;
1585 	if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
1586 		return;
1587 	folio_get(folio);
1588 }
1589 
1590 static inline __must_check bool try_get_page(struct page *page)
1591 {
1592 	page = compound_head(page);
1593 	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1594 		return false;
1595 	page_ref_inc(page);
1596 	return true;
1597 }
1598 
1599 /**
1600  * folio_put - Decrement the reference count on a folio.
1601  * @folio: The folio.
1602  *
1603  * If the folio's reference count reaches zero, the memory will be
1604  * released back to the page allocator and may be used by another
1605  * allocation immediately.  Do not access the memory or the struct folio
1606  * after calling folio_put() unless you can be sure that it wasn't the
1607  * last reference.
1608  *
1609  * Context: May be called in process or interrupt context, but not in NMI
1610  * context.  May be called while holding a spinlock.
1611  */
1612 static inline void folio_put(struct folio *folio)
1613 {
1614 	if (folio_put_testzero(folio))
1615 		__folio_put(folio);
1616 }
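
/*
 * Example (an illustrative sketch): a caller that already holds a reference
 * can take and drop an extra one around a region where the folio must not
 * be freed underneath it.  The helper below is hypothetical.
 *
 *	static void inspect_folio(struct folio *folio)
 *	{
 *		folio_get(folio);
 *		... examine the folio; it cannot be freed here ...
 *		folio_put(folio);
 *	}
 */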
1617 
1618 /**
1619  * folio_put_refs - Reduce the reference count on a folio.
1620  * @folio: The folio.
1621  * @refs: The amount to subtract from the folio's reference count.
1622  *
1623  * If the folio's reference count reaches zero, the memory will be
1624  * released back to the page allocator and may be used by another
1625  * allocation immediately.  Do not access the memory or the struct folio
1626  * after calling folio_put_refs() unless you can be sure that these weren't
1627  * the last references.
1628  *
1629  * Context: May be called in process or interrupt context, but not in NMI
1630  * context.  May be called while holding a spinlock.
1631  */
1632 static inline void folio_put_refs(struct folio *folio, int refs)
1633 {
1634 	if (folio_ref_sub_and_test(folio, refs))
1635 		__folio_put(folio);
1636 }
1637 
1638 void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
1639 
1640 /*
1641  * union release_pages_arg - an array of pages or folios
1642  *
1643  * release_pages() releases a simple array of multiple pages, and
1644  * accepts various different forms of said page array: either
1645  * a regular old boring array of pages, an array of folios, or
1646  * an array of encoded page pointers.
1647  *
1648  * The transparent union syntax for this kind of "any of these
1649  * argument types" is all kinds of ugly, so look away.
1650  */
1651 typedef union {
1652 	struct page **pages;
1653 	struct folio **folios;
1654 	struct encoded_page **encoded_pages;
1655 } release_pages_arg __attribute__ ((__transparent_union__));
1656 
1657 void release_pages(release_pages_arg, int nr);
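
/*
 * Example (an illustrative sketch): because of the transparent union above,
 * release_pages() accepts either array type directly, without casts.  The
 * arrays and counts are hypothetical.
 *
 *	struct page *pages[8];
 *	struct folio *folios[4];
 *
 *	release_pages(pages, 8);
 *	release_pages(folios, 4);
 */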
1658 
1659 /**
1660  * folios_put - Decrement the reference count on an array of folios.
1661  * @folios: The folios.
1662  *
1663  * Like folio_put(), but for a batch of folios.  This is more efficient
1664  * than writing the loop yourself as it will optimise the locks which need
1665  * to be taken if the folios are freed.  The folios batch is returned
1666  * empty and ready to be reused for another batch; there is no need to
1667  * reinitialise it.
1668  *
1669  * Context: May be called in process or interrupt context, but not in NMI
1670  * context.  May be called while holding a spinlock.
1671  */
1672 static inline void folios_put(struct folio_batch *folios)
1673 {
1674 	folios_put_refs(folios, NULL);
1675 }
1676 
1677 static inline void put_page(struct page *page)
1678 {
1679 	struct folio *folio = page_folio(page);
1680 
1681 	if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
1682 		return;
1683 
1684 	folio_put(folio);
1685 }
1686 
1687 /*
1688  * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
1689  * the page's refcount so that two separate items are tracked: the original page
1690  * reference count, and also a new count of how many pin_user_pages() calls were
1691  * made against the page. ("gup-pinned" is another term for the latter).
1692  *
1693  * With this scheme, pin_user_pages() becomes special: such pages are marked as
1694  * distinct from normal pages. As such, the unpin_user_page() call (and its
1695  * variants) must be used in order to release gup-pinned pages.
1696  *
1697  * Choice of value:
1698  *
1699  * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
1700  * counts with respect to pin_user_pages() and unpin_user_page() becomes
1701  * simpler, due to the fact that adding an even power of two to the page
1702  * refcount has the effect of using only the upper N bits, for the code that
1703  * counts up using the bias value. This means that the lower bits are left for
1704  * the exclusive use of the original code that increments and decrements by one
1705  * (or at least, by much smaller values than the bias value).
1706  *
1707  * Of course, once the lower bits overflow into the upper bits (and this is
1708  * OK, because subtraction recovers the original values), then visual inspection
1709  * no longer suffices to directly view the separate counts. However, for normal
1710  * applications that don't have huge page reference counts, this won't be an
1711  * issue.
1712  *
1713  * Locking: the lockless algorithm described in folio_try_get()
1714  * provides safe operation for get_user_pages(), folio_mkclean() and
1715  * other calls that race to set up page table entries.
1716  */
1717 #define GUP_PIN_COUNTING_BIAS (1U << 10)
1718 
1719 void unpin_user_page(struct page *page);
1720 void unpin_folio(struct folio *folio);
1721 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1722 				 bool make_dirty);
1723 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1724 				      bool make_dirty);
1725 void unpin_user_pages(struct page **pages, unsigned long npages);
1726 void unpin_user_folio(struct folio *folio, unsigned long npages);
1727 void unpin_folios(struct folio **folios, unsigned long nfolios);
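
/*
 * Example (an illustrative sketch of a hypothetical driver path): pages
 * pinned via pin_user_pages() must be released with unpin_user_pages() or
 * one of the variants above, never with put_page(), so that the
 * GUP_PIN_COUNTING_BIAS accounting stays balanced.
 *
 *	long pinned = pin_user_pages(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM,
 *				     pages);
 *
 *	if (pinned > 0) {
 *		... set up and perform DMA to the pinned pages ...
 *		unpin_user_pages_dirty_lock(pages, pinned, true);
 *	}
 */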
1728 
1729 static inline bool is_cow_mapping(vm_flags_t flags)
1730 {
1731 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1732 }
1733 
1734 #ifndef CONFIG_MMU
1735 static inline bool is_nommu_shared_mapping(vm_flags_t flags)
1736 {
1737 	/*
1738 	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
1739 	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
1740 	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
1741 	 * underlying memory if ptrace is active, so this is only possible if
1742 	 * ptrace does not apply. Note that there is no mprotect() to upgrade
1743 	 * write permissions later.
1744 	 */
1745 	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
1746 }
1747 #endif
1748 
1749 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1750 #define SECTION_IN_PAGE_FLAGS
1751 #endif
1752 
1753 /*
1754  * The identification function is mainly used by the buddy allocator for
1755  * determining if two pages could be buddies. We are not really identifying
1756  * the zone since we could be using the section number id if we do not have
1757  * node id available in page flags.
1758  * We only guarantee that it will return the same value for two combinable
1759  * pages in a zone.
1760  */
1761 static inline int page_zone_id(struct page *page)
1762 {
1763 	return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
1764 }
1765 
1766 #ifdef NODE_NOT_IN_PAGE_FLAGS
1767 int memdesc_nid(memdesc_flags_t mdf);
1768 #else
1769 static inline int memdesc_nid(memdesc_flags_t mdf)
1770 {
1771 	return (mdf.f >> NODES_PGSHIFT) & NODES_MASK;
1772 }
1773 #endif
1774 
1775 static inline int page_to_nid(const struct page *page)
1776 {
1777 	return memdesc_nid(PF_POISONED_CHECK(page)->flags);
1778 }
1779 
1780 static inline int folio_nid(const struct folio *folio)
1781 {
1782 	return memdesc_nid(folio->flags);
1783 }
1784 
1785 #ifdef CONFIG_NUMA_BALANCING
1786 /* page access time bits need to hold at least 4 seconds */
1787 #define PAGE_ACCESS_TIME_MIN_BITS	12
1788 #if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
1789 #define PAGE_ACCESS_TIME_BUCKETS				\
1790 	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
1791 #else
1792 #define PAGE_ACCESS_TIME_BUCKETS	0
1793 #endif
1794 
1795 #define PAGE_ACCESS_TIME_MASK				\
1796 	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
1797 
1798 static inline int cpu_pid_to_cpupid(int cpu, int pid)
1799 {
1800 	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1801 }
1802 
1803 static inline int cpupid_to_pid(int cpupid)
1804 {
1805 	return cpupid & LAST__PID_MASK;
1806 }
1807 
1808 static inline int cpupid_to_cpu(int cpupid)
1809 {
1810 	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1811 }
1812 
1813 static inline int cpupid_to_nid(int cpupid)
1814 {
1815 	return cpu_to_node(cpupid_to_cpu(cpupid));
1816 }
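
/*
 * Example (an illustrative sketch): a cpupid packs the last CPU and PID that
 * accessed a folio, truncated to LAST__CPU_MASK/LAST__PID_MASK bits, so only
 * the low bits of each can be recovered.
 *
 *	int cpupid = cpu_pid_to_cpupid(cpu, current->pid);
 *	int cpu_lo = cpupid_to_cpu(cpupid);
 *	int pid_lo = cpupid_to_pid(cpupid);
 *	int nid    = cpupid_to_nid(cpupid);
 */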
1817 
1818 static inline bool cpupid_pid_unset(int cpupid)
1819 {
1820 	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1821 }
1822 
1823 static inline bool cpupid_cpu_unset(int cpupid)
1824 {
1825 	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1826 }
1827 
1828 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1829 {
1830 	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1831 }
1832 
1833 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1834 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1835 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1836 {
1837 	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1838 }
1839 
1840 static inline int folio_last_cpupid(struct folio *folio)
1841 {
1842 	return folio->_last_cpupid;
1843 }
1844 static inline void page_cpupid_reset_last(struct page *page)
1845 {
1846 	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1847 }
1848 #else
1849 static inline int folio_last_cpupid(struct folio *folio)
1850 {
1851 	return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1852 }
1853 
1854 int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
1855 
1856 static inline void page_cpupid_reset_last(struct page *page)
1857 {
1858 	page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1859 }
1860 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
1861 
1862 static inline int folio_xchg_access_time(struct folio *folio, int time)
1863 {
1864 	int last_time;
1865 
1866 	last_time = folio_xchg_last_cpupid(folio,
1867 					   time >> PAGE_ACCESS_TIME_BUCKETS);
1868 	return last_time << PAGE_ACCESS_TIME_BUCKETS;
1869 }
1870 
1871 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1872 {
1873 	unsigned int pid_bit;
1874 
1875 	pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
1876 	if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
1877 		__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
1878 	}
1879 }
1880 
1881 bool folio_use_access_time(struct folio *folio);
1882 #else /* !CONFIG_NUMA_BALANCING */
1883 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1884 {
1885 	return folio_nid(folio); /* XXX */
1886 }
1887 
1888 static inline int folio_xchg_access_time(struct folio *folio, int time)
1889 {
1890 	return 0;
1891 }
1892 
1893 static inline int folio_last_cpupid(struct folio *folio)
1894 {
1895 	return folio_nid(folio); /* XXX */
1896 }
1897 
1898 static inline int cpupid_to_nid(int cpupid)
1899 {
1900 	return -1;
1901 }
1902 
1903 static inline int cpupid_to_pid(int cpupid)
1904 {
1905 	return -1;
1906 }
1907 
1908 static inline int cpupid_to_cpu(int cpupid)
1909 {
1910 	return -1;
1911 }
1912 
1913 static inline int cpu_pid_to_cpupid(int nid, int pid)
1914 {
1915 	return -1;
1916 }
1917 
1918 static inline bool cpupid_pid_unset(int cpupid)
1919 {
1920 	return true;
1921 }
1922 
1923 static inline void page_cpupid_reset_last(struct page *page)
1924 {
1925 }
1926 
1927 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1928 {
1929 	return false;
1930 }
1931 
1932 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1933 {
1934 }
1935 static inline bool folio_use_access_time(struct folio *folio)
1936 {
1937 	return false;
1938 }
1939 #endif /* CONFIG_NUMA_BALANCING */
1940 
1941 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1942 
1943 /*
1944  * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to set
1945  * the tags for all pages to the native kernel tag value 0xff, as the default
1946  * value 0x00 then maps to 0xff.
1947  */
1948 
1949 static inline u8 page_kasan_tag(const struct page *page)
1950 {
1951 	u8 tag = KASAN_TAG_KERNEL;
1952 
1953 	if (kasan_enabled()) {
1954 		tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1955 		tag ^= 0xff;
1956 	}
1957 
1958 	return tag;
1959 }
1960 
1961 static inline void page_kasan_tag_set(struct page *page, u8 tag)
1962 {
1963 	unsigned long old_flags, flags;
1964 
1965 	if (!kasan_enabled())
1966 		return;
1967 
1968 	tag ^= 0xff;
1969 	old_flags = READ_ONCE(page->flags.f);
1970 	do {
1971 		flags = old_flags;
1972 		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1973 		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1974 	} while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
1975 }
1976 
1977 static inline void page_kasan_tag_reset(struct page *page)
1978 {
1979 	if (kasan_enabled())
1980 		page_kasan_tag_set(page, KASAN_TAG_KERNEL);
1981 }
1982 
1983 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1984 
1985 static inline u8 page_kasan_tag(const struct page *page)
1986 {
1987 	return 0xff;
1988 }
1989 
1990 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1991 static inline void page_kasan_tag_reset(struct page *page) { }
1992 
1993 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1994 
1995 static inline struct zone *page_zone(const struct page *page)
1996 {
1997 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1998 }
1999 
2000 static inline pg_data_t *page_pgdat(const struct page *page)
2001 {
2002 	return NODE_DATA(page_to_nid(page));
2003 }
2004 
2005 static inline pg_data_t *folio_pgdat(const struct folio *folio)
2006 {
2007 	return NODE_DATA(folio_nid(folio));
2008 }
2009 
2010 static inline struct zone *folio_zone(const struct folio *folio)
2011 {
2012 	return &folio_pgdat(folio)->node_zones[folio_zonenum(folio)];
2013 }
2014 
2015 #ifdef SECTION_IN_PAGE_FLAGS
2016 static inline void set_page_section(struct page *page, unsigned long section)
2017 {
2018 	page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
2019 	page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
2020 }
2021 
2022 static inline unsigned long memdesc_section(memdesc_flags_t mdf)
2023 {
2024 	return (mdf.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
2025 }
2026 #else /* !SECTION_IN_PAGE_FLAGS */
2027 static inline unsigned long memdesc_section(memdesc_flags_t mdf)
2028 {
2029 	return 0;
2030 }
2031 #endif /* SECTION_IN_PAGE_FLAGS */
2032 
2033 /**
2034  * folio_pfn - Return the Page Frame Number of a folio.
2035  * @folio: The folio.
2036  *
2037  * A folio may contain multiple pages.  The pages have consecutive
2038  * Page Frame Numbers.
2039  *
2040  * Return: The Page Frame Number of the first page in the folio.
2041  */
2042 static inline unsigned long folio_pfn(const struct folio *folio)
2043 {
2044 	return page_to_pfn(&folio->page);
2045 }
2046 
2047 static inline struct folio *pfn_folio(unsigned long pfn)
2048 {
2049 	return page_folio(pfn_to_page(pfn));
2050 }
2051 
2052 #ifdef CONFIG_MMU
2053 static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
2054 {
2055 	return pfn_pte(page_to_pfn(page), pgprot);
2056 }
2057 
2058 /**
2059  * folio_mk_pte - Create a PTE for this folio
2060  * @folio: The folio to create a PTE for
2061  * @pgprot: The page protection bits to use
2062  *
2063  * Create a page table entry for the first page of this folio.
2064  * This is suitable for passing to set_ptes().
2065  *
2066  * Return: A page table entry suitable for mapping this folio.
2067  */
2068 static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
2069 {
2070 	return pfn_pte(folio_pfn(folio), pgprot);
2071 }
2072 
2073 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2074 /**
2075  * folio_mk_pmd - Create a PMD for this folio
2076  * @folio: The folio to create a PMD for
2077  * @pgprot: The page protection bits to use
2078  *
2079  * Create a page table entry for the first page of this folio.
2080  * This is suitable for passing to set_pmd_at().
2081  *
2082  * Return: A page table entry suitable for mapping this folio.
2083  */
2084 static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
2085 {
2086 	return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
2087 }
2088 
2089 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2090 /**
2091  * folio_mk_pud - Create a PUD for this folio
2092  * @folio: The folio to create a PUD for
2093  * @pgprot: The page protection bits to use
2094  *
2095  * Create a page table entry for the first page of this folio.
2096  * This is suitable for passing to set_pud_at().
2097  *
2098  * Return: A page table entry suitable for mapping this folio.
2099  */
2100 static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
2101 {
2102 	return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
2103 }
2104 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2105 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2106 #endif /* CONFIG_MMU */
2107 
2108 static inline bool folio_has_pincount(const struct folio *folio)
2109 {
2110 	if (IS_ENABLED(CONFIG_64BIT))
2111 		return folio_test_large(folio);
2112 	return folio_order(folio) > 1;
2113 }
2114 
2115 /**
2116  * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
2117  * @folio: The folio.
2118  *
2119  * This function checks if a folio has been pinned via a call to
2120  * a function in the pin_user_pages() family.
2121  *
2122  * For small folios, the return value is partially fuzzy: false is not fuzzy,
2123  * because it means "definitely not pinned for DMA", but true means "probably
2124  * pinned for DMA, but possibly a false positive due to having at least
2125  * GUP_PIN_COUNTING_BIAS worth of normal folio references".
2126  *
2127  * False positives are OK, because: a) it's unlikely for a folio to
2128  * get that many refcounts, and b) all the callers of this routine are
2129  * expected to be able to deal gracefully with a false positive.
2130  *
2131  * For most large folios, the result will be exactly correct. That's because
2132  * we have more tracking data available: the _pincount field is used
2133  * instead of the GUP_PIN_COUNTING_BIAS scheme.
2134  *
2135  * For more information, please see Documentation/core-api/pin_user_pages.rst.
2136  *
2137  * Return: True, if it is likely that the folio has been "dma-pinned".
2138  * False, if the folio is definitely not dma-pinned.
2139  */
2140 static inline bool folio_maybe_dma_pinned(struct folio *folio)
2141 {
2142 	if (folio_has_pincount(folio))
2143 		return atomic_read(&folio->_pincount) > 0;
2144 
2145 	/*
2146 	 * folio_ref_count() is signed. If that refcount overflows, then
2147 	 * folio_ref_count() returns a negative value, and callers will avoid
2148 	 * further incrementing the refcount.
2149 	 *
2150 	 * Here, for that overflow case, use the sign bit to count a little
2151 	 * bit higher via unsigned math, and thus still get an accurate result.
2152 	 */
2153 	return ((unsigned int)folio_ref_count(folio)) >=
2154 		GUP_PIN_COUNTING_BIAS;
2155 }
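
/*
 * Example (an illustrative sketch): callers typically treat the result as a
 * hint and take the conservative path when a pin is possible; the helper
 * below is hypothetical.
 *
 *	if (folio_maybe_dma_pinned(folio))
 *		return skip_or_copy_instead(folio);
 */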
2156 
2157 /*
2158  * This should most likely only be called during fork() to see whether we
2159  * should break the cow immediately for an anon page on the src mm.
2160  *
2161  * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
2162  */
2163 static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
2164 					  struct folio *folio)
2165 {
2166 	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
2167 
2168 	if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
2169 		return false;
2170 
2171 	return folio_maybe_dma_pinned(folio);
2172 }
2173 
2174 /**
2175  * is_zero_page - Query if a page is a zero page
2176  * @page: The page to query
2177  *
2178  * This returns true if @page is one of the permanent zero pages.
2179  */
2180 static inline bool is_zero_page(const struct page *page)
2181 {
2182 	return is_zero_pfn(page_to_pfn(page));
2183 }
2184 
2185 /**
2186  * is_zero_folio - Query if a folio is a zero page
2187  * @folio: The folio to query
2188  *
2189  * This returns true if @folio is one of the permanent zero pages.
2190  */
2191 static inline bool is_zero_folio(const struct folio *folio)
2192 {
2193 	return is_zero_page(&folio->page);
2194 }
2195 
2196 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
2197 #ifdef CONFIG_MIGRATION
2198 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2199 {
2200 #ifdef CONFIG_CMA
2201 	int mt = folio_migratetype(folio);
2202 
2203 	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
2204 		return false;
2205 #endif
2206 	/* The zero page can be "pinned" but gets special handling. */
2207 	if (is_zero_folio(folio))
2208 		return true;
2209 
2210 	/* Coherent device memory must always allow eviction. */
2211 	if (folio_is_device_coherent(folio))
2212 		return false;
2213 
2214 	/*
2215 	 * Filesystems can only tolerate transient delays to truncate and
2216 	 * hole-punch operations
2217 	 */
2218 	if (folio_is_fsdax(folio))
2219 		return false;
2220 
2221 	/* Otherwise, non-movable zone folios can be pinned. */
2222 	return !folio_is_zone_movable(folio);
2223 
2224 }
2225 #else
2226 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2227 {
2228 	return true;
2229 }
2230 #endif
2231 
2232 static inline void set_page_zone(struct page *page, enum zone_type zone)
2233 {
2234 	page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
2235 	page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2236 }
2237 
2238 static inline void set_page_node(struct page *page, unsigned long node)
2239 {
2240 	page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
2241 	page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
2242 }
2243 
2244 static inline void set_page_links(struct page *page, enum zone_type zone,
2245 	unsigned long node, unsigned long pfn)
2246 {
2247 	set_page_zone(page, zone);
2248 	set_page_node(page, node);
2249 #ifdef SECTION_IN_PAGE_FLAGS
2250 	set_page_section(page, pfn_to_section_nr(pfn));
2251 #endif
2252 }
2253 
2254 /**
2255  * folio_nr_pages - The number of pages in the folio.
2256  * @folio: The folio.
2257  *
2258  * Return: A positive power of two.
2259  */
2260 static inline unsigned long folio_nr_pages(const struct folio *folio)
2261 {
2262 	if (!folio_test_large(folio))
2263 		return 1;
2264 	return folio_large_nr_pages(folio);
2265 }
2266 
2267 #if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
2268 /*
2269  * We don't expect any folios that exceed buddy sizes (and consequently
2270  * memory sections).
2271  */
2272 #define MAX_FOLIO_ORDER		MAX_PAGE_ORDER
2273 #elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
2274 /*
2275  * Only pages within a single memory section are guaranteed to be
2276  * contiguous. By limiting folios to a single memory section, all folio
2277  * pages are guaranteed to be contiguous.
2278  */
2279 #define MAX_FOLIO_ORDER		PFN_SECTION_SHIFT
2280 #elif defined(CONFIG_HUGETLB_PAGE)
2281 /*
2282  * There is no real limit on the folio size. We limit them to the maximum we
2283  * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
2284  * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
2285  */
2286 #define MAX_FOLIO_ORDER		get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
2287 #else
2288 /*
2289  * Without hugetlb, gigantic folios that are bigger than a single PUD are
2290  * currently impossible.
2291  */
2292 #define MAX_FOLIO_ORDER		PUD_ORDER
2293 #endif
2294 
2295 #define MAX_FOLIO_NR_PAGES	(1UL << MAX_FOLIO_ORDER)
2296 
2297 /*
2298  * compound_nr() returns the number of pages in this potentially compound
2299  * page.  compound_nr() can be called on a tail page, and is defined to
2300  * return 1 in that case.
2301  */
2302 static inline unsigned long compound_nr(const struct page *page)
2303 {
2304 	const struct folio *folio = (struct folio *)page;
2305 
2306 	if (!test_bit(PG_head, &folio->flags.f))
2307 		return 1;
2308 	return folio_large_nr_pages(folio);
2309 }
2310 
2311 /**
2312  * folio_next - Move to the next physical folio.
2313  * @folio: The folio we're currently operating on.
2314  *
2315  * If you have physically contiguous memory which may span more than
2316  * one folio (eg a &struct bio_vec), use this function to move from one
2317  * folio to the next.  Do not use it if the memory is only virtually
2318  * contiguous as the folios are almost certainly not adjacent to each
2319  * other.  This is the folio equivalent to writing ``page++``.
2320  *
2321  * Context: We assume that the folios are refcounted and/or locked at a
2322  * higher level and do not adjust the reference counts.
2323  * Return: The next struct folio.
2324  */
2325 static inline struct folio *folio_next(struct folio *folio)
2326 {
2327 	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2328 }
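
/*
 * Example (an illustrative sketch): walking a physically contiguous range
 * one folio at a time.  The @start folio and @nr_bytes length are
 * hypothetical.
 *
 *	struct folio *folio = start;
 *	size_t remaining = nr_bytes;
 *
 *	while (remaining) {
 *		size_t chunk = min(remaining, folio_size(folio));
 *
 *		... operate on this folio ...
 *		remaining -= chunk;
 *		folio = folio_next(folio);
 *	}
 */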
2329 
2330 /**
2331  * folio_shift - The size of the memory described by this folio.
2332  * @folio: The folio.
2333  *
2334  * A folio represents a number of bytes which is a power-of-two in size.
2335  * This function tells you which power-of-two the folio is.  See also
2336  * folio_size() and folio_order().
2337  *
2338  * Context: The caller should have a reference on the folio to prevent
2339  * it from being split.  It is not necessary for the folio to be locked.
2340  * Return: The base-2 logarithm of the size of this folio.
2341  */
2342 static inline unsigned int folio_shift(const struct folio *folio)
2343 {
2344 	return PAGE_SHIFT + folio_order(folio);
2345 }
2346 
2347 /**
2348  * folio_size - The number of bytes in a folio.
2349  * @folio: The folio.
2350  *
2351  * Context: The caller should have a reference on the folio to prevent
2352  * it from being split.  It is not necessary for the folio to be locked.
2353  * Return: The number of bytes in this folio.
2354  */
2355 static inline size_t folio_size(const struct folio *folio)
2356 {
2357 	return PAGE_SIZE << folio_order(folio);
2358 }
2359 
2360 /**
2361  * folio_maybe_mapped_shared - Whether the folio is mapped into the page
2362  *			       tables of more than one MM
2363  * @folio: The folio.
2364  *
2365  * This function checks if the folio maybe currently mapped into more than one
2366  * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
2367  * MM ("mapped exclusively").
2368  *
2369  * For KSM folios, this function also returns "mapped shared" when a folio is
2370  * mapped multiple times into the same MM, because the individual page mappings
2371  * are independent.
2372  *
2373  * For small anonymous folios and anonymous hugetlb folios, the return
2374  * value will be exactly correct: non-KSM folios can be mapped at most once
2375  * into an MM, and they cannot be partially mapped. KSM folios are
2376  * considered shared even if mapped multiple times into the same MM.
2377  *
2378  * For other folios, the result can be fuzzy:
2379  *    #. For partially-mappable large folios (THP), the return value can wrongly
2380  *       indicate "mapped shared" (false positive) if a folio was mapped by
2381  *       more than two MMs at one point in time.
2382  *    #. For pagecache folios (including hugetlb), the return value can wrongly
2383  *       indicate "mapped shared" (false positive) when two VMAs in the same MM
2384  *       cover the same file range.
2385  *
2386  * Further, this function only considers current page table mappings that
2387  * are tracked using the folio mapcount(s).
2388  *
2389  * This function does not consider:
2390  *    #. If the folio might get mapped in the (near) future (e.g., swapcache,
2391  *       pagecache, temporary unmapping for migration).
2392  *    #. If the folio is mapped differently (VM_PFNMAP).
2393  *    #. If hugetlb page table sharing applies. Callers might want to check
2394  *       hugetlb_pmd_shared().
2395  *
2396  * Return: Whether the folio is estimated to be mapped into more than one MM.
2397  */
2398 static inline bool folio_maybe_mapped_shared(struct folio *folio)
2399 {
2400 	int mapcount = folio_mapcount(folio);
2401 
2402 	/* Only partially-mappable folios require more care. */
2403 	if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
2404 		return mapcount > 1;
2405 
2406 	/*
2407 	 * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ...
2408 	 * simply assume "mapped shared", nobody should really care
2409 	 * about this for arbitrary kernel allocations.
2410 	 */
2411 	if (!IS_ENABLED(CONFIG_MM_ID))
2412 		return true;
2413 
2414 	/*
2415 	 * A single mapping implies "mapped exclusively", even if the
2416 	 * folio flag says something different: it's easier to handle this
2417 	 * case here instead of on the RMAP hot path.
2418 	 */
2419 	if (mapcount <= 1)
2420 		return false;
2421 	return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
2422 }
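
/*
 * Example (an illustrative sketch): a policy decision that only applies to
 * folios exclusive to one MM can use this as a (possibly false-positive)
 * filter.
 *
 *	if (!folio_maybe_mapped_shared(folio))
 *		... safe to treat the folio as mapped by this MM only ...
 */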
2423 
2424 /**
2425  * folio_expected_ref_count - calculate the expected folio refcount
2426  * @folio: the folio
2427  *
2428  * Calculate the expected folio refcount, taking references from the pagecache,
2429  * swapcache, PG_private and page table mappings into account. Useful in
2430  * combination with folio_ref_count() to detect unexpected references (e.g.,
2431  * GUP or other temporary references).
2432  *
2433  * Currently does not consider references from the LRU cache. If the folio
2434  * was isolated from the LRU (which is the case during migration or split),
2435  * the LRU cache does not apply.
2436  *
2437  * Calling this function on an unmapped folio -- !folio_mapped() -- that is
2438  * locked will return a stable result.
2439  *
2440  * Calling this function on a mapped folio will not result in a stable result,
2441  * because nothing stops additional page table mappings from coming (e.g.,
2442  * fork()) or going (e.g., munmap()).
2443  *
2444  * Calling this function without the folio lock will also not result in a
2445  * stable result: for example, the folio might get dropped from the swapcache
2446  * concurrently.
2447  *
2448  * However, even when called without the folio lock or on a mapped folio,
2449  * this function can be used to detect unexpected references early (for example,
2450  * to decide whether it even makes sense to lock the folio and unmap it).
2451  *
2452  * The caller must add any reference (e.g., from folio_try_get()) it might be
2453  * holding itself to the result.
2454  *
2455  * Returns the expected folio refcount.
2456  */
2457 static inline int folio_expected_ref_count(const struct folio *folio)
2458 {
2459 	const int order = folio_order(folio);
2460 	int ref_count = 0;
2461 
2462 	if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
2463 		return 0;
2464 
2465 	/* One reference per page from the swapcache. */
2466 	ref_count += folio_test_swapcache(folio) << order;
2467 
2468 	if (!folio_test_anon(folio)) {
2469 		/* One reference per page from the pagecache. */
2470 		ref_count += !!folio->mapping << order;
2471 		/* One reference from PG_private. */
2472 		ref_count += folio_test_private(folio);
2473 	}
2474 
2475 	/* One reference per page table mapping. */
2476 	return ref_count + folio_mapcount(folio);
2477 }
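
/*
 * Example (an illustrative sketch): a caller holding one extra reference of
 * its own (e.g., from folio_try_get()) can detect unexpected references
 * such as temporary GUP pins before trying to migrate the folio.
 *
 *	if (folio_expected_ref_count(folio) + 1 != folio_ref_count(folio))
 *		... unexpected references exist, back off ...
 */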
2478 
2479 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
2480 static inline int arch_make_folio_accessible(struct folio *folio)
2481 {
2482 	return 0;
2483 }
2484 #endif
2485 
2486 /*
2487  * Some inline functions in vmstat.h depend on page_zone()
2488  */
2489 #include <linux/vmstat.h>
2490 
2491 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2492 #define HASHED_PAGE_VIRTUAL
2493 #endif
2494 
2495 #if defined(WANT_PAGE_VIRTUAL)
2496 static inline void *page_address(const struct page *page)
2497 {
2498 	return page->virtual;
2499 }
2500 static inline void set_page_address(struct page *page, void *address)
2501 {
2502 	page->virtual = address;
2503 }
2504 #define page_address_init()  do { } while(0)
2505 #endif
2506 
2507 #if defined(HASHED_PAGE_VIRTUAL)
2508 void *page_address(const struct page *page);
2509 void set_page_address(struct page *page, void *virtual);
2510 void page_address_init(void);
2511 #endif
2512 
2513 static __always_inline void *lowmem_page_address(const struct page *page)
2514 {
2515 	return page_to_virt(page);
2516 }
2517 
2518 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2519 #define page_address(page) lowmem_page_address(page)
2520 #define set_page_address(page, address)  do { } while(0)
2521 #define page_address_init()  do { } while(0)
2522 #endif
2523 
2524 static inline void *folio_address(const struct folio *folio)
2525 {
2526 	return page_address(&folio->page);
2527 }
2528 
2529 /*
2530  * Return true only if the page has been allocated with
2531  * ALLOC_NO_WATERMARKS and the low watermark was not
2532  * met, implying that the system is under some pressure.
2533  */
2534 static inline bool page_is_pfmemalloc(const struct page *page)
2535 {
2536 	/*
2537 	 * lru.next has bit 1 set if the page is allocated from the
2538 	 * pfmemalloc reserves.  Callers may simply overwrite it if
2539 	 * they do not need to preserve that information.
2540 	 */
2541 	return (uintptr_t)page->lru.next & BIT(1);
2542 }
2543 
2544 /*
2545  * Return true only if the folio has been allocated with
2546  * ALLOC_NO_WATERMARKS and the low watermark was not
2547  * met, implying that the system is under some pressure.
2548  */
2549 static inline bool folio_is_pfmemalloc(const struct folio *folio)
2550 {
2551 	/*
2552 	 * lru.next has bit 1 set if the page is allocated from the
2553 	 * pfmemalloc reserves.  Callers may simply overwrite it if
2554 	 * they do not need to preserve that information.
2555 	 */
2556 	return (uintptr_t)folio->lru.next & BIT(1);
2557 }
2558 
2559 /*
2560  * Only to be called by the page allocator on a freshly allocated
2561  * page.
2562  */
2563 static inline void set_page_pfmemalloc(struct page *page)
2564 {
2565 	page->lru.next = (void *)BIT(1);
2566 }
2567 
2568 static inline void clear_page_pfmemalloc(struct page *page)
2569 {
2570 	page->lru.next = NULL;
2571 }
2572 
2573 /*
2574  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
2575  */
2576 extern void pagefault_out_of_memory(void);
2577 
2578 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
2579 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
2580 
2581 /*
2582  * Parameter block passed down to zap_pte_range in exceptional cases.
2583  */
2584 struct zap_details {
2585 	struct folio *single_folio;	/* Locked folio to be unmapped */
2586 	bool even_cows;			/* Zap COWed private pages too? */
2587 	bool reclaim_pt;		/* Need reclaim page tables? */
2588 	zap_flags_t zap_flags;		/* Extra flags for zapping */
2589 };
2590 
2591 /*
2592  * Whether to drop the pte markers, for example, the uffd-wp information for
2593  * file-backed memory.  This should only be specified when we will completely
2594  * drop the page in the mm, either by truncation or unmapping of the vma.  By
2595  * default, the flag is not set.
2596  */
2597 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
2598 /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
2599 #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
2600 
2601 #ifdef CONFIG_MMU
2602 extern bool can_do_mlock(void);
2603 #else
2604 static inline bool can_do_mlock(void) { return false; }
2605 #endif
2606 extern int user_shm_lock(size_t, struct ucounts *);
2607 extern void user_shm_unlock(size_t, struct ucounts *);
2608 
2609 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2610 			     pte_t pte);
2611 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2612 			     pte_t pte);
2613 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2614 				  unsigned long addr, pmd_t pmd);
2615 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2616 				pmd_t pmd);
2617 struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
2618 		pud_t pud);
2619 
2620 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2621 		  unsigned long size);
2622 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2623 			   unsigned long size, struct zap_details *details);
2624 static inline void zap_vma_pages(struct vm_area_struct *vma)
2625 {
2626 	zap_page_range_single(vma, vma->vm_start,
2627 			      vma->vm_end - vma->vm_start, NULL);
2628 }
2629 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
2630 		struct vm_area_struct *start_vma, unsigned long start,
2631 		unsigned long end, unsigned long tree_end);
2632 
2633 struct mmu_notifier_range;
2634 
2635 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2636 		unsigned long end, unsigned long floor, unsigned long ceiling);
2637 int
2638 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2639 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2640 			void *buf, int len, int write);
2641 
2642 struct follow_pfnmap_args {
2643 	/**
2644 	 * Inputs:
2645 	 * @vma: Pointer to the &struct vm_area_struct
2646 	 * @address: the virtual address to walk
2647 	 */
2648 	struct vm_area_struct *vma;
2649 	unsigned long address;
2650 	/**
2651 	 * Internals:
2652 	 *
2653 	 * The caller shouldn't touch any of these.
2654 	 */
2655 	spinlock_t *lock;
2656 	pte_t *ptep;
2657 	/**
2658 	 * Outputs:
2659 	 *
2660 	 * @pfn: the PFN of the address
2661 	 * @addr_mask: address mask covering pfn
2662 	 * @pgprot: the pgprot_t of the mapping
2663 	 * @writable: whether the mapping is writable
2664 	 * @special: whether the mapping is a special mapping (real PFN maps)
2665 	 */
2666 	unsigned long pfn;
2667 	unsigned long addr_mask;
2668 	pgprot_t pgprot;
2669 	bool writable;
2670 	bool special;
2671 };
2672 int follow_pfnmap_start(struct follow_pfnmap_args *args);
2673 void follow_pfnmap_end(struct follow_pfnmap_args *args);
2674 
2675 extern void truncate_pagecache(struct inode *inode, loff_t new);
2676 extern void truncate_setsize(struct inode *inode, loff_t newsize);
2677 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2678 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2679 int generic_error_remove_folio(struct address_space *mapping,
2680 		struct folio *folio);
2681 
2682 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2683 		unsigned long address, struct pt_regs *regs);
2684 
2685 #ifdef CONFIG_MMU
2686 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2687 				  unsigned long address, unsigned int flags,
2688 				  struct pt_regs *regs);
2689 extern int fixup_user_fault(struct mm_struct *mm,
2690 			    unsigned long address, unsigned int fault_flags,
2691 			    bool *unlocked);
2692 void unmap_mapping_pages(struct address_space *mapping,
2693 		pgoff_t start, pgoff_t nr, bool even_cows);
2694 void unmap_mapping_range(struct address_space *mapping,
2695 		loff_t const holebegin, loff_t const holelen, int even_cows);
2696 #else
2697 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2698 					 unsigned long address, unsigned int flags,
2699 					 struct pt_regs *regs)
2700 {
2701 	/* should never happen if there's no MMU */
2702 	BUG();
2703 	return VM_FAULT_SIGBUS;
2704 }
2705 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2706 		unsigned int fault_flags, bool *unlocked)
2707 {
2708 	/* should never happen if there's no MMU */
2709 	BUG();
2710 	return -EFAULT;
2711 }
2712 static inline void unmap_mapping_pages(struct address_space *mapping,
2713 		pgoff_t start, pgoff_t nr, bool even_cows) { }
2714 static inline void unmap_mapping_range(struct address_space *mapping,
2715 		loff_t const holebegin, loff_t const holelen, int even_cows) { }
2716 #endif
2717 
2718 static inline void unmap_shared_mapping_range(struct address_space *mapping,
2719 		loff_t const holebegin, loff_t const holelen)
2720 {
2721 	unmap_mapping_range(mapping, holebegin, holelen, 0);
2722 }
2723 
2724 static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2725 						unsigned long addr);
2726 
2727 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2728 		void *buf, int len, unsigned int gup_flags);
2729 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2730 		void *buf, int len, unsigned int gup_flags);
2731 
2732 #ifdef CONFIG_BPF_SYSCALL
2733 extern int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
2734 			      void *buf, int len, unsigned int gup_flags);
2735 #endif
2736 
2737 long get_user_pages_remote(struct mm_struct *mm,
2738 			   unsigned long start, unsigned long nr_pages,
2739 			   unsigned int gup_flags, struct page **pages,
2740 			   int *locked);
2741 long pin_user_pages_remote(struct mm_struct *mm,
2742 			   unsigned long start, unsigned long nr_pages,
2743 			   unsigned int gup_flags, struct page **pages,
2744 			   int *locked);
2745 
2746 /*
2747  * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
2748  */
2749 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2750 						    unsigned long addr,
2751 						    int gup_flags,
2752 						    struct vm_area_struct **vmap)
2753 {
2754 	struct page *page;
2755 	struct vm_area_struct *vma;
2756 	int got;
2757 
2758 	if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
2759 		return ERR_PTR(-EINVAL);
2760 
2761 	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2762 
2763 	if (got < 0)
2764 		return ERR_PTR(got);
2765 
2766 	vma = vma_lookup(mm, addr);
2767 	if (WARN_ON_ONCE(!vma)) {
2768 		put_page(page);
2769 		return ERR_PTR(-EINVAL);
2770 	}
2771 
2772 	*vmap = vma;
2773 	return page;
2774 }
2775 
2776 long get_user_pages(unsigned long start, unsigned long nr_pages,
2777 		    unsigned int gup_flags, struct page **pages);
2778 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2779 		    unsigned int gup_flags, struct page **pages);
2780 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2781 		    struct page **pages, unsigned int gup_flags);
2782 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2783 		    struct page **pages, unsigned int gup_flags);
2784 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
2785 		      struct folio **folios, unsigned int max_folios,
2786 		      pgoff_t *offset);
2787 int folio_add_pins(struct folio *folio, unsigned int pins);
2788 
2789 int get_user_pages_fast(unsigned long start, int nr_pages,
2790 			unsigned int gup_flags, struct page **pages);
2791 int pin_user_pages_fast(unsigned long start, int nr_pages,
2792 			unsigned int gup_flags, struct page **pages);
2793 void folio_add_pin(struct folio *folio);
2794 
2795 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2796 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2797 			const struct task_struct *task, bool bypass_rlim);
2798 
2799 struct kvec;
2800 struct page *get_dump_page(unsigned long addr, int *locked);
2801 
2802 bool folio_mark_dirty(struct folio *folio);
2803 bool folio_mark_dirty_lock(struct folio *folio);
2804 bool set_page_dirty(struct page *page);
2805 int set_page_dirty_lock(struct page *page);
2806 
2807 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2808 
2809 /*
2810  * Flags used by change_protection().  For now we make it a bitmap so
2811  * that we can pass in multiple flags just like parameters.  However
2812  * for now all the callers only use one of the flags at a
2813  * time.
2814  */
2815 /*
2816  * Whether we should manually check if we can map individual PTEs writable,
2817  * because something (e.g., COW, uffd-wp) blocks that from happening for all
2818  * PTEs automatically in a writable mapping.
2819  */
2820 #define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
2821 /* Whether this protection change is for NUMA hints */
2822 #define  MM_CP_PROT_NUMA                   (1UL << 1)
2823 /* Whether this change is for write protecting */
2824 #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
2825 #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
2826 #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
2827 					    MM_CP_UFFD_WP_RESOLVE)
2828 
2829 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2830 			     pte_t pte);
2831 extern long change_protection(struct mmu_gather *tlb,
2832 			      struct vm_area_struct *vma, unsigned long start,
2833 			      unsigned long end, unsigned long cp_flags);
2834 extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
2835 	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
2836 	  unsigned long start, unsigned long end, vm_flags_t newflags);
2837 
2838 /*
2839  * Doesn't attempt to fault and will return short (fewer pages than requested).
2840  */
2841 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2842 			     unsigned int gup_flags, struct page **pages);
2843 
2844 static inline bool get_user_page_fast_only(unsigned long addr,
2845 			unsigned int gup_flags, struct page **pagep)
2846 {
2847 	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2848 }
2849 /*
2850  * per-process (per-mm_struct) statistics.
2851  */
2852 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2853 {
2854 	return percpu_counter_read_positive(&mm->rss_stat[member]);
2855 }
2856 
2857 static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
2858 {
2859 	return percpu_counter_sum_positive(&mm->rss_stat[member]);
2860 }
2861 
2862 void mm_trace_rss_stat(struct mm_struct *mm, int member);
2863 
2864 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2865 {
2866 	percpu_counter_add(&mm->rss_stat[member], value);
2867 
2868 	mm_trace_rss_stat(mm, member);
2869 }
2870 
2871 static inline void inc_mm_counter(struct mm_struct *mm, int member)
2872 {
2873 	percpu_counter_inc(&mm->rss_stat[member]);
2874 
2875 	mm_trace_rss_stat(mm, member);
2876 }
2877 
2878 static inline void dec_mm_counter(struct mm_struct *mm, int member)
2879 {
2880 	percpu_counter_dec(&mm->rss_stat[member]);
2881 
2882 	mm_trace_rss_stat(mm, member);
2883 }
2884 
2885 /* Optimized variant when folio is already known not to be anon */
2886 static inline int mm_counter_file(struct folio *folio)
2887 {
2888 	if (folio_test_swapbacked(folio))
2889 		return MM_SHMEMPAGES;
2890 	return MM_FILEPAGES;
2891 }
2892 
2893 static inline int mm_counter(struct folio *folio)
2894 {
2895 	if (folio_test_anon(folio))
2896 		return MM_ANONPAGES;
2897 	return mm_counter_file(folio);
2898 }
2899 
2900 static inline unsigned long get_mm_rss(struct mm_struct *mm)
2901 {
2902 	return get_mm_counter(mm, MM_FILEPAGES) +
2903 		get_mm_counter(mm, MM_ANONPAGES) +
2904 		get_mm_counter(mm, MM_SHMEMPAGES);
2905 }
2906 
2907 static inline unsigned long get_mm_rss_sum(struct mm_struct *mm)
2908 {
2909 	return get_mm_counter_sum(mm, MM_FILEPAGES) +
2910 		get_mm_counter_sum(mm, MM_ANONPAGES) +
2911 		get_mm_counter_sum(mm, MM_SHMEMPAGES);
2912 }
2913 
2914 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2915 {
2916 	return max(mm->hiwater_rss, get_mm_rss(mm));
2917 }
2918 
2919 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2920 {
2921 	return max(mm->hiwater_vm, mm->total_vm);
2922 }
2923 
2924 static inline void update_hiwater_rss(struct mm_struct *mm)
2925 {
2926 	unsigned long _rss = get_mm_rss(mm);
2927 
2928 	if (data_race(mm->hiwater_rss) < _rss)
2929 		data_race(mm->hiwater_rss = _rss);
2930 }
2931 
2932 static inline void update_hiwater_vm(struct mm_struct *mm)
2933 {
2934 	if (mm->hiwater_vm < mm->total_vm)
2935 		mm->hiwater_vm = mm->total_vm;
2936 }
2937 
2938 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2939 {
2940 	mm->hiwater_rss = get_mm_rss(mm);
2941 }
2942 
2943 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2944 					 struct mm_struct *mm)
2945 {
2946 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2947 
2948 	if (*maxrss < hiwater_rss)
2949 		*maxrss = hiwater_rss;
2950 }
2951 
2952 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2953 static inline int pte_special(pte_t pte)
2954 {
2955 	return 0;
2956 }
2957 
2958 static inline pte_t pte_mkspecial(pte_t pte)
2959 {
2960 	return pte;
2961 }
2962 #endif
2963 
2964 #ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
2965 static inline bool pmd_special(pmd_t pmd)
2966 {
2967 	return false;
2968 }
2969 
2970 static inline pmd_t pmd_mkspecial(pmd_t pmd)
2971 {
2972 	return pmd;
2973 }
2974 #endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
2975 
2976 #ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
2977 static inline bool pud_special(pud_t pud)
2978 {
2979 	return false;
2980 }
2981 
2982 static inline pud_t pud_mkspecial(pud_t pud)
2983 {
2984 	return pud;
2985 }
2986 #endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
2987 
2988 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2989 			     spinlock_t **ptl);
2990 
2991 #ifdef __PAGETABLE_P4D_FOLDED
2992 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2993 						unsigned long address)
2994 {
2995 	return 0;
2996 }
2997 #else
2998 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2999 #endif
3000 
3001 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
3002 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
3003 						unsigned long address)
3004 {
3005 	return 0;
3006 }
3007 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
3008 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
3009 
3010 #else
3011 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
3012 
3013 static inline void mm_inc_nr_puds(struct mm_struct *mm)
3014 {
3015 	if (mm_pud_folded(mm))
3016 		return;
3017 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
3018 }
3019 
3020 static inline void mm_dec_nr_puds(struct mm_struct *mm)
3021 {
3022 	if (mm_pud_folded(mm))
3023 		return;
3024 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
3025 }
3026 #endif
3027 
3028 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
3029 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
3030 						unsigned long address)
3031 {
3032 	return 0;
3033 }
3034 
3035 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
3036 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
3037 
3038 #else
3039 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
3040 
3041 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
3042 {
3043 	if (mm_pmd_folded(mm))
3044 		return;
3045 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
3046 }
3047 
3048 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
3049 {
3050 	if (mm_pmd_folded(mm))
3051 		return;
3052 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
3053 }
3054 #endif
3055 
3056 #ifdef CONFIG_MMU
3057 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
3058 {
3059 	atomic_long_set(&mm->pgtables_bytes, 0);
3060 }
3061 
3062 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
3063 {
3064 	return atomic_long_read(&mm->pgtables_bytes);
3065 }
3066 
3067 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
3068 {
3069 	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
3070 }
3071 
3072 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
3073 {
3074 	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
3075 }
3076 #else
3077 
3078 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
3079 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
3080 {
3081 	return 0;
3082 }
3083 
3084 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
3085 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
3086 #endif
3087 
3088 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
3089 int __pte_alloc_kernel(pmd_t *pmd);
3090 
3091 #if defined(CONFIG_MMU)
3092 
3093 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
3094 		unsigned long address)
3095 {
3096 	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
3097 		NULL : p4d_offset(pgd, address);
3098 }
3099 
3100 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
3101 		unsigned long address)
3102 {
3103 	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
3104 		NULL : pud_offset(p4d, address);
3105 }
3106 
3107 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3108 {
3109 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
3110 		NULL : pmd_offset(pud, address);
3111 }
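
/*
 * Example (an illustrative sketch): allocating down to the PMD level for a
 * given address.  Each helper returns a pointer into an existing table or
 * allocates the lower-level table on demand; NULL means the allocation
 * failed.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *
 *	if (!pmd)
 *		return -ENOMEM;
 */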
3112 #endif /* CONFIG_MMU */
3113 
3114 enum pt_flags {
3115 	PT_kernel = PG_referenced,
3116 	PT_reserved = PG_reserved,
3117 	/* High bits are used for zone/node/section */
3118 };
3119 
3120 static inline struct ptdesc *virt_to_ptdesc(const void *x)
3121 {
3122 	return page_ptdesc(virt_to_page(x));
3123 }
3124 
3125 /**
3126  * ptdesc_address - Virtual address of page table.
3127  * @pt: Page table descriptor.
3128  *
3129  * Return: The first byte of the page table described by @pt.
3130  */
3131 static inline void *ptdesc_address(const struct ptdesc *pt)
3132 {
3133 	return folio_address(ptdesc_folio(pt));
3134 }
3135 
3136 static inline bool pagetable_is_reserved(struct ptdesc *pt)
3137 {
3138 	return test_bit(PT_reserved, &pt->pt_flags.f);
3139 }
3140 
3141 /**
3142  * ptdesc_set_kernel - Mark a ptdesc used to map the kernel
3143  * @ptdesc: The ptdesc to be marked
3144  *
3145  * Kernel page tables often need special handling. Set a flag so that
3146  * the handling code knows this ptdesc will not be used for userspace.
3147  */
3148 static inline void ptdesc_set_kernel(struct ptdesc *ptdesc)
3149 {
3150 	set_bit(PT_kernel, &ptdesc->pt_flags.f);
3151 }
3152 
3153 /**
3154  * ptdesc_clear_kernel - Mark a ptdesc as no longer used to map the kernel
3155  * @ptdesc: The ptdesc to be unmarked
3156  *
3157  * Use when the ptdesc is no longer used to map the kernel and no longer
3158  * needs special handling.
3159  */
3160 static inline void ptdesc_clear_kernel(struct ptdesc *ptdesc)
3161 {
3162 	/*
3163 	 * Note: the 'PG_referenced' bit does not strictly need to be
3164 	 * cleared before freeing the page. But this is nice for
3165 	 * symmetry.
3166 	 */
3167 	clear_bit(PT_kernel, &ptdesc->pt_flags.f);
3168 }
3169 
3170 /**
3171  * ptdesc_test_kernel - Check if a ptdesc is used to map the kernel
3172  * @ptdesc: The ptdesc being tested
3173  *
3174  * Call to tell if the ptdesc is used to map the kernel.
3175  */
3176 static inline bool ptdesc_test_kernel(const struct ptdesc *ptdesc)
3177 {
3178 	return test_bit(PT_kernel, &ptdesc->pt_flags.f);
3179 }
3180 
3181 /**
3182  * pagetable_alloc - Allocate pagetables
3183  * @gfp:    GFP flags
3184  * @order:  desired pagetable order
3185  *
3186  * pagetable_alloc allocates memory for page tables as well as a page table
3187  * descriptor to describe that memory.
3188  *
3189  * Return: The ptdesc describing the allocated page tables.
3190  */
3191 static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
3192 {
3193 	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
3194 
3195 	return page_ptdesc(page);
3196 }
3197 #define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
3198 
3199 static inline void __pagetable_free(struct ptdesc *pt)
3200 {
3201 	struct page *page = ptdesc_page(pt);
3202 
3203 	__free_pages(page, compound_order(page));
3204 }
3205 
3206 #ifdef CONFIG_ASYNC_KERNEL_PGTABLE_FREE
3207 void pagetable_free_kernel(struct ptdesc *pt);
3208 #else
3209 static inline void pagetable_free_kernel(struct ptdesc *pt)
3210 {
3211 	__pagetable_free(pt);
3212 }
3213 #endif
3214 /**
3215  * pagetable_free - Free pagetables
3216  * @pt:	The page table descriptor
3217  *
3218  * pagetable_free frees the memory of all page tables described by a page
3219  * table descriptor and the memory for the descriptor itself.
3220  */
3221 static inline void pagetable_free(struct ptdesc *pt)
3222 {
3223 	if (ptdesc_test_kernel(pt)) {
3224 		ptdesc_clear_kernel(pt);
3225 		pagetable_free_kernel(pt);
3226 	} else {
3227 		__pagetable_free(pt);
3228 	}
3229 }
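
/*
 * Illustrative sketch (editorial example): allocating a zeroed page-table
 * page for a kernel mapping and marking it so that a later pagetable_free()
 * routes it through pagetable_free_kernel().  The helper name is
 * hypothetical.
 */
static inline struct ptdesc *example_alloc_kernel_pt(void)
{
	struct ptdesc *pt = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

	if (pt)
		ptdesc_set_kernel(pt);
	return pt;
}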
3230 
3231 #if defined(CONFIG_SPLIT_PTE_PTLOCKS)
3232 #if ALLOC_SPLIT_PTLOCKS
3233 void __init ptlock_cache_init(void);
3234 bool ptlock_alloc(struct ptdesc *ptdesc);
3235 void ptlock_free(struct ptdesc *ptdesc);
3236 
3237 static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
3238 {
3239 	return ptdesc->ptl;
3240 }
3241 #else /* ALLOC_SPLIT_PTLOCKS */
3242 static inline void ptlock_cache_init(void)
3243 {
3244 }
3245 
3246 static inline bool ptlock_alloc(struct ptdesc *ptdesc)
3247 {
3248 	return true;
3249 }
3250 
3251 static inline void ptlock_free(struct ptdesc *ptdesc)
3252 {
3253 }
3254 
3255 static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
3256 {
3257 	return &ptdesc->ptl;
3258 }
3259 #endif /* ALLOC_SPLIT_PTLOCKS */
3260 
3261 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3262 {
3263 	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
3264 }
3265 
3266 static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3267 {
3268 	BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
3269 	BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
3270 	return ptlock_ptr(virt_to_ptdesc(pte));
3271 }
3272 
3273 static inline bool ptlock_init(struct ptdesc *ptdesc)
3274 {
3275 	/*
3276 	 * prep_new_page() initializes page->private (and therefore page->ptl)
3277 	 * with 0. Make sure nobody took it into use in the meantime.
3278 	 *
3279 	 * It can happen if an arch tries to use slab for page table allocation:
3280 	 * slab code uses page->slab_cache, which shares storage with page->ptl.
3281 	 */
3282 	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
3283 	if (!ptlock_alloc(ptdesc))
3284 		return false;
3285 	spin_lock_init(ptlock_ptr(ptdesc));
3286 	return true;
3287 }
3288 
3289 #else	/* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
3290 /*
3291  * We use mm->page_table_lock to guard all pagetable pages of the mm.
3292  */
3293 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3294 {
3295 	return &mm->page_table_lock;
3296 }
3297 static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3298 {
3299 	return &mm->page_table_lock;
3300 }
3301 static inline void ptlock_cache_init(void) {}
3302 static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
3303 static inline void ptlock_free(struct ptdesc *ptdesc) {}
3304 #endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
3305 
3306 static inline unsigned long ptdesc_nr_pages(const struct ptdesc *ptdesc)
3307 {
3308 	return compound_nr(ptdesc_page(ptdesc));
3309 }
3310 
3311 static inline void __pagetable_ctor(struct ptdesc *ptdesc)
3312 {
3313 	pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
3314 
3315 	__SetPageTable(ptdesc_page(ptdesc));
3316 	mod_node_page_state(pgdat, NR_PAGETABLE, ptdesc_nr_pages(ptdesc));
3317 }
3318 
3319 static inline void pagetable_dtor(struct ptdesc *ptdesc)
3320 {
3321 	pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
3322 
3323 	ptlock_free(ptdesc);
3324 	__ClearPageTable(ptdesc_page(ptdesc));
3325 	mod_node_page_state(pgdat, NR_PAGETABLE, -ptdesc_nr_pages(ptdesc));
3326 }
3327 
3328 static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
3329 {
3330 	pagetable_dtor(ptdesc);
3331 	pagetable_free(ptdesc);
3332 }
3333 
3334 static inline bool pagetable_pte_ctor(struct mm_struct *mm,
3335 				      struct ptdesc *ptdesc)
3336 {
3337 	if (mm != &init_mm && !ptlock_init(ptdesc))
3338 		return false;
3339 	__pagetable_ctor(ptdesc);
3340 	return true;
3341 }
3342 
3343 pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
3344 
3345 static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
3346 {
3347 	return __pte_offset_map(pmd, addr, NULL);
3348 }
3349 
3350 pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3351 			   unsigned long addr, spinlock_t **ptlp);
3352 
3353 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
3354 				unsigned long addr, spinlock_t **ptlp);
3355 pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
3356 				unsigned long addr, pmd_t *pmdvalp,
3357 				spinlock_t **ptlp);
3358 
3359 #define pte_unmap_unlock(pte, ptl)	do {		\
3360 	spin_unlock(ptl);				\
3361 	pte_unmap(pte);					\
3362 } while (0)
3363 
3364 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
3365 
3366 #define pte_alloc_map(mm, pmd, address)			\
3367 	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
3368 
3369 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
3370 	(pte_alloc(mm, pmd) ?			\
3371 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
3372 
3373 #define pte_alloc_kernel(pmd, address)			\
3374 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ? \
3375 		NULL : pte_offset_kernel(pmd, address))
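
/*
 * Illustrative sketch (editorial example) of the usual "map and lock one
 * PTE" pattern built from the helpers above; the per-PTE work is left as a
 * comment and the function name is hypothetical.
 */
static inline void example_with_locked_pte(struct mm_struct *mm, pmd_t *pmd,
					   unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	if (!pte)
		return;		/* PTE table disappeared; caller would retry */
	/* ... read or update *pte while holding the PTE lock ... */
	pte_unmap_unlock(pte, ptl);
}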
3376 
3377 #if defined(CONFIG_SPLIT_PMD_PTLOCKS)
3378 
3379 static inline struct page *pmd_pgtable_page(pmd_t *pmd)
3380 {
3381 	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
3382 	return virt_to_page((void *)((unsigned long) pmd & mask));
3383 }
3384 
3385 static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
3386 {
3387 	return page_ptdesc(pmd_pgtable_page(pmd));
3388 }
3389 
3390 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3391 {
3392 	return ptlock_ptr(pmd_ptdesc(pmd));
3393 }
3394 
3395 static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
3396 {
3397 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3398 	ptdesc->pmd_huge_pte = NULL;
3399 #endif
3400 	return ptlock_init(ptdesc);
3401 }
3402 
3403 #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3404 
3405 #else
3406 
3407 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3408 {
3409 	return &mm->page_table_lock;
3410 }
3411 
3412 static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
3413 
3414 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3415 
3416 #endif
3417 
3418 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3419 {
3420 	spinlock_t *ptl = pmd_lockptr(mm, pmd);
3421 	spin_lock(ptl);
3422 	return ptl;
3423 }
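
/*
 * Illustrative sketch (editorial example): pmd_lock() returns whichever
 * spinlock protects this PMD (split lock or mm->page_table_lock), so the
 * matching unlock is a plain spin_unlock() on the returned pointer.
 */
static inline void example_with_locked_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	/* ... operate on the PMD entry while it cannot change under us ... */
	spin_unlock(ptl);
}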
3424 
3425 static inline bool pagetable_pmd_ctor(struct mm_struct *mm,
3426 				      struct ptdesc *ptdesc)
3427 {
3428 	if (mm != &init_mm && !pmd_ptlock_init(ptdesc))
3429 		return false;
3430 	ptdesc_pmd_pts_init(ptdesc);
3431 	__pagetable_ctor(ptdesc);
3432 	return true;
3433 }
3434 
3435 /*
3436  * No scalability reason to split PUD locks yet, but follow the same pattern
3437  * as the PMD locks to make it easier if we decide to.  The VM should not be
3438  * considered ready to switch to split PUD locks yet; there may be places
3439  * which need to be converted from page_table_lock.
3440  */
3441 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3442 {
3443 	return &mm->page_table_lock;
3444 }
3445 
3446 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3447 {
3448 	spinlock_t *ptl = pud_lockptr(mm, pud);
3449 
3450 	spin_lock(ptl);
3451 	return ptl;
3452 }
3453 
3454 static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
3455 {
3456 	__pagetable_ctor(ptdesc);
3457 }
3458 
3459 static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
3460 {
3461 	__pagetable_ctor(ptdesc);
3462 }
3463 
3464 static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
3465 {
3466 	__pagetable_ctor(ptdesc);
3467 }
3468 
3469 extern void __init pagecache_init(void);
3470 extern void free_initmem(void);
3471 
3472 /*
3473  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
3474  * into the buddy system. The freed pages will be poisoned with the pattern
3475  * "poison" if it is within the range [0, UCHAR_MAX].
3476  * Return the number of pages freed into the buddy system.
3477  */
3478 extern unsigned long free_reserved_area(void *start, void *end,
3479 					int poison, const char *s);
3480 
3481 extern void adjust_managed_page_count(struct page *page, long count);
3482 
3483 extern void reserve_bootmem_region(phys_addr_t start,
3484 				   phys_addr_t end, int nid);
3485 
3486 /* Free the reserved page into the buddy system, so it gets managed. */
3487 void free_reserved_page(struct page *page);
3488 
3489 static inline void mark_page_reserved(struct page *page)
3490 {
3491 	SetPageReserved(page);
3492 	adjust_managed_page_count(page, -1);
3493 }
3494 
3495 static inline void free_reserved_ptdesc(struct ptdesc *pt)
3496 {
3497 	free_reserved_page(ptdesc_page(pt));
3498 }
3499 
3500 /*
3501  * Default method to free all the __init memory into the buddy system.
3502  * The freed pages will be poisoned with the pattern "poison" if it is
3503  * within the range [0, UCHAR_MAX].
3504  * Return the number of pages freed into the buddy system.
3505  */
3506 static inline unsigned long free_initmem_default(int poison)
3507 {
3508 	extern char __init_begin[], __init_end[];
3509 
3510 	return free_reserved_area(&__init_begin, &__init_end,
3511 				  poison, "unused kernel image (initmem)");
3512 }
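
/*
 * Example (editorial sketch): an architecture whose free_initmem() has no
 * extra work to do can simply forward to the helper above; a poison value
 * outside [0, UCHAR_MAX] (such as -1) skips poisoning:
 *
 *	void __init free_initmem(void)
 *	{
 *		free_initmem_default(-1);
 *	}
 */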
3513 
3514 static inline unsigned long get_num_physpages(void)
3515 {
3516 	int nid;
3517 	unsigned long phys_pages = 0;
3518 
3519 	for_each_online_node(nid)
3520 		phys_pages += node_present_pages(nid);
3521 
3522 	return phys_pages;
3523 }
3524 
3525 /*
3526  * FIXME: Using memblock node mappings, an architecture may initialise its
3527  * zones, allocate the backing mem_map and account for memory holes in an
3528  * architecture independent manner.
3529  *
3530  * An architecture is expected to register the ranges of page frames backed
3531  * by physical memory with memblock_add[_node]() before calling
3532  * free_area_init(), passing in the PFN each zone ends at. For basic
3533  * usage, an architecture is expected to do something like
3534  *
3535  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
3536  * 							 max_highmem_pfn};
3537  * for_each_valid_physical_page_range()
3538  *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
3539  * free_area_init(max_zone_pfns);
3540  */
3541 void arch_zone_limits_init(unsigned long *max_zone_pfn);
3542 unsigned long node_map_pfn_alignment(void);
3543 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3544 						unsigned long end_pfn);
3545 extern void get_pfn_range_for_nid(unsigned int nid,
3546 			unsigned long *start_pfn, unsigned long *end_pfn);
3547 
3548 #ifndef CONFIG_NUMA
3549 static inline int early_pfn_to_nid(unsigned long pfn)
3550 {
3551 	return 0;
3552 }
3553 #else
3554 /* please see mm/page_alloc.c */
3555 extern int __meminit early_pfn_to_nid(unsigned long pfn);
3556 #endif
3557 
3558 extern void mem_init(void);
3559 extern void __init mmap_init(void);
3560 
3561 extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
3562 static inline void show_mem(void)
3563 {
3564 	__show_mem(0, NULL, MAX_NR_ZONES - 1);
3565 }
3566 extern long si_mem_available(void);
3567 extern void si_meminfo(struct sysinfo * val);
3568 extern void si_meminfo_node(struct sysinfo *val, int nid);
3569 
3570 extern __printf(3, 4)
3571 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
3572 
3573 extern void setup_per_cpu_pageset(void);
3574 
3575 /* nommu.c */
3576 extern atomic_long_t mmap_pages_allocated;
3577 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
3578 
3579 /* interval_tree.c */
3580 void vma_interval_tree_insert(struct vm_area_struct *node,
3581 			      struct rb_root_cached *root);
3582 void vma_interval_tree_insert_after(struct vm_area_struct *node,
3583 				    struct vm_area_struct *prev,
3584 				    struct rb_root_cached *root);
3585 void vma_interval_tree_remove(struct vm_area_struct *node,
3586 			      struct rb_root_cached *root);
3587 struct vm_area_struct *vma_interval_tree_subtree_search(struct vm_area_struct *node,
3588 				unsigned long start, unsigned long last);
3589 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
3590 				unsigned long start, unsigned long last);
3591 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
3592 				unsigned long start, unsigned long last);
3593 
3594 #define vma_interval_tree_foreach(vma, root, start, last)		\
3595 	for (vma = vma_interval_tree_iter_first(root, start, last);	\
3596 	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
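
/*
 * Example (editorial sketch): visiting every VMA that maps a given
 * page-cache range of a file, in the style of the rmap walkers.  The caller
 * is assumed to hold the i_mmap lock; the loop body is hypothetical.
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
 *		... vma maps at least one page in [first, last] ...
 *	}
 */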
3597 
3598 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
3599 				   struct rb_root_cached *root);
3600 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
3601 				   struct rb_root_cached *root);
3602 struct anon_vma_chain *
3603 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
3604 				  unsigned long start, unsigned long last);
3605 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
3606 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
3607 #ifdef CONFIG_DEBUG_VM_RB
3608 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
3609 #endif
3610 
3611 #define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
3612 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
3613 	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
3614 
3615 /* mmap.c */
3616 extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
3617 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
3618 extern void exit_mmap(struct mm_struct *);
3619 bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
3620 				 unsigned long addr, bool write);
3621 
3622 static inline int check_data_rlimit(unsigned long rlim,
3623 				    unsigned long new,
3624 				    unsigned long start,
3625 				    unsigned long end_data,
3626 				    unsigned long start_data)
3627 {
3628 	if (rlim < RLIM_INFINITY) {
3629 		if (((new - start) + (end_data - start_data)) > rlim)
3630 			return -ENOSPC;
3631 	}
3632 
3633 	return 0;
3634 }
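
/*
 * Example (editorial sketch), mirroring the sys_brk() usage: refuse to move
 * the break if the data segment plus brk would exceed RLIMIT_DATA.
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		goto out;
 */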
3635 
3636 extern int mm_take_all_locks(struct mm_struct *mm);
3637 extern void mm_drop_all_locks(struct mm_struct *mm);
3638 
3639 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3640 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3641 extern struct file *get_mm_exe_file(struct mm_struct *mm);
3642 extern struct file *get_task_exe_file(struct task_struct *task);
3643 
3644 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
3645 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
3646 
3647 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
3648 				   const struct vm_special_mapping *sm);
3649 struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
3650 				   unsigned long addr, unsigned long len,
3651 				   vm_flags_t vm_flags,
3652 				   const struct vm_special_mapping *spec);
3653 
3654 unsigned long randomize_stack_top(unsigned long stack_top);
3655 unsigned long randomize_page(unsigned long start, unsigned long range);
3656 
3657 unsigned long
3658 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3659 		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
3660 
3661 static inline unsigned long
3662 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3663 		  unsigned long pgoff, unsigned long flags)
3664 {
3665 	return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
3666 }
3667 
3668 extern unsigned long do_mmap(struct file *file, unsigned long addr,
3669 	unsigned long len, unsigned long prot, unsigned long flags,
3670 	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
3671 	struct list_head *uf);
3672 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
3673 			 unsigned long start, size_t len, struct list_head *uf,
3674 			 bool unlock);
3675 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3676 		    struct mm_struct *mm, unsigned long start,
3677 		    unsigned long end, struct list_head *uf, bool unlock);
3678 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
3679 		     struct list_head *uf);
3680 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
3681 
3682 #ifdef CONFIG_MMU
3683 extern int __mm_populate(unsigned long addr, unsigned long len,
3684 			 int ignore_errors);
3685 static inline void mm_populate(unsigned long addr, unsigned long len)
3686 {
3687 	/* Ignore errors */
3688 	(void) __mm_populate(addr, len, 1);
3689 }
3690 #else
3691 static inline void mm_populate(unsigned long addr, unsigned long len) {}
3692 #endif
3693 
3694 /* This takes the mm semaphore itself */
3695 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
3696 extern int vm_munmap(unsigned long, size_t);
3697 extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
3698         unsigned long, unsigned long,
3699         unsigned long, unsigned long);
3700 
3701 struct vm_unmapped_area_info {
3702 #define VM_UNMAPPED_AREA_TOPDOWN 1
3703 	unsigned long flags;
3704 	unsigned long length;
3705 	unsigned long low_limit;
3706 	unsigned long high_limit;
3707 	unsigned long align_mask;
3708 	unsigned long align_offset;
3709 	unsigned long start_gap;
3710 };
3711 
3712 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
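
/*
 * Example (editorial sketch) of a bottom-up search in the style of the
 * generic arch_get_unmapped_area(); the limits shown are the common choice
 * but remain architecture policy.
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	addr = vm_unmapped_area(&info);
 */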
3713 
3714 /* truncate.c */
3715 void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
3716 void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
3717 		loff_t lend);
3718 void truncate_inode_pages_final(struct address_space *mapping);
3719 
3720 /* generic vm_area_ops exported for stackable file systems */
3721 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
3722 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3723 		pgoff_t start_pgoff, pgoff_t end_pgoff);
3724 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
3725 
3726 extern unsigned long stack_guard_gap;
3727 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
3728 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
3729 struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
3730 
3731 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
3732 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
3733 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
3734 					     struct vm_area_struct **pprev);
3735 
3736 /*
3737  * Look up the first VMA which intersects the interval [start_addr, end_addr);
3738  * NULL if none.  Assume start_addr < end_addr.
3739  */
3740 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
3741 			unsigned long start_addr, unsigned long end_addr);
3742 
3743 /**
3744  * vma_lookup() - Find a VMA at a specific address
3745  * @mm: The process address space.
3746  * @addr: The user address.
3747  *
3748  * Return: The vm_area_struct at the given address, %NULL otherwise.
3749  */
3750 static inline
3751 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
3752 {
3753 	return mtree_load(&mm->mm_mt, addr);
3754 }
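
/*
 * Illustrative sketch (editorial example): unlike find_vma(), vma_lookup()
 * only returns a VMA that actually contains @addr, so no extra vm_start
 * check is needed.  The caller must hold the mmap lock; the helper name is
 * hypothetical.
 */
static inline bool example_addr_is_mapped(struct mm_struct *mm,
					  unsigned long addr)
{
	bool mapped;

	mmap_read_lock(mm);
	mapped = vma_lookup(mm, addr) != NULL;
	mmap_read_unlock(mm);

	return mapped;
}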
3755 
3756 static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
3757 {
3758 	if (vma->vm_flags & VM_GROWSDOWN)
3759 		return stack_guard_gap;
3760 
3761 	/* See reasoning around the VM_SHADOW_STACK definition */
3762 	if (vma->vm_flags & VM_SHADOW_STACK)
3763 		return PAGE_SIZE;
3764 
3765 	return 0;
3766 }
3767 
3768 static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
3769 {
3770 	unsigned long gap = stack_guard_start_gap(vma);
3771 	unsigned long vm_start = vma->vm_start;
3772 
3773 	vm_start -= gap;
3774 	if (vm_start > vma->vm_start)
3775 		vm_start = 0;
3776 	return vm_start;
3777 }
3778 
3779 static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
3780 {
3781 	unsigned long vm_end = vma->vm_end;
3782 
3783 	if (vma->vm_flags & VM_GROWSUP) {
3784 		vm_end += stack_guard_gap;
3785 		if (vm_end < vma->vm_end)
3786 			vm_end = -PAGE_SIZE;
3787 	}
3788 	return vm_end;
3789 }
3790 
3791 static inline unsigned long vma_pages(const struct vm_area_struct *vma)
3792 {
3793 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3794 }
3795 
3796 static inline unsigned long vma_desc_size(const struct vm_area_desc *desc)
3797 {
3798 	return desc->end - desc->start;
3799 }
3800 
3801 static inline unsigned long vma_desc_pages(const struct vm_area_desc *desc)
3802 {
3803 	return vma_desc_size(desc) >> PAGE_SHIFT;
3804 }
3805 
3806 /**
3807  * mmap_action_remap - helper for mmap_prepare hook to specify that a pure PFN
3808  * remap is required.
3809  * @desc: The VMA descriptor for the VMA requiring remap.
3810  * @start: The virtual address to start the remap from, must be within the VMA.
3811  * @start_pfn: The first PFN in the range to remap.
3812  * @size: The size of the range to remap, in bytes, at most spanning to the end
3813  * of the VMA.
3814  */
3815 static inline void mmap_action_remap(struct vm_area_desc *desc,
3816 				     unsigned long start,
3817 				     unsigned long start_pfn,
3818 				     unsigned long size)
3819 {
3820 	struct mmap_action *action = &desc->action;
3821 
3822 	/* [start, start + size) must be within the VMA. */
3823 	WARN_ON_ONCE(start < desc->start || start >= desc->end);
3824 	WARN_ON_ONCE(start + size > desc->end);
3825 
3826 	action->type = MMAP_REMAP_PFN;
3827 	action->remap.start = start;
3828 	action->remap.start_pfn = start_pfn;
3829 	action->remap.size = size;
3830 	action->remap.pgprot = desc->page_prot;
3831 }
3832 
3833 /**
3834  * mmap_action_remap_full - helper for mmap_prepare hook to specify that the
3835  * entirety of a VMA should be PFN remapped.
3836  * @desc: The VMA descriptor for the VMA requiring remap.
3837  * @start_pfn: The first PFN in the range to remap.
3838  */
3839 static inline void mmap_action_remap_full(struct vm_area_desc *desc,
3840 					  unsigned long start_pfn)
3841 {
3842 	mmap_action_remap(desc, desc->start, start_pfn, vma_desc_size(desc));
3843 }
3844 
3845 /**
3846  * mmap_action_ioremap - helper for mmap_prepare hook to specify that a pure PFN
3847  * I/O remap is required.
3848  * @desc: The VMA descriptor for the VMA requiring remap.
3849  * @start: The virtual address to start the remap from, must be within the VMA.
3850  * @start_pfn: The first PFN in the range to remap.
3851  * @size: The size of the range to remap, in bytes, at most spanning to the end
3852  * of the VMA.
3853  */
3854 static inline void mmap_action_ioremap(struct vm_area_desc *desc,
3855 				       unsigned long start,
3856 				       unsigned long start_pfn,
3857 				       unsigned long size)
3858 {
3859 	mmap_action_remap(desc, start, start_pfn, size);
3860 	desc->action.type = MMAP_IO_REMAP_PFN;
3861 }
3862 
3863 /**
3864  * mmap_action_ioremap_full - helper for mmap_prepare hook to specify that the
3865  * entirety of a VMA should be PFN I/O remapped.
3866  * @desc: The VMA descriptor for the VMA requiring remap.
3867  * @start_pfn: The first PFN in the range to remap.
3868  */
3869 static inline void mmap_action_ioremap_full(struct vm_area_desc *desc,
3870 					  unsigned long start_pfn)
3871 {
3872 	mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc));
3873 }
3874 
3875 void mmap_action_prepare(struct mmap_action *action,
3876 			 struct vm_area_desc *desc);
3877 int mmap_action_complete(struct mmap_action *action,
3878 			 struct vm_area_struct *vma);
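
/*
 * Example (editorial sketch) of an ->mmap_prepare() hook that maps a single
 * device region with the helpers above; the "foo_region_*" values are
 * hypothetical driver state.
 *
 *	static int foo_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		if (vma_desc_size(desc) > foo_region_size)
 *			return -EINVAL;
 *
 *		mmap_action_ioremap_full(desc, PHYS_PFN(foo_region_phys));
 *		return 0;
 *	}
 */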
3879 
3880 /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
3881 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
3882 				unsigned long vm_start, unsigned long vm_end)
3883 {
3884 	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
3885 
3886 	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
3887 		vma = NULL;
3888 
3889 	return vma;
3890 }
3891 
3892 static inline bool range_in_vma(const struct vm_area_struct *vma,
3893 				unsigned long start, unsigned long end)
3894 {
3895 	return (vma && vma->vm_start <= start && end <= vma->vm_end);
3896 }
3897 
3898 #ifdef CONFIG_MMU
3899 pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
3900 void vma_set_page_prot(struct vm_area_struct *vma);
3901 #else
3902 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
3903 {
3904 	return __pgprot(0);
3905 }
3906 static inline void vma_set_page_prot(struct vm_area_struct *vma)
3907 {
3908 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3909 }
3910 #endif
3911 
3912 void vma_set_file(struct vm_area_struct *vma, struct file *file);
3913 
3914 #ifdef CONFIG_NUMA_BALANCING
3915 unsigned long change_prot_numa(struct vm_area_struct *vma,
3916 			unsigned long start, unsigned long end);
3917 #endif
3918 
3919 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
3920 		unsigned long addr);
3921 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
3922 		    unsigned long pfn, unsigned long size, pgprot_t pgprot);
3923 
3924 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3925 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3926 			struct page **pages, unsigned long *num);
3927 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3928 				unsigned long num);
3929 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3930 				unsigned long num);
3931 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
3932 			bool write);
3933 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3934 			unsigned long pfn);
3935 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3936 			unsigned long pfn, pgprot_t pgprot);
3937 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3938 			unsigned long pfn);
3939 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3940 		unsigned long addr, unsigned long pfn);
3941 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3942 
3943 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
3944 				unsigned long addr, struct page *page)
3945 {
3946 	int err = vm_insert_page(vma, addr, page);
3947 
3948 	if (err == -ENOMEM)
3949 		return VM_FAULT_OOM;
3950 	if (err < 0 && err != -EBUSY)
3951 		return VM_FAULT_SIGBUS;
3952 
3953 	return VM_FAULT_NOPAGE;
3954 }
3955 
3956 #ifndef io_remap_pfn_range_pfn
3957 static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
3958 		unsigned long size)
3959 {
3960 	return pfn;
3961 }
3962 #endif
3963 
3964 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
3965 				     unsigned long addr, unsigned long orig_pfn,
3966 				     unsigned long size, pgprot_t orig_prot)
3967 {
3968 	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
3969 	const pgprot_t prot = pgprot_decrypted(orig_prot);
3970 
3971 	return remap_pfn_range(vma, addr, pfn, size, prot);
3972 }
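
/*
 * Illustrative sketch (editorial example): a classic driver ->mmap()
 * implementation maps an MMIO region at a caller-provided physical address
 * over the whole VMA.  The helper name is hypothetical.
 */
static inline int example_io_mmap(struct vm_area_struct *vma,
				  phys_addr_t region_phys)
{
	return io_remap_pfn_range(vma, vma->vm_start, PHYS_PFN(region_phys),
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}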
3973 
3974 static inline vm_fault_t vmf_error(int err)
3975 {
3976 	if (err == -ENOMEM)
3977 		return VM_FAULT_OOM;
3978 	else if (err == -EHWPOISON)
3979 		return VM_FAULT_HWPOISON;
3980 	return VM_FAULT_SIGBUS;
3981 }
3982 
3983 /*
3984  * Convert errno to return value for ->page_mkwrite() calls.
3985  *
3986  * This should eventually be merged with vmf_error() above, but will need a
3987  * careful audit of all vmf_error() callers.
3988  */
3989 static inline vm_fault_t vmf_fs_error(int err)
3990 {
3991 	if (err == 0)
3992 		return VM_FAULT_LOCKED;
3993 	if (err == -EFAULT || err == -EAGAIN)
3994 		return VM_FAULT_NOPAGE;
3995 	if (err == -ENOMEM)
3996 		return VM_FAULT_OOM;
3997 	/* -ENOSPC, -EDQUOT, -EIO ... */
3998 	return VM_FAULT_SIGBUS;
3999 }
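
/*
 * Example (editorial sketch): a filesystem ->page_mkwrite() handler
 * typically funnels its internal errno through vmf_fs_error(); the "foo_*"
 * names are hypothetical.
 *
 *	static vm_fault_t foo_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = foo_begin_write(vmf);
 *
 *		return vmf_fs_error(err);
 *	}
 */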
4000 
4001 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
4002 {
4003 	if (vm_fault & VM_FAULT_OOM)
4004 		return -ENOMEM;
4005 	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
4006 		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
4007 	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
4008 		return -EFAULT;
4009 	return 0;
4010 }
4011 
4012 /*
4013  * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
4014  * a (NUMA hinting) fault is required.
4015  */
4016 static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
4017 					   unsigned int flags)
4018 {
4019 	/*
4020 	 * If callers don't want to honor NUMA hinting faults, no need to
4021 	 * determine if we would actually have to trigger a NUMA hinting fault.
4022 	 */
4023 	if (!(flags & FOLL_HONOR_NUMA_FAULT))
4024 		return true;
4025 
4026 	/*
4027 	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
4028 	 *
4029 	 * Requiring a fault here even for inaccessible VMAs would mean that
4030 	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
4031 	 * refuses to process NUMA hinting faults in inaccessible VMAs.
4032 	 */
4033 	return !vma_is_accessible(vma);
4034 }
4035 
4036 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
4037 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
4038 			       unsigned long size, pte_fn_t fn, void *data);
4039 extern int apply_to_existing_page_range(struct mm_struct *mm,
4040 				   unsigned long address, unsigned long size,
4041 				   pte_fn_t fn, void *data);
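
/*
 * Illustrative sketch (editorial example) of a pte_fn_t callback: count the
 * present PTEs in a kernel range.  A caller would invoke it as, e.g.,
 * apply_to_page_range(&init_mm, start, size, example_count_present_pte,
 * &count).  Names are hypothetical.
 */
static inline int example_count_present_pte(pte_t *pte, unsigned long addr,
					    void *data)
{
	unsigned long *count = data;

	if (pte_present(ptep_get(pte)))
		(*count)++;

	return 0;
}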
4042 
4043 #ifdef CONFIG_PAGE_POISONING
4044 extern void __kernel_poison_pages(struct page *page, int numpages);
4045 extern void __kernel_unpoison_pages(struct page *page, int numpages);
4046 extern bool _page_poisoning_enabled_early;
4047 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
4048 static inline bool page_poisoning_enabled(void)
4049 {
4050 	return _page_poisoning_enabled_early;
4051 }
4052 /*
4053  * For use in fast paths after mem_debugging_and_hardening_init() has run,
4054  * or when a false negative result is not harmful when called too early.
4055  */
4056 static inline bool page_poisoning_enabled_static(void)
4057 {
4058 	return static_branch_unlikely(&_page_poisoning_enabled);
4059 }
4060 static inline void kernel_poison_pages(struct page *page, int numpages)
4061 {
4062 	if (page_poisoning_enabled_static())
4063 		__kernel_poison_pages(page, numpages);
4064 }
4065 static inline void kernel_unpoison_pages(struct page *page, int numpages)
4066 {
4067 	if (page_poisoning_enabled_static())
4068 		__kernel_unpoison_pages(page, numpages);
4069 }
4070 #else
4071 static inline bool page_poisoning_enabled(void) { return false; }
4072 static inline bool page_poisoning_enabled_static(void) { return false; }
4073 static inline void __kernel_poison_pages(struct page *page, int numpages) { }
4074 static inline void kernel_poison_pages(struct page *page, int numpages) { }
4075 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
4076 #endif
4077 
4078 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
4079 static inline bool want_init_on_alloc(gfp_t flags)
4080 {
4081 	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
4082 				&init_on_alloc))
4083 		return true;
4084 	return flags & __GFP_ZERO;
4085 }
4086 
4087 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
4088 static inline bool want_init_on_free(void)
4089 {
4090 	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
4091 				   &init_on_free);
4092 }
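
/*
 * Example (editorial sketch) of how an allocator consults these knobs; the
 * placement of the memsets mirrors what the page and slab allocators do.
 *
 *	if (want_init_on_alloc(gfp))
 *		memset(obj, 0, size);
 *	...
 *	if (want_init_on_free())
 *		memset(obj, 0, size);
 */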
4093 
4094 extern bool _debug_pagealloc_enabled_early;
4095 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
4096 
4097 static inline bool debug_pagealloc_enabled(void)
4098 {
4099 	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
4100 		_debug_pagealloc_enabled_early;
4101 }
4102 
4103 /*
4104  * For use in fast paths after mem_debugging_and_hardening_init() has run,
4105  * or when a false negative result is not harmful when called too early.
4106  */
4107 static inline bool debug_pagealloc_enabled_static(void)
4108 {
4109 	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
4110 		return false;
4111 
4112 	return static_branch_unlikely(&_debug_pagealloc_enabled);
4113 }
4114 
4115 /*
4116  * To support DEBUG_PAGEALLOC, an architecture must ensure that
4117  * __kernel_map_pages() never fails.
4118  */
4119 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
4120 #ifdef CONFIG_DEBUG_PAGEALLOC
4121 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
4122 {
4123 	iommu_debug_check_unmapped(page, numpages);
4124 
4125 	if (debug_pagealloc_enabled_static())
4126 		__kernel_map_pages(page, numpages, 1);
4127 }
4128 
4129 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
4130 {
4131 	iommu_debug_check_unmapped(page, numpages);
4132 
4133 	if (debug_pagealloc_enabled_static())
4134 		__kernel_map_pages(page, numpages, 0);
4135 }
4136 
4137 extern unsigned int _debug_guardpage_minorder;
4138 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
4139 
4140 static inline unsigned int debug_guardpage_minorder(void)
4141 {
4142 	return _debug_guardpage_minorder;
4143 }
4144 
4145 static inline bool debug_guardpage_enabled(void)
4146 {
4147 	return static_branch_unlikely(&_debug_guardpage_enabled);
4148 }
4149 
4150 static inline bool page_is_guard(const struct page *page)
4151 {
4152 	if (!debug_guardpage_enabled())
4153 		return false;
4154 
4155 	return PageGuard(page);
4156 }
4157 
4158 bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
4159 static inline bool set_page_guard(struct zone *zone, struct page *page,
4160 				  unsigned int order)
4161 {
4162 	if (!debug_guardpage_enabled())
4163 		return false;
4164 	return __set_page_guard(zone, page, order);
4165 }
4166 
4167 void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
4168 static inline void clear_page_guard(struct zone *zone, struct page *page,
4169 				    unsigned int order)
4170 {
4171 	if (!debug_guardpage_enabled())
4172 		return;
4173 	__clear_page_guard(zone, page, order);
4174 }
4175 
4176 #else	/* CONFIG_DEBUG_PAGEALLOC */
4177 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
4178 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
4179 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
4180 static inline bool debug_guardpage_enabled(void) { return false; }
4181 static inline bool page_is_guard(const struct page *page) { return false; }
4182 static inline bool set_page_guard(struct zone *zone, struct page *page,
4183 			unsigned int order) { return false; }
4184 static inline void clear_page_guard(struct zone *zone, struct page *page,
4185 				unsigned int order) {}
4186 #endif	/* CONFIG_DEBUG_PAGEALLOC */
4187 
4188 #ifndef clear_pages
4189 /**
4190  * clear_pages() - clear a page range for kernel-internal use.
4191  * @addr: start address
4192  * @npages: number of pages
4193  *
4194  * Use clear_user_pages() instead when clearing a page range to be
4195  * mapped to user space.
4196  *
4197  * Does absolutely no exception handling.
4198  *
4199  * Note that even though the clearing operation is preemptible, clear_pages()
4200  * does not (and on architectures where it reduces to a few long-running
4201  * instructions, might not be able to) call cond_resched() to check if
4202  * rescheduling is required.
4203  *
4204  * When running under preemptible models this is not a problem. Under
4205  * cooperatively scheduled models, however, the caller is expected to
4206  * limit @npages to no more than PROCESS_PAGES_NON_PREEMPT_BATCH.
4207  */
4208 static inline void clear_pages(void *addr, unsigned int npages)
4209 {
4210 	do {
4211 		clear_page(addr);
4212 		addr += PAGE_SIZE;
4213 	} while (--npages);
4214 }
4215 #endif
4216 
4217 #ifndef PROCESS_PAGES_NON_PREEMPT_BATCH
4218 #ifdef clear_pages
4219 /*
4220  * The architecture defines clear_pages(), and we assume that it is
4221  * generally "fast". So choose a batch size large enough to allow the processor
4222  * headroom for optimizing the operation and yet small enough that we see
4223  * reasonable preemption latency for when this optimization is not possible
4224  * (ex. slow microarchitectures, memory bandwidth saturation.)
4225  *
4226  * With a value of 32MB and assuming a memory bandwidth of ~10GBps, this should
4227  * result in worst case preemption latency of around 3ms when clearing pages.
4228  *
4229  * (See comment above clear_pages() for why preemption latency is a concern
4230  * here.)
4231  */
4232 #define PROCESS_PAGES_NON_PREEMPT_BATCH		(SZ_32M >> PAGE_SHIFT)
4233 #else /* !clear_pages */
4234 /*
4235  * The architecture does not provide a clear_pages() implementation. Assume
4236  * that clear_page() -- which clear_pages() will fall back to -- is relatively
4237  * slow and choose a small value for PROCESS_PAGES_NON_PREEMPT_BATCH.
4238  */
4239 #define PROCESS_PAGES_NON_PREEMPT_BATCH		1
4240 #endif
4241 #endif
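
/*
 * Illustrative sketch (editorial example): clearing a long run of pages in
 * batches of at most PROCESS_PAGES_NON_PREEMPT_BATCH so that cooperatively
 * scheduled kernels get a rescheduling point between batches.  The helper
 * name is hypothetical.
 */
static inline void example_clear_page_run(void *addr, unsigned long npages)
{
	while (npages) {
		unsigned long batch = npages;

		if (batch > PROCESS_PAGES_NON_PREEMPT_BATCH)
			batch = PROCESS_PAGES_NON_PREEMPT_BATCH;

		clear_pages(addr, batch);
		addr += batch * PAGE_SIZE;
		npages -= batch;
		cond_resched();
	}
}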
4242 
4243 #ifdef __HAVE_ARCH_GATE_AREA
4244 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
4245 extern int in_gate_area_no_mm(unsigned long addr);
4246 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
4247 #else
4248 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
4249 {
4250 	return NULL;
4251 }
4252 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
4253 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
4254 {
4255 	return 0;
4256 }
4257 #endif	/* __HAVE_ARCH_GATE_AREA */
4258 
4259 bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm);
4260 
4261 void drop_slab(void);
4262 
4263 #ifndef CONFIG_MMU
4264 #define randomize_va_space 0
4265 #else
4266 extern int randomize_va_space;
4267 #endif
4268 
4269 const char * arch_vma_name(struct vm_area_struct *vma);
4270 #ifdef CONFIG_MMU
4271 void print_vma_addr(char *prefix, unsigned long rip);
4272 #else
4273 static inline void print_vma_addr(char *prefix, unsigned long rip)
4274 {
4275 }
4276 #endif
4277 
4278 void *sparse_buffer_alloc(unsigned long size);
4279 unsigned long section_map_size(void);
4280 struct page * __populate_section_memmap(unsigned long pfn,
4281 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
4282 		struct dev_pagemap *pgmap);
4283 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
4284 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
4285 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
4286 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
4287 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
4288 			    struct vmem_altmap *altmap, unsigned long ptpfn,
4289 			    unsigned long flags);
4290 void *vmemmap_alloc_block(unsigned long size, int node);
4291 struct vmem_altmap;
4292 void *vmemmap_alloc_block_buf(unsigned long size, int node,
4293 			      struct vmem_altmap *altmap);
4294 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
4295 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
4296 		     unsigned long addr, unsigned long next);
4297 int vmemmap_check_pmd(pmd_t *pmd, int node,
4298 		      unsigned long addr, unsigned long next);
4299 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
4300 			       int node, struct vmem_altmap *altmap);
4301 int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
4302 			       int node, struct vmem_altmap *altmap);
4303 int vmemmap_populate(unsigned long start, unsigned long end, int node,
4304 		struct vmem_altmap *altmap);
4305 int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
4306 			 unsigned long headsize);
4307 int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
4308 		     unsigned long headsize);
4309 void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
4310 			  unsigned long headsize);
4311 void vmemmap_populate_print_last(void);
4312 #ifdef CONFIG_MEMORY_HOTPLUG
4313 void vmemmap_free(unsigned long start, unsigned long end,
4314 		struct vmem_altmap *altmap);
4315 #endif
4316 
4317 #ifdef CONFIG_SPARSEMEM_VMEMMAP
4318 static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
4319 {
4320 	/* number of pfns from base where pfn_to_page() is valid */
4321 	if (altmap)
4322 		return altmap->reserve + altmap->free;
4323 	return 0;
4324 }
4325 
4326 static inline void vmem_altmap_free(struct vmem_altmap *altmap,
4327 				    unsigned long nr_pfns)
4328 {
4329 	altmap->alloc -= nr_pfns;
4330 }
4331 #else
4332 static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
4333 {
4334 	return 0;
4335 }
4336 
4337 static inline void vmem_altmap_free(struct vmem_altmap *altmap,
4338 				    unsigned long nr_pfns)
4339 {
4340 }
4341 #endif
4342 
4343 #define VMEMMAP_RESERVE_NR	2
4344 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
4345 static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
4346 					  struct dev_pagemap *pgmap)
4347 {
4348 	unsigned long nr_pages;
4349 	unsigned long nr_vmemmap_pages;
4350 
4351 	if (!pgmap || !is_power_of_2(sizeof(struct page)))
4352 		return false;
4353 
4354 	nr_pages = pgmap_vmemmap_nr(pgmap);
4355 	nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
4356 	/*
4357 	 * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
4358 	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
4359 	 */
4360 	return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
4361 }
4362 /*
4363  * If we don't have an architecture override, use the generic rule
4364  */
4365 #ifndef vmemmap_can_optimize
4366 #define vmemmap_can_optimize __vmemmap_can_optimize
4367 #endif
4368 
4369 #else
4370 static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
4371 					   struct dev_pagemap *pgmap)
4372 {
4373 	return false;
4374 }
4375 #endif
4376 
4377 enum mf_flags {
4378 	MF_COUNT_INCREASED = 1 << 0,
4379 	MF_ACTION_REQUIRED = 1 << 1,
4380 	MF_MUST_KILL = 1 << 2,
4381 	MF_SOFT_OFFLINE = 1 << 3,
4382 	MF_UNPOISON = 1 << 4,
4383 	MF_SW_SIMULATED = 1 << 5,
4384 	MF_NO_RETRY = 1 << 6,
4385 	MF_MEM_PRE_REMOVE = 1 << 7,
4386 };
4387 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
4388 		      unsigned long count, int mf_flags);
4389 extern int memory_failure(unsigned long pfn, int flags);
4390 extern int unpoison_memory(unsigned long pfn);
4391 extern atomic_long_t num_poisoned_pages __read_mostly;
4392 extern int soft_offline_page(unsigned long pfn, int flags);
4393 #ifdef CONFIG_MEMORY_FAILURE
4394 /*
4395  * Sysfs entries for memory failure handling statistics.
4396  */
4397 extern const struct attribute_group memory_failure_attr_group;
4398 extern void memory_failure_queue(unsigned long pfn, int flags);
4399 extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
4400 					bool *migratable_cleared);
4401 void num_poisoned_pages_inc(unsigned long pfn);
4402 void num_poisoned_pages_sub(unsigned long pfn, long i);
4403 #else
4404 static inline void memory_failure_queue(unsigned long pfn, int flags)
4405 {
4406 }
4407 
4408 static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
4409 					bool *migratable_cleared)
4410 {
4411 	return 0;
4412 }
4413 
4414 static inline void num_poisoned_pages_inc(unsigned long pfn)
4415 {
4416 }
4417 
4418 static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
4419 {
4420 }
4421 #endif
4422 
4423 #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
4424 extern void memblk_nr_poison_inc(unsigned long pfn);
4425 extern void memblk_nr_poison_sub(unsigned long pfn, long i);
4426 #else
4427 static inline void memblk_nr_poison_inc(unsigned long pfn)
4428 {
4429 }
4430 
4431 static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
4432 {
4433 }
4434 #endif
4435 
4436 #ifndef arch_memory_failure
4437 static inline int arch_memory_failure(unsigned long pfn, int flags)
4438 {
4439 	return -ENXIO;
4440 }
4441 #endif
4442 
4443 #ifndef arch_is_platform_page
4444 static inline bool arch_is_platform_page(u64 paddr)
4445 {
4446 	return false;
4447 }
4448 #endif
4449 
4450 /*
4451  * Error handlers for various types of pages.
4452  */
4453 enum mf_result {
4454 	MF_IGNORED,	/* Error: cannot be handled */
4455 	MF_FAILED,	/* Error: handling failed */
4456 	MF_DELAYED,	/* Will be handled later */
4457 	MF_RECOVERED,	/* Successfully recovered */
4458 };
4459 
4460 enum mf_action_page_type {
4461 	MF_MSG_KERNEL,
4462 	MF_MSG_KERNEL_HIGH_ORDER,
4463 	MF_MSG_DIFFERENT_COMPOUND,
4464 	MF_MSG_HUGE,
4465 	MF_MSG_FREE_HUGE,
4466 	MF_MSG_GET_HWPOISON,
4467 	MF_MSG_UNMAP_FAILED,
4468 	MF_MSG_DIRTY_SWAPCACHE,
4469 	MF_MSG_CLEAN_SWAPCACHE,
4470 	MF_MSG_DIRTY_MLOCKED_LRU,
4471 	MF_MSG_CLEAN_MLOCKED_LRU,
4472 	MF_MSG_DIRTY_UNEVICTABLE_LRU,
4473 	MF_MSG_CLEAN_UNEVICTABLE_LRU,
4474 	MF_MSG_DIRTY_LRU,
4475 	MF_MSG_CLEAN_LRU,
4476 	MF_MSG_TRUNCATED_LRU,
4477 	MF_MSG_BUDDY,
4478 	MF_MSG_DAX,
4479 	MF_MSG_UNSPLIT_THP,
4480 	MF_MSG_ALREADY_POISONED,
4481 	MF_MSG_PFN_MAP,
4482 	MF_MSG_UNKNOWN,
4483 };
4484 
4485 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
4486 void folio_zero_user(struct folio *folio, unsigned long addr_hint);
4487 int copy_user_large_folio(struct folio *dst, struct folio *src,
4488 			  unsigned long addr_hint,
4489 			  struct vm_area_struct *vma);
4490 long copy_folio_from_user(struct folio *dst_folio,
4491 			   const void __user *usr_src,
4492 			   bool allow_pagefault);
4493 
4494 /**
4495  * vma_is_special_huge - Are transhuge page-table entries considered special?
4496  * @vma: Pointer to the struct vm_area_struct to consider
4497  *
4498  * Whether transhuge page-table entries are considered "special" following
4499  * the definition in vm_normal_page().
4500  *
4501  * Return: true if transhuge page-table entries should be considered special,
4502  * false otherwise.
4503  */
4504 static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
4505 {
4506 	return vma_is_dax(vma) || (vma->vm_file &&
4507 				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
4508 }
4509 
4510 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4511 
4512 #if MAX_NUMNODES > 1
4513 void __init setup_nr_node_ids(void);
4514 #else
4515 static inline void setup_nr_node_ids(void) {}
4516 #endif
4517 
4518 extern int memcmp_pages(struct page *page1, struct page *page2);
4519 
4520 static inline int pages_identical(struct page *page1, struct page *page2)
4521 {
4522 	return !memcmp_pages(page1, page2);
4523 }
4524 
4525 #ifdef CONFIG_MAPPING_DIRTY_HELPERS
4526 unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
4527 						pgoff_t first_index, pgoff_t nr,
4528 						pgoff_t bitmap_pgoff,
4529 						unsigned long *bitmap,
4530 						pgoff_t *start,
4531 						pgoff_t *end);
4532 
4533 unsigned long wp_shared_mapping_range(struct address_space *mapping,
4534 				      pgoff_t first_index, pgoff_t nr);
4535 #endif
4536 
4537 #ifdef CONFIG_ANON_VMA_NAME
4538 int set_anon_vma_name(unsigned long addr, unsigned long size,
4539 		      const char __user *uname);
4540 #else
4541 static inline
4542 int set_anon_vma_name(unsigned long addr, unsigned long size,
4543 		      const char __user *uname)
4544 {
4545 	return -EINVAL;
4546 }
4547 #endif
4548 
4549 #ifdef CONFIG_UNACCEPTED_MEMORY
4550 
4551 bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
4552 void accept_memory(phys_addr_t start, unsigned long size);
4553 
4554 #else
4555 
4556 static inline bool range_contains_unaccepted_memory(phys_addr_t start,
4557 						    unsigned long size)
4558 {
4559 	return false;
4560 }
4561 
4562 static inline void accept_memory(phys_addr_t start, unsigned long size)
4563 {
4564 }
4565 
4566 #endif
4567 
4568 static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
4569 {
4570 	return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
4571 }
4572 
4573 void vma_pgtable_walk_begin(struct vm_area_struct *vma);
4574 void vma_pgtable_walk_end(struct vm_area_struct *vma);
4575 
4576 int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
4577 int reserve_mem_release_by_name(const char *name);
4578 
4579 #ifdef CONFIG_64BIT
4580 int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
4581 #else
4582 static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
4583 {
4584 	/* noop on 32 bit */
4585 	return 0;
4586 }
4587 #endif
4588 
4589 /*
4590  * user_alloc_needs_zeroing checks if a user folio from the page allocator
4591  * still needs to be zeroed by the caller.
4592  */
4593 static inline bool user_alloc_needs_zeroing(void)
4594 {
4595 	/*
4596 	 * For user folios, an arch with cache aliasing requires a cache flush,
4597 	 * and arc changes folio->flags to make the icache coherent with the
4598 	 * dcache, so always report that zeroing is needed and make the caller
4599 	 * use clear_user_page()/clear_user_highpage().
4600 	 */
4601 	return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
4602 	       !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
4603 				   &init_on_alloc);
4604 }
4605 
4606 int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
4607 int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
4608 int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
4609 
4610 /*
4611  * DMA mapping IDs for page_pool
4612  *
4613  * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
4614  * stashes it in the upper bits of page->pp_magic. We always want to be able to
4615  * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
4616  * pages can have arbitrary kernel pointers stored in the same field as pp_magic
4617  * (since it overlaps with page->lru.next), so we must ensure that we cannot
4618  * mistake a valid kernel pointer for any of the values we write into this
4619  * field.
4620  *
4621  * On architectures that set POISON_POINTER_DELTA, this is already ensured,
4622  * since this value becomes part of PP_SIGNATURE; meaning we can just use the
4623  * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
4624  * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
4625  * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
4626  * known at compile-time.
4627  *
4628  * If the value of PAGE_OFFSET is not known at compile time, or if it is too
4629  * small to leave at least 8 bits available above PP_SIGNATURE, we define the
4630  * number of bits to be 0, which turns off the DMA index tracking altogether
4631  * (see page_pool_register_dma_index()).
4632  */
4633 #define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
4634 #if POISON_POINTER_DELTA > 0
4635 /* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
4636  * index to not overlap with that if set
4637  */
4638 #define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
4639 #else
4640 /* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */
4641 #define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
4642 #define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
4643 			    PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
4644 			    !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
4645 			      MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)
4646 
4647 #endif
4648 
4649 #define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
4650 				  PP_DMA_INDEX_SHIFT)
4651 
4652 /* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
4653  * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
4654  * the head page of a compound page and bit 1 for a pfmemalloc page, as well as the
4655  * bits used for the DMA index. page_is_pfmemalloc() is checked in
4656  * __page_pool_put_page() to avoid recycling the pfmemalloc page.
4657  */
4658 #define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
4659 
4660 #ifdef CONFIG_PAGE_POOL
4661 static inline bool page_pool_page_is_pp(const struct page *page)
4662 {
4663 	return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
4664 }
4665 #else
4666 static inline bool page_pool_page_is_pp(const struct page *page)
4667 {
4668 	return false;
4669 }
4670 #endif
4671 
4672 #define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
4673 #define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
4674 #define PAGE_SNAPSHOT_PG_IDLE  (1 << 2)
4675 
4676 struct page_snapshot {
4677 	struct folio folio_snapshot;
4678 	struct page page_snapshot;
4679 	unsigned long pfn;
4680 	unsigned long idx;
4681 	unsigned long flags;
4682 };
4683 
4684 static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
4685 {
4686 	return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
4687 }
4688 
4689 void snapshot_page(struct page_snapshot *ps, const struct page *page);
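
/*
 * Example (editorial sketch): take a self-consistent copy of a page first,
 * then inspect the copy without holding any reference on the real page.
 *
 *	struct page_snapshot ps;
 *
 *	snapshot_page(&ps, page);
 *	if (snapshot_page_is_faithful(&ps))
 *		... ps.page_snapshot and ps.folio_snapshot are stable copies
 */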
4690 
4691 #endif /* _LINUX_MM_H */
4692