1 /* SPDX-License-Identifier: GPL-2.0+ */
2 
3 #pragma once
4 
5 /* Forward declarations to avoid header cycle. */
struct vm_area_struct;
static inline void vma_start_write(struct vm_area_struct *vma);
static inline void vma_assert_write_locked(struct vm_area_struct *vma);
8 
9 extern const struct vm_operations_struct vma_dummy_vm_ops;
10 extern unsigned long stack_guard_gap;
12 extern unsigned long rlimit(unsigned int limit);
13 struct task_struct *get_current(void);
14 
15 #define MMF_HAS_MDWE	28
16 #define current get_current()
17 
18 /*
 * Define the task command name length as an enum so that it is visible to
 * BPF programs.
21  */
22 enum {
23 	TASK_COMM_LEN = 16,
24 };
25 
26 /* PARTIALLY implemented types. */
27 struct mm_struct {
28 	struct maple_tree mm_mt;
29 	int map_count;			/* number of VMAs */
30 	unsigned long total_vm;	   /* Total pages mapped */
31 	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
32 	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
33 	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
34 	unsigned long stack_vm;	   /* VM_STACK */
35 
36 	union {
37 		vm_flags_t def_flags;
38 		vma_flags_t def_vma_flags;
39 	};
40 
41 	mm_flags_t flags; /* Must use mm_flags_* helpers to access */
42 };
43 struct address_space {
44 	struct rb_root_cached	i_mmap;
45 	unsigned long		flags;
46 	atomic_t		i_mmap_writable;
47 };
48 struct file_operations {
49 	int (*mmap)(struct file *, struct vm_area_struct *);
50 	int (*mmap_prepare)(struct vm_area_desc *);
51 };
52 struct file {
53 	struct address_space	*f_mapping;
54 	const struct file_operations	*f_op;
55 };
56 struct anon_vma_chain {
57 	struct anon_vma *anon_vma;
58 	struct list_head same_vma;
59 };
60 struct task_struct {
61 	char comm[TASK_COMM_LEN];
62 	pid_t pid;
63 	struct mm_struct *mm;
64 
65 	/* Used for emulating ABI behavior of previous Linux versions: */
66 	unsigned int			personality;
67 };
68 
69 struct kref {
70 	refcount_t refcount;
71 };
72 
73 struct anon_vma_name {
74 	struct kref kref;
75 	/* The name needs to be at the end because it is dynamically sized. */
76 	char name[];
77 };
78 
79 /*
80  * Contains declarations that are DUPLICATED from kernel source in order to
 * facilitate userland VMA testing.
82  *
83  * These must be kept in sync with kernel source.
84  */
85 
86 #define VMA_LOCK_OFFSET	0x40000000
87 
88 typedef struct { unsigned long v; } freeptr_t;
89 
90 #define VM_NONE		0x00000000
91 
92 typedef int __bitwise vma_flag_t;
93 
94 #define ACCESS_PRIVATE(p, member) ((p)->member)
95 
96 #define DECLARE_VMA_BIT(name, bitnum) \
97 	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
98 #define DECLARE_VMA_BIT_ALIAS(name, aliased) \
99 	VMA_ ## name ## _BIT = VMA_ ## aliased ## _BIT
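/*
 * For example (illustrative only), DECLARE_VMA_BIT(READ, 0) expands to:
 *
 *	VMA_READ_BIT = ((__force vma_flag_t)0)
 *
 * and DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN) to:
 *
 *	VMA_STACK_BIT = VMA_GROWSDOWN_BIT
 */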
100 enum {
101 	DECLARE_VMA_BIT(READ, 0),
102 	DECLARE_VMA_BIT(WRITE, 1),
103 	DECLARE_VMA_BIT(EXEC, 2),
104 	DECLARE_VMA_BIT(SHARED, 3),
105 	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
106 	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
107 	DECLARE_VMA_BIT(MAYWRITE, 5),
108 	DECLARE_VMA_BIT(MAYEXEC, 6),
109 	DECLARE_VMA_BIT(MAYSHARE, 7),
110 	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
111 #ifdef CONFIG_MMU
112 	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
113 #else
114 	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
115 	DECLARE_VMA_BIT(MAYOVERLAY, 9),
116 #endif /* CONFIG_MMU */
117 	/* Page-ranges managed without "struct page", just pure PFN */
118 	DECLARE_VMA_BIT(PFNMAP, 10),
119 	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
120 	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
121 	DECLARE_VMA_BIT(LOCKED, 13),
122 	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
123 	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
124 	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
125 	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
126 	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
127 	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
128 	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
129 	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
130 	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
131 	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
132 	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
133 	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
134 	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
135 	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
136 	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
137 	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
138 	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
139 	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
	/* These bits are reused; we define specific uses below. */
141 	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
142 	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
143 	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
144 	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
145 	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
146 	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
147 	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
148 	/*
149 	 * This flag is used to connect VFIO to arch specific KVM code. It
150 	 * indicates that the memory under this VMA is safe for use with any
	 * non-cacheable memory type inside KVM. Some VFIO devices, on some
152 	 * platforms, are thought to be unsafe and can cause machine crashes
153 	 * if KVM does not lock down the memory type.
154 	 */
155 	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
156 #ifdef CONFIG_PPC32
157 	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
158 #else
159 	DECLARE_VMA_BIT(DROPPABLE, 40),
160 #endif
161 	DECLARE_VMA_BIT(UFFD_MINOR, 41),
162 	DECLARE_VMA_BIT(SEALED, 42),
163 	/* Flags that reuse flags above. */
164 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
165 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
166 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
167 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
168 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
169 #if defined(CONFIG_X86_USER_SHADOW_STACK)
170 	/*
	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
	 * support in core mm.
173 	 *
174 	 * These VMAs will get a single end guard page. This helps userspace
175 	 * protect itself from attacks. A single page is enough for current
176 	 * shadow stack archs (x86). See the comments near alloc_shstk() in
177 	 * arch/x86/kernel/shstk.c for more details on the guard size.
178 	 */
179 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
180 #elif defined(CONFIG_ARM64_GCS)
181 	/*
182 	 * arm64's Guarded Control Stack implements similar functionality and
183 	 * has similar constraints to shadow stacks.
184 	 */
185 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
186 #endif
187 	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),		/* Strong Access Ordering (powerpc) */
188 	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),		/* parisc */
189 	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
190 	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
191 	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
192 	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
193 	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
194 	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
195 #ifdef CONFIG_STACK_GROWSUP
196 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
197 	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
198 #else
199 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
200 #endif
201 };
202 
203 #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
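/* For example, VM_READ == BIT(0) == 0x1 and VM_WRITE == BIT(1) == 0x2. */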
204 #define VM_READ		INIT_VM_FLAG(READ)
205 #define VM_WRITE	INIT_VM_FLAG(WRITE)
206 #define VM_EXEC		INIT_VM_FLAG(EXEC)
207 #define VM_SHARED	INIT_VM_FLAG(SHARED)
208 #define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
209 #define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
210 #define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
211 #define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
212 #define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
213 #ifdef CONFIG_MMU
214 #define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
215 #else
216 #define VM_UFFD_MISSING	VM_NONE
217 #define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
218 #endif
219 #define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
220 #define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
221 #define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
222 #define VM_LOCKED	INIT_VM_FLAG(LOCKED)
223 #define VM_IO		INIT_VM_FLAG(IO)
224 #define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
225 #define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
226 #define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
227 #define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
228 #define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
229 #define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
230 #define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
231 #define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
232 #define VM_SYNC		INIT_VM_FLAG(SYNC)
233 #define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
234 #define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
235 #define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
236 #ifdef CONFIG_MEM_SOFT_DIRTY
237 #define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
238 #else
239 #define VM_SOFTDIRTY	VM_NONE
240 #endif
241 #define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
242 #define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
243 #define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
244 #define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
245 #define VM_STACK	INIT_VM_FLAG(STACK)
#ifdef CONFIG_STACK_GROWSUP
247 #define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
248 #else
249 #define VM_STACK_EARLY	VM_NONE
250 #endif
251 #ifdef CONFIG_ARCH_HAS_PKEYS
252 #define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
253 /* Despite the naming, these are FLAGS not bits. */
254 #define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
255 #define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
256 #define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
257 #if CONFIG_ARCH_PKEY_BITS > 3
258 #define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
259 #else
260 #define VM_PKEY_BIT3  VM_NONE
261 #endif /* CONFIG_ARCH_PKEY_BITS > 3 */
262 #if CONFIG_ARCH_PKEY_BITS > 4
263 #define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
264 #else
265 #define VM_PKEY_BIT4  VM_NONE
266 #endif /* CONFIG_ARCH_PKEY_BITS > 4 */
267 #endif /* CONFIG_ARCH_HAS_PKEYS */
268 #if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
269 #define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
270 #else
271 #define VM_SHADOW_STACK	VM_NONE
272 #endif
273 #if defined(CONFIG_PPC64)
274 #define VM_SAO		INIT_VM_FLAG(SAO)
275 #elif defined(CONFIG_PARISC)
276 #define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
277 #elif defined(CONFIG_SPARC64)
278 #define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
279 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
280 #elif defined(CONFIG_ARM64)
281 #define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
282 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
283 #elif !defined(CONFIG_MMU)
284 #define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
285 #endif
286 #ifndef VM_GROWSUP
287 #define VM_GROWSUP	VM_NONE
288 #endif
289 #ifdef CONFIG_ARM64_MTE
290 #define VM_MTE		INIT_VM_FLAG(MTE)
291 #define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
292 #else
293 #define VM_MTE		VM_NONE
294 #define VM_MTE_ALLOWED	VM_NONE
295 #endif
296 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
297 #define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
298 #else
299 #define VM_UFFD_MINOR	VM_NONE
300 #endif
301 #ifdef CONFIG_64BIT
302 #define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
303 #define VM_SEALED		INIT_VM_FLAG(SEALED)
304 #else
305 #define VM_ALLOW_ANY_UNCACHED	VM_NONE
306 #define VM_SEALED		VM_NONE
307 #endif
308 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
309 #define VM_DROPPABLE		INIT_VM_FLAG(DROPPABLE)
310 #else
311 #define VM_DROPPABLE		VM_NONE
312 #endif
313 
314 /* Bits set in the VMA until the stack is in its final location */
315 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
316 
317 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
318 
319 /* Common data flag combinations */
320 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
321 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
322 #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
323 				 VM_MAYWRITE | VM_MAYEXEC)
324 #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
325 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
326 
327 #ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
328 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
329 #endif
330 
331 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
332 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
333 #endif
334 
335 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
336 
337 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
338 
339 /* VMA basic access permission flags */
340 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
341 #define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
342 
343 /*
344  * Special vmas that are non-mergable, non-mlock()able.
345  */
346 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
347 
348 #define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,	\
349 				     VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
350 
351 #define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
352 #define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
353 #define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
354 #define STACK_TOP		TASK_SIZE_LOW
355 #define STACK_TOP_MAX		TASK_SIZE_MAX
356 
357 /* This mask represents all the VMA flag bits used by mlock */
358 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
359 
365 #define RLIMIT_STACK		3	/* max stack size */
366 #define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */
367 
368 #define CAP_IPC_LOCK         14
369 
370 #ifdef CONFIG_MEM_SOFT_DIRTY
371 #define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
372 #else
373 #define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
374 #endif
375 
376 #define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS
377 
378 #define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
379 
380 #define pgprot_val(x)		((x).pgprot)
381 #define __pgprot(x)		((pgprot_t) { (x) } )
382 
383 #define for_each_vma(__vmi, __vma)					\
384 	while (((__vma) = vma_next(&(__vmi))) != NULL)
385 
386 /* The MM code likes to work with exclusive end addresses */
387 #define for_each_vma_range(__vmi, __vma, __end)				\
388 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
389 
390 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
391 
392 #define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
393 
394 #define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
395 #define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)
396 
397 #define AS_MM_ALL_LOCKS 2
398 
399 #define swap(a, b) \
400 	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
401 
402 /*
403  * Flags for bug emulation.
404  *
405  * These occupy the top three bytes.
406  */
407 enum {
408 	READ_IMPLIES_EXEC =	0x0400000,
409 };
410 
411 struct vma_iterator {
412 	struct ma_state mas;
413 };
414 
415 #define VMA_ITERATOR(name, __mm, __addr)				\
416 	struct vma_iterator name = {					\
417 		.mas = {						\
418 			.tree = &(__mm)->mm_mt,				\
419 			.index = __addr,				\
420 			.node = NULL,					\
421 			.status = ma_start,				\
422 		},							\
423 	}
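
/*
 * Example (illustrative only): drive the iterator with the for_each_vma()
 * helper above to walk every VMA in an mm:
 *
 *	struct vm_area_struct *vma;
 *	unsigned long nr_vmas = 0;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	for_each_vma(vmi, vma)
 *		nr_vmas++;
 */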
424 
425 #define DEFINE_MUTEX(mutexname) \
426 	struct mutex mutexname = {}
427 
428 #define DECLARE_BITMAP(name, bits) \
429 	unsigned long name[BITS_TO_LONGS(bits)]
430 
431 #define EMPTY_VMA_FLAGS ((vma_flags_t){ })
432 
433 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
434 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
435 
436 static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
437 {
438 	const unsigned long *bitmap = flags->__vma_flags;
439 
440 	return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
441 }
442 
443 /* What action should be taken after an .mmap_prepare call is complete? */
444 enum mmap_action_type {
445 	MMAP_NOTHING,		/* Mapping is complete, no further action. */
446 	MMAP_REMAP_PFN,		/* Remap PFN range. */
447 	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
448 };
449 
450 /*
 * Describes an action that an .mmap_prepare hook can request be taken to
 * complete the mapping of a VMA. Specified in vm_area_desc.
453  */
454 struct mmap_action {
455 	union {
456 		/* Remap range. */
457 		struct {
458 			unsigned long start;
459 			unsigned long start_pfn;
460 			unsigned long size;
461 			pgprot_t pgprot;
462 		} remap;
463 	};
464 	enum mmap_action_type type;
465 
466 	/*
467 	 * If specified, this hook is invoked after the selected action has been
	 * successfully completed. Note that the VMA write lock is still held.
469 	 *
470 	 * The absolute minimum ought to be done here.
471 	 *
472 	 * Returns 0 on success, or an error code.
473 	 */
474 	int (*success_hook)(const struct vm_area_struct *vma);
475 
476 	/*
	 * If specified, this hook is invoked when an error occurs while
	 * attempting the selected action.
479 	 *
480 	 * The hook can return an error code in order to filter the error, but
481 	 * it is not valid to clear the error here.
482 	 */
483 	int (*error_hook)(int err);
484 
485 	/*
	 * This should be set in rare instances where the operation requires
	 * that rmap not be able to access the VMA until it is
	 * completely set up.
489 	 */
490 	bool hide_from_rmap_until_complete :1;
491 };
492 
493 /* Operations which modify VMAs. */
494 enum vma_operation {
495 	VMA_OP_SPLIT,
496 	VMA_OP_MERGE_UNFAULTED,
497 	VMA_OP_REMAP,
498 	VMA_OP_FORK,
499 };
500 
501 /*
502  * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
503  * manipulate mutable fields which will cause those fields to be updated in the
504  * resultant VMA.
505  *
506  * Helper functions are not required for manipulating any field.
507  */
508 struct vm_area_desc {
509 	/* Immutable state. */
510 	const struct mm_struct *const mm;
511 	struct file *const file; /* May vary from vm_file in stacked callers. */
512 	unsigned long start;
513 	unsigned long end;
514 
515 	/* Mutable fields. Populated with initial state. */
516 	pgoff_t pgoff;
517 	struct file *vm_file;
518 	vma_flags_t vma_flags;
519 	pgprot_t page_prot;
520 
521 	/* Write-only fields. */
522 	const struct vm_operations_struct *vm_ops;
523 	void *private_data;
524 
525 	/* Take further action? */
526 	struct mmap_action action;
527 };
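
/*
 * Illustrative sketch, not part of the duplicated kernel interface: how a
 * hypothetical driver's .mmap_prepare hook might fill in a vm_area_desc,
 * requesting that the mapping be completed with a PFN remap. The base PFN
 * used below is a made-up example value.
 */
static inline int example_mmap_prepare(struct vm_area_desc *desc)
{
	/* Request remap_pfn_range()-style completion once the VMA exists. */
	desc->action.type = MMAP_REMAP_PFN;
	desc->action.remap.start = desc->start;
	desc->action.remap.start_pfn = 0x1000UL; /* hypothetical device PFN */
	desc->action.remap.size = desc->end - desc->start;
	desc->action.remap.pgprot = desc->page_prot;
	return 0;
}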
528 
529 struct vm_area_struct {
530 	/* The first cache line has the info for VMA tree walking. */
531 
532 	union {
533 		struct {
534 			/* VMA covers [vm_start; vm_end) addresses within mm */
535 			unsigned long vm_start;
536 			unsigned long vm_end;
537 		};
538 		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
539 	};
540 
541 	struct mm_struct *vm_mm;	/* The address space we belong to. */
542 	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
543 
544 	/*
545 	 * Flags, see mm.h.
546 	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
547 	 */
548 	union {
549 		const vm_flags_t vm_flags;
550 		vma_flags_t flags;
551 	};
552 
553 #ifdef CONFIG_PER_VMA_LOCK
554 	/*
555 	 * Can only be written (using WRITE_ONCE()) while holding both:
556 	 *  - mmap_lock (in write mode)
557 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
558 	 * Can be read reliably while holding one of:
559 	 *  - mmap_lock (in read or write mode)
560 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
561 	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
562 	 * while holding nothing (except RCU to keep the VMA struct allocated).
563 	 *
564 	 * This sequence counter is explicitly allowed to overflow; sequence
565 	 * counter reuse can only lead to occasional unnecessary use of the
566 	 * slowpath.
567 	 */
568 	unsigned int vm_lock_seq;
569 #endif
570 
571 	/*
572 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
573 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
574 	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
575 	 * or brk vma (with NULL file) can only be in an anon_vma list.
576 	 */
577 	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
578 					  * page_table_lock */
579 	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
580 
581 	/* Function pointers to deal with this struct. */
582 	const struct vm_operations_struct *vm_ops;
583 
584 	/* Information about our backing store: */
585 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
586 					   units */
587 	struct file * vm_file;		/* File we map to (can be NULL). */
588 	void * vm_private_data;		/* was vm_pte (shared mem) */
589 
590 #ifdef CONFIG_SWAP
591 	atomic_long_t swap_readahead_info;
592 #endif
593 #ifndef CONFIG_MMU
594 	struct vm_region *vm_region;	/* NOMMU mapping region */
595 #endif
596 #ifdef CONFIG_NUMA
597 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
598 #endif
599 #ifdef CONFIG_NUMA_BALANCING
600 	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
601 #endif
602 #ifdef CONFIG_PER_VMA_LOCK
603 	/* Unstable RCU readers are allowed to read this. */
604 	refcount_t vm_refcnt;
605 #endif
606 	/*
607 	 * For areas with an address space and backing store,
608 	 * linkage into the address_space->i_mmap interval tree.
	 */
611 	struct {
612 		struct rb_node rb;
613 		unsigned long rb_subtree_last;
614 	} shared;
615 #ifdef CONFIG_ANON_VMA_NAME
616 	/*
617 	 * For private and shared anonymous mappings, a pointer to a null
618 	 * terminated string containing the name given to the vma, or NULL if
619 	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
620 	 */
621 	struct anon_vma_name *anon_name;
622 #endif
623 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
624 } __randomize_layout;
625 
626 struct vm_operations_struct {
627 	void (*open)(struct vm_area_struct * area);
628 	/**
629 	 * @close: Called when the VMA is being removed from the MM.
630 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
631 	 */
632 	void (*close)(struct vm_area_struct * area);
633 	/* Called any time before splitting to check if it's allowed */
634 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
635 	int (*mremap)(struct vm_area_struct *area);
636 	/*
637 	 * Called by mprotect() to make driver-specific permission
638 	 * checks before mprotect() is finalised.   The VMA must not
639 	 * be modified.  Returns 0 if mprotect() can proceed.
640 	 */
641 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
642 			unsigned long end, unsigned long newflags);
643 	vm_fault_t (*fault)(struct vm_fault *vmf);
644 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
645 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
646 			pgoff_t start_pgoff, pgoff_t end_pgoff);
647 	unsigned long (*pagesize)(struct vm_area_struct * area);
648 
649 	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
651 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
652 
653 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
654 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
655 
656 	/* called by access_process_vm when get_user_pages() fails, typically
657 	 * for use by special VMAs. See also generic_access_phys() for a generic
658 	 * implementation useful for any iomem mapping.
659 	 */
660 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
661 		      void *buf, int len, int write);
662 
663 	/* Called by the /proc/PID/maps code to ask the vma whether it
664 	 * has a special name.  Returning non-NULL will also cause this
665 	 * vma to be dumped unconditionally. */
666 	const char *(*name)(struct vm_area_struct *vma);
667 
668 #ifdef CONFIG_NUMA
669 	/*
670 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
671 	 * to hold the policy upon return.  Caller should pass NULL @new to
672 	 * remove a policy and fall back to surrounding context--i.e. do not
673 	 * install a MPOL_DEFAULT policy, nor the task or system default
674 	 * mempolicy.
675 	 */
676 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
677 
678 	/*
679 	 * get_policy() op must add reference [mpol_get()] to any policy at
680 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
681 	 * in mm/mempolicy.c will do this automatically.
682 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
683 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
684 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
685 	 * must return NULL--i.e., do not "fallback" to task or system default
686 	 * policy.
687 	 */
688 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
689 					unsigned long addr, pgoff_t *ilx);
690 #endif
691 #ifdef CONFIG_FIND_NORMAL_PAGE
692 	/*
693 	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
694 	 * allows for returning a "normal" page from vm_normal_page() even
695 	 * though the PTE indicates that the "struct page" either does not exist
696 	 * or should not be touched: "special".
697 	 *
698 	 * Do not add new users: this really only works when a "normal" page
699 	 * was mapped, but then the PTE got changed to something weird (+
700 	 * marked special) that would not make pte_pfn() identify the originally
701 	 * inserted page.
702 	 */
703 	struct page *(*find_normal_page)(struct vm_area_struct *vma,
704 					 unsigned long addr);
705 #endif /* CONFIG_FIND_NORMAL_PAGE */
706 };
707 
708 struct vm_unmapped_area_info {
709 #define VM_UNMAPPED_AREA_TOPDOWN 1
710 	unsigned long flags;
711 	unsigned long length;
712 	unsigned long low_limit;
713 	unsigned long high_limit;
714 	unsigned long align_mask;
715 	unsigned long align_offset;
716 	unsigned long start_gap;
717 };
718 
719 struct pagetable_move_control {
720 	struct vm_area_struct *old; /* Source VMA. */
721 	struct vm_area_struct *new; /* Destination VMA. */
722 	unsigned long old_addr; /* Address from which the move begins. */
723 	unsigned long old_end; /* Exclusive address at which old range ends. */
724 	unsigned long new_addr; /* Address to move page tables to. */
725 	unsigned long len_in; /* Bytes to remap specified by user. */
726 
727 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
728 	bool for_stack; /* Is this an early temp stack being moved? */
729 };
730 
731 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
732 	struct pagetable_move_control name = {				\
733 		.old = old_,						\
734 		.new = new_,						\
735 		.old_addr = old_addr_,					\
736 		.old_end = (old_addr_) + (len_),			\
737 		.new_addr = new_addr_,					\
738 		.len_in = len_,						\
739 	}
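
/*
 * Example usage (illustrative only): describe a move of @len bytes of page
 * tables from @old_addr in @old_vma to @new_addr in @new_vma:
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 */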
740 
741 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
742 {
743 	mas_pause(&vmi->mas);
744 }
745 
746 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
747 {
748 	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
749 }
750 
751 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
752 {
753 	return __pgprot(vm_flags);
754 }
755 
756 static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
757 {
758 	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
759 }
760 
761 /*
762  * Copy value to the first system word of VMA flags, non-atomically.
763  *
764  * IMPORTANT: This does not overwrite bytes past the first system word. The
765  * caller must account for this.
766  */
767 static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
768 {
769 	*ACCESS_PRIVATE(flags, __vma_flags) = value;
770 }
771 
772 /*
773  * Copy value to the first system word of VMA flags ONCE, non-atomically.
774  *
775  * IMPORTANT: This does not overwrite bytes past the first system word. The
776  * caller must account for this.
777  */
778 static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
779 {
780 	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
781 
782 	WRITE_ONCE(*bitmap, value);
783 }
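
/*
 * Illustrating the caveat above: when vma_flags_t spans more than one system
 * word, bits at or above BITS_PER_LONG are left untouched by the word
 * helpers, so callers wanting a clean slate must clear the whole bitmap
 * first, as vm_flags_init() below does:
 *
 *	vma_flags_clear_all(&flags);
 *	vma_flags_overwrite_word(&flags, VM_READ | VM_WRITE);
 */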
784 
785 /* Update the first system word of VMA flags setting bits, non-atomically. */
786 static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
787 {
788 	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
789 
790 	*bitmap |= value;
791 }
792 
793 /* Update the first system word of VMA flags clearing bits, non-atomically. */
794 static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
795 {
796 	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
797 
798 	*bitmap &= ~value;
799 }
800 
801 static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
802 {
803 	bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
804 }
805 
806 static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
807 		vma_flag_t bit)
808 {
809 	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
810 
811 	__set_bit((__force int)bit, bitmap);
812 }
813 
814 /* Use when VMA is not part of the VMA tree and needs no locking */
815 static inline void vm_flags_init(struct vm_area_struct *vma,
816 				 vm_flags_t flags)
817 {
818 	vma_flags_clear_all(&vma->flags);
819 	vma_flags_overwrite_word(&vma->flags, flags);
820 }
821 
822 /*
 * Use when the VMA is part of the VMA tree and modifications need
 * coordination. Note: vm_flags_reset() and vm_flags_reset_once() do not lock
 * the VMA, so it must be write-locked explicitly beforehand.
826  */
827 static inline void vm_flags_reset(struct vm_area_struct *vma,
828 				  vm_flags_t flags)
829 {
830 	vma_assert_write_locked(vma);
831 	vm_flags_init(vma, flags);
832 }
833 
834 static inline void vm_flags_reset_once(struct vm_area_struct *vma,
835 				       vm_flags_t flags)
836 {
837 	vma_assert_write_locked(vma);
838 	/*
839 	 * The user should only be interested in avoiding reordering of
840 	 * assignment to the first word.
841 	 */
842 	vma_flags_clear_all(&vma->flags);
843 	vma_flags_overwrite_word_once(&vma->flags, flags);
844 }
845 
846 static inline void vm_flags_set(struct vm_area_struct *vma,
847 				vm_flags_t flags)
848 {
849 	vma_start_write(vma);
850 	vma_flags_set_word(&vma->flags, flags);
851 }
852 
853 static inline void vm_flags_clear(struct vm_area_struct *vma,
854 				  vm_flags_t flags)
855 {
856 	vma_start_write(vma);
857 	vma_flags_clear_word(&vma->flags, flags);
858 }
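
/*
 * Illustrative sketch, not part of the duplicated kernel interface: typical
 * use of the helpers above, distinguishing a VMA not yet in the VMA tree
 * (init) from one already visible to readers (set/clear, which write-lock
 * the VMA via vma_start_write() first).
 */
static inline void example_vm_flags_usage(struct vm_area_struct *vma)
{
	vm_flags_init(vma, VM_READ | VM_MAYREAD);	/* detached VMA */
	vm_flags_set(vma, VM_LOCKED);			/* attached VMA */
	vm_flags_clear(vma, VM_LOCKED);
}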
859 
860 static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
861 		size_t count, const vma_flag_t *bits)
862 {
863 	int i;
864 
865 	for (i = 0; i < count; i++)
866 		vma_flags_set_flag(&flags, bits[i]);
867 	return flags;
868 }
869 
870 #define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS,			\
871 		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
872 
873 #define append_vma_flags(flags, ...) __mk_vma_flags(flags,			\
874 		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
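
/*
 * Illustrative sketch: the variadic helpers above build a vma_flags_t from a
 * list of bit numbers, e.g. a read/write access mask:
 */
static inline vma_flags_t example_rw_flags(void)
{
	return mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
}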
875 
876 static __always_inline bool vma_flags_test(const vma_flags_t *flags,
877 		vma_flag_t bit)
878 {
879 	const unsigned long *bitmap = flags->__vma_flags;
880 
881 	return test_bit((__force int)bit, bitmap);
882 }
883 
884 static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
885 						      vma_flags_t to_and)
886 {
887 	vma_flags_t dst;
888 	unsigned long *bitmap_dst = dst.__vma_flags;
889 	const unsigned long *bitmap = flags->__vma_flags;
890 	const unsigned long *bitmap_to_and = to_and.__vma_flags;
891 
892 	bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
893 	return dst;
894 }
895 
896 #define vma_flags_and(flags, ...)		\
897 	vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))
898 
899 static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
900 		vma_flags_t to_test)
901 {
902 	const unsigned long *bitmap = flags->__vma_flags;
903 	const unsigned long *bitmap_to_test = to_test.__vma_flags;
904 
905 	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
906 }
907 
908 #define vma_flags_test_any(flags, ...) \
909 	vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))
910 
911 static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
912 		vma_flags_t to_test)
913 {
914 	const unsigned long *bitmap = flags->__vma_flags;
915 	const unsigned long *bitmap_to_test = to_test.__vma_flags;
916 
917 	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
918 }
919 
920 #define vma_flags_test_all(flags, ...) \
921 	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
922 
923 static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
924 {
925 	unsigned long *bitmap = flags->__vma_flags;
926 	const unsigned long *bitmap_to_set = to_set.__vma_flags;
927 
928 	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
929 }
930 
931 #define vma_flags_set(flags, ...) \
932 	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))
933 
934 static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear)
935 {
936 	unsigned long *bitmap = flags->__vma_flags;
937 	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;
938 
939 	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
940 }
941 
942 #define vma_flags_clear(flags, ...) \
943 	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
944 
945 static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
946 		const vma_flags_t *flags_other)
947 {
948 	vma_flags_t dst;
949 	const unsigned long *bitmap_other = flags_other->__vma_flags;
950 	const unsigned long *bitmap = flags->__vma_flags;
951 	unsigned long *bitmap_dst = dst.__vma_flags;
952 
953 	bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
954 	return dst;
955 }
956 
957 static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
958 						const vma_flags_t *flags_other)
959 {
960 	const unsigned long *bitmap = flags->__vma_flags;
961 	const unsigned long *bitmap_other = flags_other->__vma_flags;
962 
963 	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
964 }
965 
966 static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
967 						vma_flags_t flags_other)
968 {
969 	const unsigned long *bitmap = flags->__vma_flags;
970 	const unsigned long *bitmap_other = flags_other.__vma_flags;
971 
972 	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
973 }
974 
975 #define vma_flags_same(flags, ...) \
976 	vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))
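
/*
 * Example queries and updates (illustrative only):
 *
 *	if (vma_flags_test_any(&vma->flags, VMA_IO_BIT, VMA_PFNMAP_BIT))
 *		return -EINVAL;
 *	vma_flags_set(&vma->flags, VMA_DONTDUMP_BIT);
 */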
977 
978 static inline bool vma_test_all_mask(const struct vm_area_struct *vma,
979 				     vma_flags_t flags)
980 {
981 	return vma_flags_test_all_mask(&vma->flags, flags);
982 }
983 
984 #define vma_test_all(vma, ...) \
985 	vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))
986 
987 static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
988 {
989 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
990 		(VM_SHARED | VM_MAYWRITE);
991 }
992 
993 static inline void vma_set_flags_mask(struct vm_area_struct *vma,
994 				      vma_flags_t flags)
995 {
996 	vma_flags_set_mask(&vma->flags, flags);
997 }
998 
999 #define vma_set_flags(vma, ...) \
1000 	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
1001 
1002 static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
1003 		vma_flag_t bit)
1004 {
1005 	return vma_flags_test(&desc->vma_flags, bit);
1006 }
1007 
1008 static inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
1009 					    vma_flags_t flags)
1010 {
1011 	return vma_flags_test_any_mask(&desc->vma_flags, flags);
1012 }
1013 
1014 #define vma_desc_test_any(desc, ...) \
1015 	vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))
1016 
1017 static inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
1018 		vma_flags_t flags)
1019 {
1020 	return vma_flags_test_all_mask(&desc->vma_flags, flags);
1021 }
1022 
1023 #define vma_desc_test_all(desc, ...) \
1024 	vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))
1025 
1026 static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
1027 					   vma_flags_t flags)
1028 {
1029 	vma_flags_set_mask(&desc->vma_flags, flags);
1030 }
1031 
1032 #define vma_desc_set_flags(desc, ...) \
1033 	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
1034 
1035 static inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
1036 					     vma_flags_t flags)
1037 {
1038 	vma_flags_clear_mask(&desc->vma_flags, flags);
1039 }
1040 
1041 #define vma_desc_clear_flags(desc, ...) \
1042 	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
1043 
1044 static inline bool is_shared_maywrite(const vma_flags_t *flags)
1045 {
1046 	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
1047 }
1048 
1049 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
1050 {
1051 	return is_shared_maywrite(&vma->flags);
1052 }
1053 
1054 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
1055 {
1056 	/*
1057 	 * Uses mas_find() to get the first VMA when the iterator starts.
1058 	 * Calling mas_next() could skip the first entry.
1059 	 */
1060 	return mas_find(&vmi->mas, ULONG_MAX);
1061 }
1062 
1063 /*
1064  * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
1065  * assertions should be made either under mmap_write_lock or when the object
1066  * has been isolated under mmap_write_lock, ensuring no competing writers.
1067  */
1068 static inline void vma_assert_attached(struct vm_area_struct *vma)
1069 {
1070 	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
1071 }
1072 
1073 static inline void vma_assert_detached(struct vm_area_struct *vma)
1074 {
1075 	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
1076 }
1077 
1078 static inline void vma_assert_write_locked(struct vm_area_struct *);
1079 static inline void vma_mark_attached(struct vm_area_struct *vma)
1080 {
1081 	vma_assert_write_locked(vma);
1082 	vma_assert_detached(vma);
1083 	refcount_set_release(&vma->vm_refcnt, 1);
1084 }
1085 
1086 static inline void vma_mark_detached(struct vm_area_struct *vma)
1087 {
1088 	vma_assert_write_locked(vma);
1089 	vma_assert_attached(vma);
1090 	/* We are the only writer, so no need to use vma_refcount_put(). */
1091 	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
1092 		/*
1093 		 * Reader must have temporarily raised vm_refcnt but it will
1094 		 * drop it without using the vma since vma is write-locked.
1095 		 */
1096 	}
1097 }
1098 
1099 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
1100 {
1101 	memset(vma, 0, sizeof(*vma));
1102 	vma->vm_mm = mm;
1103 	vma->vm_ops = &vma_dummy_vm_ops;
1104 	INIT_LIST_HEAD(&vma->anon_vma_chain);
1105 	vma->vm_lock_seq = UINT_MAX;
1106 }
1107 
1108 /*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
1112  */
1113 #define is_exec_mapping(flags) \
1114 	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)
1115 
1116 #define is_stack_mapping(flags) \
1117 	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))
1118 
1119 #define is_data_mapping(flags) \
1120 	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
1121 
1122 static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
1123 				   long npages)
1124 {
1125 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
1126 
1127 	if (is_exec_mapping(flags))
1128 		mm->exec_vm += npages;
1129 	else if (is_stack_mapping(flags))
1130 		mm->stack_vm += npages;
1131 	else if (is_data_mapping(flags))
1132 		mm->data_vm += npages;
1133 }
1134 
1135 #undef is_exec_mapping
1136 #undef is_stack_mapping
1137 #undef is_data_mapping
1138 
1139 static inline void vm_unacct_memory(long pages)
1140 {
1141 	vm_acct_memory(-pages);
1142 }
1143 
1144 static inline void mapping_allow_writable(struct address_space *mapping)
1145 {
1146 	atomic_inc(&mapping->i_mmap_writable);
1147 }
1148 
1149 static inline
1150 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
1151 {
1152 	return mas_find(&vmi->mas, max - 1);
1153 }
1154 
1155 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1156 			unsigned long start, unsigned long end, gfp_t gfp)
1157 {
1158 	__mas_set_range(&vmi->mas, start, end - 1);
1159 	mas_store_gfp(&vmi->mas, NULL, gfp);
1160 	if (unlikely(mas_is_err(&vmi->mas)))
1161 		return -ENOMEM;
1162 
1163 	return 0;
1164 }
1165 
1166 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1167 {
1168 	vma->vm_ops = NULL;
1169 }
1170 
1171 /* Declared in vma.h. */
1172 static inline void set_vma_from_desc(struct vm_area_struct *vma,
1173 		struct vm_area_desc *desc);
1174 
1175 static inline int __compat_vma_mmap(const struct file_operations *f_op,
1176 		struct file *file, struct vm_area_struct *vma)
1177 {
1178 	struct vm_area_desc desc = {
1179 		.mm = vma->vm_mm,
1180 		.file = file,
1181 		.start = vma->vm_start,
1182 		.end = vma->vm_end,
1183 
1184 		.pgoff = vma->vm_pgoff,
1185 		.vm_file = vma->vm_file,
1186 		.vma_flags = vma->flags,
1187 		.page_prot = vma->vm_page_prot,
1188 
1189 		.action.type = MMAP_NOTHING, /* Default */
1190 	};
1191 	int err;
1192 
1193 	err = f_op->mmap_prepare(&desc);
1194 	if (err)
1195 		return err;
1196 
1197 	mmap_action_prepare(&desc.action, &desc);
1198 	set_vma_from_desc(vma, &desc);
1199 	return mmap_action_complete(&desc.action, vma);
1200 }
1201 
1202 static inline int compat_vma_mmap(struct file *file,
1203 		struct vm_area_struct *vma)
1204 {
1205 	return __compat_vma_mmap(file->f_op, file, vma);
1206 }
1207 
1208 
1209 static inline void vma_iter_init(struct vma_iterator *vmi,
1210 		struct mm_struct *mm, unsigned long addr)
1211 {
1212 	mas_init(&vmi->mas, &mm->mm_mt, addr);
1213 }
1214 
1215 static inline unsigned long vma_pages(struct vm_area_struct *vma)
1216 {
1217 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1218 }
1219 
1220 static inline void mmap_assert_locked(struct mm_struct *);
1221 static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1222 						unsigned long start_addr,
1223 						unsigned long end_addr)
1224 {
1225 	unsigned long index = start_addr;
1226 
1227 	mmap_assert_locked(mm);
1228 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
1229 }
1230 
1231 static inline
1232 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
1233 {
1234 	return mtree_load(&mm->mm_mt, addr);
1235 }
1236 
1237 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
1238 {
1239 	return mas_prev(&vmi->mas, 0);
1240 }
1241 
1242 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1243 {
1244 	mas_set(&vmi->mas, addr);
1245 }
1246 
1247 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1248 {
1249 	return !vma->vm_ops;
1250 }
1251 
1252 /* Defined in vma.h, so temporarily define here to avoid circular dependency. */
1253 #define vma_iter_load(vmi) \
1254 	mas_walk(&(vmi)->mas)
1255 
1256 static inline struct vm_area_struct *
1257 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1258 			struct vm_area_struct **pprev)
1259 {
1260 	struct vm_area_struct *vma;
1261 	VMA_ITERATOR(vmi, mm, addr);
1262 
1263 	vma = vma_iter_load(&vmi);
1264 	*pprev = vma_prev(&vmi);
1265 	if (!vma)
1266 		vma = vma_next(&vmi);
1267 	return vma;
1268 }
1269 
1270 #undef vma_iter_load
1271 
1272 static inline void vma_iter_free(struct vma_iterator *vmi)
1273 {
1274 	mas_destroy(&vmi->mas);
1275 }
1276 
1277 static inline
1278 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
1279 {
1280 	return mas_next_range(&vmi->mas, ULONG_MAX);
1281 }
1282 
1283 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1284 
1285 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
1286 static inline void vma_set_page_prot(struct vm_area_struct *vma)
1287 {
1288 	vm_flags_t vm_flags = vma->vm_flags;
1289 	pgprot_t vm_page_prot;
1290 
1291 	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1292 	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));
1293 
1294 	if (vma_wants_writenotify(vma, vm_page_prot)) {
1295 		vm_flags &= ~VM_SHARED;
1296 		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1297 		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
1298 	}
1299 	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
1300 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
1301 }
1302 
1303 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
1304 {
1305 	if (vma->vm_flags & VM_GROWSDOWN)
1306 		return stack_guard_gap;
1307 
1308 	/* See reasoning around the VM_SHADOW_STACK definition */
1309 	if (vma->vm_flags & VM_SHADOW_STACK)
1310 		return PAGE_SIZE;
1311 
1312 	return 0;
1313 }
1314 
1315 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
1316 {
1317 	unsigned long gap = stack_guard_start_gap(vma);
1318 	unsigned long vm_start = vma->vm_start;
1319 
1320 	vm_start -= gap;
1321 	if (vm_start > vma->vm_start)
1322 		vm_start = 0;
1323 	return vm_start;
1324 }
1325 
1326 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
1327 {
1328 	unsigned long vm_end = vma->vm_end;
1329 
1330 	if (vma->vm_flags & VM_GROWSUP) {
1331 		vm_end += stack_guard_gap;
1332 		if (vm_end < vma->vm_end)
1333 			vm_end = -PAGE_SIZE;
1334 	}
1335 	return vm_end;
1336 }
1337 
1338 static inline bool vma_is_accessible(struct vm_area_struct *vma)
1339 {
1340 	return vma->vm_flags & VM_ACCESS_FLAGS;
1341 }
1342 
1343 static inline bool mlock_future_ok(const struct mm_struct *mm,
1344 		vm_flags_t vm_flags, unsigned long bytes)
1345 {
1346 	unsigned long locked_pages, limit_pages;
1347 
1348 	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1349 		return true;
1350 
1351 	locked_pages = bytes >> PAGE_SHIFT;
1352 	locked_pages += mm->locked_vm;
1353 
1354 	limit_pages = rlimit(RLIMIT_MEMLOCK);
1355 	limit_pages >>= PAGE_SHIFT;
1356 
1357 	return locked_pages <= limit_pages;
1358 }
1359 
1360 static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
1361 {
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;
1365 
1366 	/* If the new VMA is not executable, we have nothing to deny. */
1367 	if (!(new & VM_EXEC))
1368 		return false;
1369 
1370 	/* Under MDWE we do not accept newly writably executable VMAs... */
1371 	if (new & VM_WRITE)
1372 		return true;
1373 
1374 	/* ...nor previously non-executable VMAs becoming executable. */
1375 	if (!(old & VM_EXEC))
1376 		return true;
1377 
1378 	return false;
1379 }
1380 
1381 static inline int mapping_map_writable(struct address_space *mapping)
1382 {
1383 	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
1384 		0 : -EPERM;
1385 }
1386 
1387 /* Did the driver provide valid mmap hook configuration? */
1388 static inline bool can_mmap_file(struct file *file)
1389 {
1390 	bool has_mmap = file->f_op->mmap;
1391 	bool has_mmap_prepare = file->f_op->mmap_prepare;
1392 
1393 	/* Hooks are mutually exclusive. */
1394 	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
1395 		return false;
1396 	if (!has_mmap && !has_mmap_prepare)
1397 		return false;
1398 
1399 	return true;
1400 }
1401 
1402 static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
1403 {
1404 	if (file->f_op->mmap_prepare)
1405 		return compat_vma_mmap(file, vma);
1406 
1407 	return file->f_op->mmap(file, vma);
1408 }
1409 
1410 static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
1411 {
1412 	return file->f_op->mmap_prepare(desc);
1413 }
1414 
1415 static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
1416 {
1417 	/* Changing an anonymous vma with this is illegal */
1418 	get_file(file);
1419 	swap(vma->vm_file, file);
1420 	fput(file);
1421 }
1422 
1423 extern int sysctl_max_map_count;
1424 static inline int get_sysctl_max_map_count(void)
1425 {
1426 	return READ_ONCE(sysctl_max_map_count);
1427 }
1428 
1429 #ifndef pgtable_supports_soft_dirty
1430 #define pgtable_supports_soft_dirty()	IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
1431 #endif
1432