/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/* Forward declarations to avoid header cycle. */
struct vm_area_struct;
static inline void vma_start_write(struct vm_area_struct *vma);

extern const struct vm_operations_struct vma_dummy_vm_ops;
extern unsigned long stack_guard_gap;
extern unsigned long rlimit(unsigned int limit);
struct task_struct *get_current(void);

#define MMF_HAS_MDWE	28
#define current get_current()

/*
 * Define the task command name length as an enum so that it is visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/* PARTIALLY implemented types. */
struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;	   /* Total pages mapped */
	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;	   /* VM_STACK */

	union {
		vm_flags_t def_flags;
		vma_flags_t def_vma_flags;
	};

	mm_flags_t flags; /* Must use mm_flags_* helpers to access */
};
struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};
struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};
struct file {
	struct address_space	*f_mapping;
	const struct file_operations	*f_op;
};
struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};
struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;
};

struct kref {
	refcount_t refcount;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

/*
 * Contains declarations that are DUPLICATED from kernel source in order to
 * facilitate userland VMA testing.
 *
 * These must be kept in sync with kernel source.
 */

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

#define VM_NONE		0x00000000

typedef int __bitwise vma_flag_t;

#define ACCESS_PRIVATE(p, member) ((p)->member)

#define DECLARE_VMA_BIT(name, bitnum) \
	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
#define DECLARE_VMA_BIT_ALIAS(name, aliased) \
	VMA_ ## name ## _BIT = VMA_ ## aliased ## _BIT
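/*
 * For illustration: DECLARE_VMA_BIT(READ, 0) expands to
 *
 *	VMA_READ_BIT = ((__force vma_flag_t)0)
 *
 * and DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN) expands to
 *
 *	VMA_STACK_BIT = VMA_GROWSDOWN_BIT
 *
 * so each enumerator below names a bit position within vma_flags_t rather
 * than a mask value.
 */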
enum {
	DECLARE_VMA_BIT(READ, 0),
	DECLARE_VMA_BIT(WRITE, 1),
	DECLARE_VMA_BIT(EXEC, 2),
	DECLARE_VMA_BIT(SHARED, 3),
	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
	DECLARE_VMA_BIT(MAYWRITE, 5),
	DECLARE_VMA_BIT(MAYEXEC, 6),
	DECLARE_VMA_BIT(MAYSHARE, 7),
	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
#ifdef CONFIG_MMU
	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
#else
	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
	DECLARE_VMA_BIT(MAYOVERLAY, 9),
#endif /* CONFIG_MMU */
	/* Page-ranges managed without "struct page", just pure PFN */
	DECLARE_VMA_BIT(PFNMAP, 10),
	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
	DECLARE_VMA_BIT(LOCKED, 13),
	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
	/* These bits are reused, we define specific uses below. */
	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
	/*
	 * This flag is used to connect VFIO to arch specific KVM code. It
	 * indicates that the memory under this VMA is safe for use with any
	 * non-cacheable memory type inside KVM. Some VFIO devices, on some
	 * platforms, are thought to be unsafe and can cause machine crashes
	 * if KVM does not lock down the memory type.
	 */
	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
#ifdef CONFIG_PPC32
	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
#else
	DECLARE_VMA_BIT(DROPPABLE, 40),
#endif
	DECLARE_VMA_BIT(UFFD_MINOR, 41),
	DECLARE_VMA_BIT(SEALED, 42),
	/* Flags that reuse flags above. */
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
#if defined(CONFIG_X86_USER_SHADOW_STACK)
	/*
	 * VM_SHADOW_STACK should not be set with VM_SHARED due to lack of
	 * support in core mm.
	 *
	 * These VMAs will get a single end guard page. This helps userspace
	 * protect itself from attacks. A single page is enough for current
	 * shadow stack archs (x86). See the comments near alloc_shstk() in
	 * arch/x86/kernel/shstk.c for more details on the guard size.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
#elif defined(CONFIG_ARM64_GCS)
	/*
	 * arm64's Guarded Control Stack implements similar functionality and
	 * has similar constraints to shadow stacks.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
#endif
	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),		/* Strong Access Ordering (powerpc) */
	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),		/* parisc */
	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
#ifdef CONFIG_STACK_GROWSUP
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
#else
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
#endif
};

#define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
#define VM_READ		INIT_VM_FLAG(READ)
#define VM_WRITE	INIT_VM_FLAG(WRITE)
#define VM_EXEC		INIT_VM_FLAG(EXEC)
#define VM_SHARED	INIT_VM_FLAG(SHARED)
#define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
#define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
#define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
#define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
#define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
#else
#define VM_UFFD_MISSING	VM_NONE
#define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
#endif
#define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
#define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
#define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
#define VM_LOCKED	INIT_VM_FLAG(LOCKED)
#define VM_IO		INIT_VM_FLAG(IO)
#define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
#define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
#define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
#define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
#define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
#define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
#define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
#define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
#define VM_SYNC		INIT_VM_FLAG(SYNC)
#define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
#define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
#define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
#ifdef CONFIG_MEM_SOFT_DIRTY
#define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
#else
#define VM_SOFTDIRTY	VM_NONE
#endif
#define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
#define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
#define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
#define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
#define VM_STACK	INIT_VM_FLAG(STACK)
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
#else
#define VM_STACK_EARLY	VM_NONE
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
#define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
/* Despite the naming, these are FLAGS not bits. */
#define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
#define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
#define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
#if CONFIG_ARCH_PKEY_BITS > 3
#define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
#else
#define VM_PKEY_BIT3  VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 3 */
#if CONFIG_ARCH_PKEY_BITS > 4
#define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
#else
#define VM_PKEY_BIT4  VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
#else
#define VM_SHADOW_STACK	VM_NONE
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO		INIT_VM_FLAG(SAO)
#elif defined(CONFIG_PARISC)
#define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
#elif defined(CONFIG_SPARC64)
#define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif defined(CONFIG_ARM64)
#define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif !defined(CONFIG_MMU)
#define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
#endif
#ifndef VM_GROWSUP
#define VM_GROWSUP	VM_NONE
#endif
#ifdef CONFIG_ARM64_MTE
#define VM_MTE		INIT_VM_FLAG(MTE)
#define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
#else
#define VM_MTE		VM_NONE
#define VM_MTE_ALLOWED	VM_NONE
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
#define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
#else
#define VM_UFFD_MINOR	VM_NONE
#endif
#ifdef CONFIG_64BIT
#define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
#define VM_SEALED		INIT_VM_FLAG(SEALED)
#else
#define VM_ALLOW_ANY_UNCACHED	VM_NONE
#define VM_SEALED		VM_NONE
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
#define VM_DROPPABLE		INIT_VM_FLAG(DROPPABLE)
#else
#define VM_DROPPABLE		VM_NONE
#endif
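/*
 * For illustration: INIT_VM_FLAG(READ) expands to BIT(0), so VM_READ is
 * 0x00000001, VM_WRITE is 0x00000002, and so on; these legacy vm_flags_t
 * masks only exist for flags whose bit number fits in a system word.
 */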

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
		       VMA_EXEC_BIT : VMA_READ_BIT)

/* Common data flag combinations */
#define VMA_DATA_FLAGS_TSK_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT,	  \
		VMA_MAYEXEC_BIT)
#define VMA_DATA_FLAGS_NON_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
#define VMA_DATA_FLAGS_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT,	  \
		VMA_MAYEXEC_BIT)
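/*
 * Note: when READ_IMPLIES_EXEC is not set, TASK_EXEC_BIT evaluates to
 * VMA_READ_BIT, which VMA_DATA_FLAGS_TSK_EXEC already passes to
 * mk_vma_flags(); setting an already-set bit is a no-op, so the ternary
 * adds VMA_EXEC_BIT only for READ_IMPLIES_EXEC tasks.
 */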

#ifndef VMA_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VMA_DATA_DEFAULT_FLAGS  VMA_DATA_FLAGS_EXEC
#endif

#ifndef VMA_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
#endif

#define VMA_STACK_FLAGS	append_vma_flags(VMA_STACK_DEFAULT_FLAGS,	\
		VMA_STACK_BIT, VMA_ACCOUNT_BIT)
/* Temporary until VMA flags conversion complete. */
#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
				       VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)

#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,	\
				     VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define RLIMIT_STACK		3	/* max stack size */
#define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK         14

#ifdef CONFIG_MEM_SOFT_DIRTY
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
#else
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
#endif

#define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS

#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define AS_MM_ALL_LOCKS 2

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
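/*
 * For illustration, a minimal sketch of walking every VMA in an mm using
 * the iterator above (mm is assumed to be a struct mm_struct *; names are
 * local to this example):
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	for_each_vma(vmi, vma)
 *		printf("VMA [0x%lx, 0x%lx)\n", vma->vm_start, vma->vm_end);
 */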

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define EMPTY_VMA_FLAGS ((vma_flags_t){ })

#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
}

/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
};

/*
 * Describes an action that an mmap_prepare hook can request be taken to
 * complete the mapping of a VMA. Specified in vm_area_desc.
 */
struct mmap_action {
	union {
		/* Remap range. */
		struct {
			unsigned long start;
			unsigned long start_pfn;
			unsigned long size;
			pgprot_t pgprot;
		} remap;
	};
	enum mmap_action_type type;

	/*
	 * If specified, this hook is invoked after the selected action has been
	 * successfully completed. Note that the VMA write lock is still held.
	 *
	 * The absolute minimum ought to be done here.
	 *
	 * Returns 0 on success, or an error code.
	 */
	int (*success_hook)(const struct vm_area_struct *vma);

	/*
	 * If specified, this hook is invoked when an error occurs while
	 * attempting the selected action.
	 *
	 * The hook can return an error code in order to filter the error, but
	 * it is not valid to clear the error here.
	 */
	int (*error_hook)(int err);

	/*
	 * This should be set in rare instances where the operation requires
	 * that rmap not be able to access the VMA until it is completely set
	 * up.
	 */
	bool hide_from_rmap_until_complete :1;
};

/* Operations which modify VMAs. */
enum vma_operation {
	VMA_OP_SPLIT,
	VMA_OP_MERGE_UNFAULTED,
	VMA_OP_REMAP,
	VMA_OP_FORK,
};

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	const struct mm_struct *const mm;
	struct file *const file; /* May vary from vm_file in stacked callers. */
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *vm_file;
	vma_flags_t vma_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;

	/* Take further action? */
	struct mmap_action action;
};
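/*
 * For illustration, a minimal sketch of a driver .mmap_prepare hook that
 * requests a PFN remap through the action mechanism above (my_mmap_prepare
 * and my_dev_pfn are hypothetical):
 *
 *	static int my_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		vma_desc_set_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT);
 *		desc->action.type = MMAP_REMAP_PFN;
 *		desc->action.remap.start = desc->start;
 *		desc->action.remap.start_pfn = my_dev_pfn;
 *		desc->action.remap.size = desc->end - desc->start;
 *		desc->action.remap.pgprot = desc->page_prot;
 *		return 0;
 *	}
 */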

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vma_flags_t flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.  The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);

	/*
	 * Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS.
	 */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/*
	 * Called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/*
	 * Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally.
	 */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not exist
	 * or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the originally
	 * inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old; /* Source VMA. */
	struct vm_area_struct *new; /* Destination VMA. */
	unsigned long old_addr; /* Address from which the move begins. */
	unsigned long old_end; /* Exclusive address at which old range ends. */
	unsigned long new_addr; /* Address to move page tables to. */
	unsigned long len_in; /* Bytes to remap specified by user. */

	bool need_rmap_locks; /* Do rmap locks need to be taken? */
	bool for_stack; /* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
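/*
 * For illustration, a minimal sketch of preparing a page-table move for an
 * mremap()-style relocation (old_vma, new_vma, old_addr, new_addr and len
 * are hypothetical locals, as is the consumer function):
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 *	pmc.need_rmap_locks = true;	// if the move requires rmap locks
 *	move_page_tables(&pmc);		// hypothetical consumer
 */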

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Copy value to the first system word of VMA flags, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static __always_inline void vma_flags_overwrite_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	bitmap[0] = value;
}

/*
 * Copy value to the first system word of VMA flags ONCE, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static __always_inline void vma_flags_overwrite_word_once(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	WRITE_ONCE(*bitmap, value);
}

/* Update the first system word of VMA flags setting bits, non-atomically. */
static __always_inline void vma_flags_set_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	*bitmap |= value;
}

/* Update the first system word of VMA flags clearing bits, non-atomically. */
static __always_inline void vma_flags_clear_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	*bitmap &= ~value;
}

static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
{
	bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
}

/*
 * Helper function which converts a vma_flags_t value to a legacy vm_flags_t
 * value. This is only valid if the input flags value can be expressed in a
 * system word.
 *
 * Will be removed once the conversion to VMA flags is complete.
 */
static __always_inline vm_flags_t vma_flags_to_legacy(vma_flags_t flags)
{
	return (vm_flags_t)flags.__vma_flags[0];
}

/*
 * Helper function which converts a legacy vm_flags_t value to a vma_flags_t
 * value.
 *
 * Will be removed once the conversion to VMA flags is complete.
 */
static __always_inline vma_flags_t legacy_to_vma_flags(vm_flags_t flags)
{
	vma_flags_t ret = EMPTY_VMA_FLAGS;

	vma_flags_overwrite_word(&ret, flags);
	return ret;
}
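/*
 * For illustration, round-tripping between the two representations (only
 * valid while every flag in use fits in the first system word):
 *
 *	vma_flags_t f = legacy_to_vma_flags(VM_READ | VM_WRITE);
 *	vm_flags_t legacy = vma_flags_to_legacy(f);	// == VM_READ | VM_WRITE
 */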

static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
		vma_flag_t bit)
{
	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);

	__set_bit((__force int)bit, bitmap);
}

/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word(&vma->flags, flags);
}

/*
 * Use when VMA is part of the VMA tree and modifications need coordination.
 * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
 * it should be locked explicitly beforehand.
 */
static inline void vm_flags_reset(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	vm_flags_init(vma, flags);
}

static inline void vm_flags_reset_once(struct vm_area_struct *vma,
				       vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	/*
	 * The user should only be interested in avoiding reordering of
	 * assignment to the first word.
	 */
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word_once(&vma->flags, flags);
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_set_word(&vma->flags, flags);
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_clear_word(&vma->flags, flags);
}

static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
		size_t count, const vma_flag_t *bits)
{
	size_t i;

	for (i = 0; i < count; i++)
		vma_flags_set_flag(&flags, bits[i]);
	return flags;
}

#define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS,			\
		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})

#define append_vma_flags(flags, ...) __mk_vma_flags(flags,			\
		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
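/*
 * For illustration, building and querying a flag mask with the helpers
 * above (the test helpers used here are defined further down):
 *
 *	vma_flags_t f = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
 *
 *	f = append_vma_flags(f, VMA_MAYREAD_BIT);
 *	// vma_flags_test(&f, VMA_READ_BIT)			-> true
 *	// vma_flags_test_any(&f, VMA_EXEC_BIT, VMA_WRITE_BIT)	-> true
 *	// vma_flags_test_all(&f, VMA_READ_BIT, VMA_EXEC_BIT)	-> false
 */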

static __always_inline int vma_flags_count(const vma_flags_t *flags)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
}

static __always_inline bool vma_flags_test(const vma_flags_t *flags,
		vma_flag_t bit)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return test_bit((__force int)bit, bitmap);
}

static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
						      vma_flags_t to_and)
{
	vma_flags_t dst;
	unsigned long *bitmap_dst = dst.__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_and = to_and.__vma_flags;

	bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
	return dst;
}

#define vma_flags_and(flags, ...)		\
	vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_any(flags, ...) \
	vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_all(flags, ...) \
	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
						vma_flags_t flagmask)
{
	VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);

	return vma_flags_test_any_mask(flags, flagmask);
}

static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_set = to_set.__vma_flags;

	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
}

#define vma_flags_set(flags, ...) \
	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;

	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
}

#define vma_flags_clear(flags, ...) \
	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
		const vma_flags_t *flags_other)
{
	vma_flags_t dst;
	const unsigned long *bitmap_other = flags_other->__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	unsigned long *bitmap_dst = dst.__vma_flags;

	bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
	return dst;
}

static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
						const vma_flags_t *flags_other)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_other = flags_other->__vma_flags;

	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
}

static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
						vma_flags_t flags_other)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_other = flags_other.__vma_flags;

	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
}

#define vma_flags_same(flags, ...) \
	vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_test(const struct vm_area_struct *vma,
		vma_flag_t bit)
{
	return vma_flags_test(&vma->flags, bit);
}

static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
		vma_flags_t flags)
{
	return vma_flags_test_any_mask(&vma->flags, flags);
}

#define vma_test_any(vma, ...) \
	vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
		vma_flags_t flags)
{
	return vma_flags_test_all_mask(&vma->flags, flags);
}

#define vma_test_all(vma, ...) \
	vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool
vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
{
	return vma_flags_test_single_mask(&vma->flags, flagmask);
}

static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
		vma_flags_t flags)
{
	vma_flags_set_mask(&vma->flags, flags);
}

#define vma_set_flags(vma, ...) \
	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
		vma_flag_t bit)
{
	return vma_flags_test(&desc->vma_flags, bit);
}

static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
		vma_flags_t flags)
{
	return vma_flags_test_any_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_any(desc, ...) \
	vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
		vma_flags_t flags)
{
	return vma_flags_test_all_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_all(desc, ...) \
	vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
		vma_flags_t flags)
{
	vma_flags_set_mask(&desc->vma_flags, flags);
}

#define vma_desc_set_flags(desc, ...) \
	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
		vma_flags_t flags)
{
	vma_flags_clear_mask(&desc->vma_flags, flags);
}

#define vma_desc_clear_flags(desc, ...) \
	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool is_shared_maywrite(const vma_flags_t *flags)
{
	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(&vma->flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}
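/*
 * For illustration, a minimal sketch of constructing a detached VMA for a
 * userland test (the mm and range values are hypothetical):
 *
 *	struct vm_area_struct vma;
 *
 *	vma_init(&vma, &mm);
 *	vma.vm_start = 0x1000;
 *	vma.vm_end = 0x3000;
 *	vm_flags_init(&vma, VM_READ | VM_WRITE);
 */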

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */
#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
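/*
 * For illustration: with the classification above, a private VM_READ |
 * VM_WRITE mapping is accounted to data_vm, a VM_READ | VM_EXEC text
 * mapping to exec_vm, and a VM_STACK mapping to stack_vm; total_vm grows
 * in every case.
 */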

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc);

static inline int __compat_vma_mmap(const struct file_operations *f_op,
		struct file *file, struct vm_area_struct *vma)
{
	struct vm_area_desc desc = {
		.mm = vma->vm_mm,
		.file = file,
		.start = vma->vm_start,
		.end = vma->vm_end,

		.pgoff = vma->vm_pgoff,
		.vm_file = vma->vm_file,
		.vma_flags = vma->flags,
		.page_prot = vma->vm_page_prot,

		.action.type = MMAP_NOTHING, /* Default */
	};
	int err;

	err = f_op->mmap_prepare(&desc);
	if (err)
		return err;

	mmap_action_prepare(&desc.action, &desc);
	set_vma_from_desc(vma, &desc);
	return mmap_action_complete(&desc.action, vma);
}

static inline int compat_vma_mmap(struct file *file,
		struct vm_area_struct *vma)
{
	return __compat_vma_mmap(file->f_op, file, vma);
}

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool mlock_future_ok(const struct mm_struct *mm,
		vm_flags_t vm_flags, unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
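/*
 * For illustration: with MMF_HAS_MDWE set, an mprotect() that turns a
 * writable mapping executable is denied, while re-asserting an existing
 * executable mapping is allowed:
 *
 *	map_deny_write_exec(VM_READ | VM_WRITE,
 *			    VM_READ | VM_WRITE | VM_EXEC);	// true: denied
 *	map_deny_write_exec(VM_READ | VM_EXEC,
 *			    VM_READ | VM_EXEC);			// false: allowed
 */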

static inline int mapping_map_writable(struct address_space *mapping)
{
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

extern int sysctl_max_map_count;
static inline int get_sysctl_max_map_count(void)
{
	return READ_ONCE(sysctl_max_map_count);
}

#ifndef pgtable_supports_soft_dirty
#define pgtable_supports_soft_dirty()	IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
#endif
1504