1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * vma_internal.h
4  *
5  * Header providing userland wrappers and shims for the functionality provided
6  * by mm/vma_internal.h.
7  *
8  * We make the header guard the same as mm/vma_internal.h, so if this shim
9  * header is included, it precludes the inclusion of the kernel one.
10  */
11 
12 #ifndef __MM_VMA_INTERNAL_H
13 #define __MM_VMA_INTERNAL_H
14 
15 #define __private
16 #define __bitwise
17 #define __randomize_layout
18 
19 #define CONFIG_MMU
20 #define CONFIG_PER_VMA_LOCK
21 
22 #include <stdlib.h>
23 
24 #ifdef __CONCAT
25 #undef __CONCAT
26 #endif
27 
28 #include <linux/args.h>
29 #include <linux/atomic.h>
30 #include <linux/bitmap.h>
31 #include <linux/list.h>
32 #include <linux/maple_tree.h>
33 #include <linux/mm.h>
34 #include <linux/rbtree.h>
35 #include <linux/refcount.h>
36 #include <linux/slab.h>
37 
38 extern unsigned long stack_guard_gap;
39 #ifdef CONFIG_MMU
40 extern unsigned long mmap_min_addr;
41 extern unsigned long dac_mmap_min_addr;
42 #else
43 #define mmap_min_addr		0UL
44 #define dac_mmap_min_addr	0UL
45 #endif
46 
47 #define ACCESS_PRIVATE(p, member) ((p)->member)
48 
49 #define VM_WARN_ON(_expr) (WARN_ON(_expr))
50 #define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
51 #define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
52 #define VM_BUG_ON(_expr) (BUG_ON(_expr))
53 #define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
54 
55 #define MMF_HAS_MDWE	28
56 
57 /*
58  * vm_flags in vm_area_struct, see mm_types.h.
59  * When changing, update also include/trace/events/mmflags.h
60  */
61 
62 #define VM_NONE		0x00000000
63 
64 /**
65  * typedef vma_flag_t - specifies an individual VMA flag by bit number.
66  *
67  * This value is made type safe by sparse to avoid passing invalid flag values
68  * around.
69  */
70 typedef int __bitwise vma_flag_t;
71 
72 #define DECLARE_VMA_BIT(name, bitnum) \
73 	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
74 #define DECLARE_VMA_BIT_ALIAS(name, aliased) \
75 	VMA_ ## name ## _BIT = VMA_ ## aliased ## _BIT
76 enum {
77 	DECLARE_VMA_BIT(READ, 0),
78 	DECLARE_VMA_BIT(WRITE, 1),
79 	DECLARE_VMA_BIT(EXEC, 2),
80 	DECLARE_VMA_BIT(SHARED, 3),
81 	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
82 	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
83 	DECLARE_VMA_BIT(MAYWRITE, 5),
84 	DECLARE_VMA_BIT(MAYEXEC, 6),
85 	DECLARE_VMA_BIT(MAYSHARE, 7),
86 	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
87 #ifdef CONFIG_MMU
88 	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
89 #else
90 	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
91 	DECLARE_VMA_BIT(MAYOVERLAY, 9),
92 #endif /* CONFIG_MMU */
93 	/* Page-ranges managed without "struct page", just pure PFN */
94 	DECLARE_VMA_BIT(PFNMAP, 10),
95 	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
96 	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
97 	DECLARE_VMA_BIT(LOCKED, 13),
98 	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
99 	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
100 	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
101 	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
102 	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
103 	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
104 	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
105 	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
106 	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
107 	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
108 	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
109 	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
110 	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
111 	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
112 	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
113 	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
114 	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
115 	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
116 	/* These bits are reused, we define specific uses below. */
117 	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
118 	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
119 	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
120 	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
121 	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
122 	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
123 	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
124 	/*
125 	 * This flag is used to connect VFIO to arch specific KVM code. It
126 	 * indicates that the memory under this VMA is safe for use with any
127 	 * non-cacheable memory type inside KVM. Some VFIO devices, on some
128 	 * platforms, are thought to be unsafe and can cause machine crashes
129 	 * if KVM does not lock down the memory type.
130 	 */
131 	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
132 #ifdef CONFIG_PPC32
133 	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
134 #else
135 	DECLARE_VMA_BIT(DROPPABLE, 40),
136 #endif
137 	DECLARE_VMA_BIT(UFFD_MINOR, 41),
138 	DECLARE_VMA_BIT(SEALED, 42),
139 	/* Flags that reuse flags above. */
140 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
141 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
142 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
143 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
144 	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
145 #if defined(CONFIG_X86_USER_SHADOW_STACK)
146 	/*
147 	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
148 	 * support in core mm.
149 	 *
150 	 * These VMAs will get a single end guard page. This helps userspace
151 	 * protect itself from attacks. A single page is enough for current
152 	 * shadow stack archs (x86). See the comments near alloc_shstk() in
153 	 * arch/x86/kernel/shstk.c for more details on the guard size.
154 	 */
155 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
156 #elif defined(CONFIG_ARM64_GCS)
157 	/*
158 	 * arm64's Guarded Control Stack implements similar functionality and
159 	 * has similar constraints to shadow stacks.
160 	 */
161 	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
162 #endif
163 	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),		/* Strong Access Ordering (powerpc) */
164 	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),		/* parisc */
165 	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
166 	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
167 	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
168 	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
169 	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
170 	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
171 #ifdef CONFIG_STACK_GROWSUP
172 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
173 	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
174 #else
175 	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
176 #endif
177 };
178 
179 #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
180 #define VM_READ		INIT_VM_FLAG(READ)
181 #define VM_WRITE	INIT_VM_FLAG(WRITE)
182 #define VM_EXEC		INIT_VM_FLAG(EXEC)
183 #define VM_SHARED	INIT_VM_FLAG(SHARED)
184 #define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
185 #define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
186 #define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
187 #define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
188 #define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
189 #ifdef CONFIG_MMU
190 #define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
191 #else
192 #define VM_UFFD_MISSING	VM_NONE
193 #define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
194 #endif
195 #define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
196 #define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
197 #define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
198 #define VM_LOCKED	INIT_VM_FLAG(LOCKED)
199 #define VM_IO		INIT_VM_FLAG(IO)
200 #define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
201 #define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
202 #define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
203 #define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
204 #define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
205 #define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
206 #define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
207 #define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
208 #define VM_SYNC		INIT_VM_FLAG(SYNC)
209 #define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
210 #define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
211 #define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
212 #ifdef CONFIG_MEM_SOFT_DIRTY
213 #define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
214 #else
215 #define VM_SOFTDIRTY	VM_NONE
216 #endif
217 #define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
218 #define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
219 #define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
220 #define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
221 #define VM_STACK	INIT_VM_FLAG(STACK)
222 #ifdef CONFIG_STACK_GROWSUP
223 #define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
224 #else
225 #define VM_STACK_EARLY	VM_NONE
226 #endif
227 #ifdef CONFIG_ARCH_HAS_PKEYS
228 #define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
229 /* Despite the naming, these are FLAGS not bits. */
230 #define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
231 #define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
232 #define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
233 #if CONFIG_ARCH_PKEY_BITS > 3
234 #define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
235 #else
236 #define VM_PKEY_BIT3  VM_NONE
237 #endif /* CONFIG_ARCH_PKEY_BITS > 3 */
238 #if CONFIG_ARCH_PKEY_BITS > 4
239 #define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
240 #else
241 #define VM_PKEY_BIT4  VM_NONE
242 #endif /* CONFIG_ARCH_PKEY_BITS > 4 */
243 #endif /* CONFIG_ARCH_HAS_PKEYS */
244 #if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
245 #define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
246 #else
247 #define VM_SHADOW_STACK	VM_NONE
248 #endif
249 #if defined(CONFIG_PPC64)
250 #define VM_SAO		INIT_VM_FLAG(SAO)
251 #elif defined(CONFIG_PARISC)
252 #define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
253 #elif defined(CONFIG_SPARC64)
254 #define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
255 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
256 #elif defined(CONFIG_ARM64)
257 #define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
258 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
259 #elif !defined(CONFIG_MMU)
260 #define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
261 #endif
262 #ifndef VM_GROWSUP
263 #define VM_GROWSUP	VM_NONE
264 #endif
265 #ifdef CONFIG_ARM64_MTE
266 #define VM_MTE		INIT_VM_FLAG(MTE)
267 #define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
268 #else
269 #define VM_MTE		VM_NONE
270 #define VM_MTE_ALLOWED	VM_NONE
271 #endif
272 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
273 #define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
274 #else
275 #define VM_UFFD_MINOR	VM_NONE
276 #endif
277 #ifdef CONFIG_64BIT
278 #define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
279 #define VM_SEALED		INIT_VM_FLAG(SEALED)
280 #else
281 #define VM_ALLOW_ANY_UNCACHED	VM_NONE
282 #define VM_SEALED		VM_NONE
283 #endif
284 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
285 #define VM_DROPPABLE		INIT_VM_FLAG(DROPPABLE)
286 #else
287 #define VM_DROPPABLE		VM_NONE
288 #endif
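/*
 * Illustrative note (an editorial addition, not part of the original header):
 * INIT_VM_FLAG() simply turns a bit number declared in the enum above into the
 * classic mask value, e.g.:
 *
 *	VM_READ   == INIT_VM_FLAG(READ)   == BIT(0) == 0x00000001
 *	VM_WRITE  == INIT_VM_FLAG(WRITE)  == BIT(1) == 0x00000002
 *	VM_EXEC   == INIT_VM_FLAG(EXEC)   == BIT(2) == 0x00000004
 *	VM_SHARED == INIT_VM_FLAG(SHARED) == BIT(3) == 0x00000008
 */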
289 
290 /* Bits set in the VMA until the stack is in its final location */
291 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
292 
293 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
294 
295 /* Common data flag combinations */
296 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
297 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
298 #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
299 				 VM_MAYWRITE | VM_MAYEXEC)
300 #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
301 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
302 
303 #ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
304 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
305 #endif
306 
307 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
308 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
309 #endif
310 
311 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
312 
313 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
314 
315 /* VMA basic access permission flags */
316 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
317 
318 /*
319  * Special vmas that are non-mergable, non-mlock()able.
320  */
321 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
322 
323 #define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
324 #define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
325 #define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
326 #define STACK_TOP		TASK_SIZE_LOW
327 #define STACK_TOP_MAX		TASK_SIZE_MAX
328 
329 /* This mask represents all the VMA flag bits used by mlock */
330 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
331 
337 #define RLIMIT_STACK		3	/* max stack size */
338 #define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */
339 
340 #define CAP_IPC_LOCK         14
341 
342 /*
343  * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
344  * possesses them but the other does not, should nonetheless be applied to the
345  * merged VMA:
346  *
347  *   VM_SOFTDIRTY - if a VMA is marked soft-dirty, that is, has not had its
348  *                  references cleared via /proc/$pid/clear_refs, any merged VMA
349  *                  should be considered soft-dirty also, as soft-dirtiness
350  *                  operates at VMA granularity.
351  */
352 #define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)
353 
354 /*
355  * VMA flags we ignore for the purposes of merge, i.e. a merge is not
356  * precluded if one VMA possesses one of these flags and the other does not.
357  *
358  *    VM_STICKY - When merging VMAs, VMA flags must match, unless they are
359  *                'sticky'. If any sticky flags exist in either VMA, we simply
360  *                set all of them on the merged VMA.
361  */
362 #define VM_IGNORE_MERGE VM_STICKY
363 
364 /*
365  * Flags which should result in page tables being copied on fork. These are
366  * flags which indicate that the VMA's page tables contain state which cannot
367  * be reconstituted upon page fault, and so necessitate copying them on fork:
368  *
369  * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
370  *                           reasonably reconstructed on page fault.
371  *
372  *              VM_UFFD_WP - Encodes metadata about an installed uffd
373  *                           write protect handler, which cannot be
374  *                           reconstructed on page fault.
375  *
376  *                           We always copy pgtables when dst_vma has uffd-wp
377  *                           enabled even if it's file-backed
378  *                           (e.g. shmem), because when uffd-wp is enabled the
379  *                           page table contains uffd-wp protection information
380  *                           that cannot be retrieved from the page cache, and
381  *                           skipping the copy would lose it.
382  *
383  *          VM_MAYBE_GUARD - Could contain page guard region markers which
384  *                           by design are a property of the page tables
385  *                           only and thus cannot be reconstructed on page
386  *                           fault.
387  */
388 #define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
389 
390 #define FIRST_USER_ADDRESS	0UL
391 #define USER_PGTABLES_CEILING	0UL
392 
393 #define vma_policy(vma) NULL
394 
395 #define down_write_nest_lock(sem, nest_lock)
396 
397 #define pgprot_val(x)		((x).pgprot)
398 #define __pgprot(x)		((pgprot_t) { (x) } )
399 
400 #define for_each_vma(__vmi, __vma)					\
401 	while (((__vma) = vma_next(&(__vmi))) != NULL)
402 
403 /* The MM code likes to work with exclusive end addresses */
404 #define for_each_vma_range(__vmi, __vma, __end)				\
405 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
406 
407 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
408 
409 #define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
410 
411 #define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
412 #define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)
413 
414 #define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
415 
416 #define AS_MM_ALL_LOCKS 2
417 
418 /* We hardcode this for now. */
419 #define sysctl_max_map_count 0x1000000UL
420 
421 #define pgoff_t unsigned long
422 typedef unsigned long	pgprotval_t;
423 typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
424 typedef unsigned long vm_flags_t;
425 typedef __bitwise unsigned int vm_fault_t;
426 
427 /*
428  * The shared stubs do not implement this; it amounts to an fprintf(stderr, ...)
429  * either way :)
430  */
431 #define pr_warn_once pr_err
432 
433 #define data_race(expr) expr
434 
435 #define ASSERT_EXCLUSIVE_WRITER(x)
436 
437 #define pgtable_supports_soft_dirty() 1
438 
439 /**
440  * swap - swap values of @a and @b
441  * @a: first value
442  * @b: second value
443  */
444 #define swap(a, b) \
445 	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
446 
447 struct kref {
448 	refcount_t refcount;
449 };
450 
451 /*
452  * Define the task command name length as an enum, so it can be visible to
453  * BPF programs.
454  */
455 enum {
456 	TASK_COMM_LEN = 16,
457 };
458 
459 /*
460  * Flags for bug emulation.
461  *
462  * These occupy the top three bytes.
463  */
464 enum {
465 	READ_IMPLIES_EXEC =	0x0400000,
466 };
467 
468 struct task_struct {
469 	char comm[TASK_COMM_LEN];
470 	pid_t pid;
471 	struct mm_struct *mm;
472 
473 	/* Used for emulating ABI behavior of previous Linux versions: */
474 	unsigned int			personality;
475 };
476 
477 struct task_struct *get_current(void);
478 #define current get_current()
479 
480 struct anon_vma {
481 	struct anon_vma *root;
482 	struct rb_root_cached rb_root;
483 
484 	/* Test fields. */
485 	bool was_cloned;
486 	bool was_unlinked;
487 };
488 
489 struct anon_vma_chain {
490 	struct anon_vma *anon_vma;
491 	struct list_head same_vma;
492 };
493 
494 struct anon_vma_name {
495 	struct kref kref;
496 	/* The name needs to be at the end because it is dynamically sized. */
497 	char name[];
498 };
499 
500 struct vma_iterator {
501 	struct ma_state mas;
502 };
503 
504 #define VMA_ITERATOR(name, __mm, __addr)				\
505 	struct vma_iterator name = {					\
506 		.mas = {						\
507 			.tree = &(__mm)->mm_mt,				\
508 			.index = __addr,				\
509 			.node = NULL,					\
510 			.status = ma_start,				\
511 		},							\
512 	}
513 
514 struct address_space {
515 	struct rb_root_cached	i_mmap;
516 	unsigned long		flags;
517 	atomic_t		i_mmap_writable;
518 };
519 
520 struct vm_userfaultfd_ctx {};
521 struct mempolicy {};
522 struct mmu_gather {};
523 struct mutex {};
524 #define DEFINE_MUTEX(mutexname) \
525 	struct mutex mutexname = {}
526 
527 #define DECLARE_BITMAP(name, bits) \
528 	unsigned long name[BITS_TO_LONGS(bits)]
529 
530 #define NUM_MM_FLAG_BITS (64)
531 typedef struct {
532 	__private DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
533 } mm_flags_t;
534 
535 /*
536  * Opaque type representing current VMA (vm_area_struct) flag state. Must be
537  * accessed via vma_flags_xxx() helper functions.
538  */
539 #define NUM_VMA_FLAG_BITS BITS_PER_LONG
540 typedef struct {
541 	DECLARE_BITMAP(__vma_flags, NUM_VMA_FLAG_BITS);
542 } __private vma_flags_t;
543 
544 #define EMPTY_VMA_FLAGS ((vma_flags_t){ })
545 
546 struct mm_struct {
547 	struct maple_tree mm_mt;
548 	int map_count;			/* number of VMAs */
549 	unsigned long total_vm;	   /* Total pages mapped */
550 	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
551 	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
552 	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
553 	unsigned long stack_vm;	   /* VM_STACK */
554 
555 	unsigned long def_flags;
556 
557 	mm_flags_t flags; /* Must use mm_flags_* helpers to access */
558 };
559 
560 struct vm_area_struct;
561 
562 
563 /* What action should be taken after an .mmap_prepare call is complete? */
564 enum mmap_action_type {
565 	MMAP_NOTHING,		/* Mapping is complete, no further action. */
566 	MMAP_REMAP_PFN,		/* Remap PFN range. */
567 	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
568 };
569 
570 /*
571  * Describes an action an mmap_prepare hook can request be taken to complete
572  * the mapping of a VMA. Specified in vm_area_desc.
573  */
574 struct mmap_action {
575 	union {
576 		/* Remap range. */
577 		struct {
578 			unsigned long start;
579 			unsigned long start_pfn;
580 			unsigned long size;
581 			pgprot_t pgprot;
582 		} remap;
583 	};
584 	enum mmap_action_type type;
585 
586 	/*
587 	 * If specified, this hook is invoked after the selected action has been
588 	 * successfully completed. Note that the VMA write lock is still held.
589 	 *
590 	 * The absolute minimum ought to be done here.
591 	 *
592 	 * Returns 0 on success, or an error code.
593 	 */
594 	int (*success_hook)(const struct vm_area_struct *vma);
595 
596 	/*
597 	 * If specified, this hook is invoked when an error occurs while
598 	 * attempting the selected action.
599 	 *
600 	 * The hook can return an error code in order to filter the error, but
601 	 * it is not valid to clear the error here.
602 	 */
603 	int (*error_hook)(int err);
604 
605 	/*
606 	 * This should be set in rare instances where the operation requires
607 	 * that rmap not be able to access the VMA until it is
608 	 * completely set up.
609 	 */
610 	bool hide_from_rmap_until_complete :1;
611 };
612 
613 /* Operations which modify VMAs. */
614 enum vma_operation {
615 	VMA_OP_SPLIT,
616 	VMA_OP_MERGE_UNFAULTED,
617 	VMA_OP_REMAP,
618 	VMA_OP_FORK,
619 };
620 
621 /*
622  * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
623  * manipulate mutable fields which will cause those fields to be updated in the
624  * resultant VMA.
625  *
626  * Helper functions are not required for manipulating any field.
627  */
628 struct vm_area_desc {
629 	/* Immutable state. */
630 	const struct mm_struct *const mm;
631 	struct file *const file; /* May vary from vm_file in stacked callers. */
632 	unsigned long start;
633 	unsigned long end;
634 
635 	/* Mutable fields. Populated with initial state. */
636 	pgoff_t pgoff;
637 	struct file *vm_file;
638 	union {
639 		vm_flags_t vm_flags;
640 		vma_flags_t vma_flags;
641 	};
642 	pgprot_t page_prot;
643 
644 	/* Write-only fields. */
645 	const struct vm_operations_struct *vm_ops;
646 	void *private_data;
647 
648 	/* Take further action? */
649 	struct mmap_action action;
650 };
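/*
 * Illustrative sketch (an editorial addition, not part of the original header;
 * the hook name and PFN value are hypothetical): an .mmap_prepare hook
 * typically adjusts the mutable fields of the descriptor and requests a
 * follow-up action, rather than touching a VMA directly.
 */
static inline int example_driver_mmap_prepare(struct vm_area_desc *desc)
{
	const unsigned long example_pfn = 0x1000;	/* hypothetical PFN */

	/* Mark the mapping as an I/O-style, non-expandable PFN map. */
	desc->vm_flags |= VM_IO | VM_DONTEXPAND | VM_PFNMAP;

	/* Ask the caller to perform the PFN remap once the VMA exists. */
	desc->action.type = MMAP_REMAP_PFN;
	desc->action.remap.start = desc->start;
	desc->action.remap.start_pfn = example_pfn;
	desc->action.remap.size = desc->end - desc->start;
	desc->action.remap.pgprot = desc->page_prot;

	return 0;
}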
651 
652 struct file_operations {
653 	int (*mmap)(struct file *, struct vm_area_struct *);
654 	int (*mmap_prepare)(struct vm_area_desc *);
655 };
656 
657 struct file {
658 	struct address_space	*f_mapping;
659 	const struct file_operations	*f_op;
660 };
661 
662 #define VMA_LOCK_OFFSET	0x40000000
663 
664 typedef struct { unsigned long v; } freeptr_t;
665 
666 struct vm_area_struct {
667 	/* The first cache line has the info for VMA tree walking. */
668 
669 	union {
670 		struct {
671 			/* VMA covers [vm_start; vm_end) addresses within mm */
672 			unsigned long vm_start;
673 			unsigned long vm_end;
674 		};
675 		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
676 	};
677 
678 	struct mm_struct *vm_mm;	/* The address space we belong to. */
679 	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
680 
681 	/*
682 	 * Flags, see mm.h.
683 	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
684 	 */
685 	union {
686 		const vm_flags_t vm_flags;
687 		vma_flags_t flags;
688 	};
689 
690 #ifdef CONFIG_PER_VMA_LOCK
691 	/*
692 	 * Can only be written (using WRITE_ONCE()) while holding both:
693 	 *  - mmap_lock (in write mode)
694 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
695 	 * Can be read reliably while holding one of:
696 	 *  - mmap_lock (in read or write mode)
697 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
698 	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
699 	 * while holding nothing (except RCU to keep the VMA struct allocated).
700 	 *
701 	 * This sequence counter is explicitly allowed to overflow; sequence
702 	 * counter reuse can only lead to occasional unnecessary use of the
703 	 * slowpath.
704 	 */
705 	unsigned int vm_lock_seq;
706 #endif
707 
708 	/*
709 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
710 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
711 	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
712 	 * or brk vma (with NULL file) can only be in an anon_vma list.
713 	 */
714 	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
715 					  * page_table_lock */
716 	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
717 
718 	/* Function pointers to deal with this struct. */
719 	const struct vm_operations_struct *vm_ops;
720 
721 	/* Information about our backing store: */
722 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
723 					   units */
724 	struct file * vm_file;		/* File we map to (can be NULL). */
725 	void * vm_private_data;		/* was vm_pte (shared mem) */
726 
727 #ifdef CONFIG_SWAP
728 	atomic_long_t swap_readahead_info;
729 #endif
730 #ifndef CONFIG_MMU
731 	struct vm_region *vm_region;	/* NOMMU mapping region */
732 #endif
733 #ifdef CONFIG_NUMA
734 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
735 #endif
736 #ifdef CONFIG_NUMA_BALANCING
737 	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
738 #endif
739 #ifdef CONFIG_PER_VMA_LOCK
740 	/* Unstable RCU readers are allowed to read this. */
741 	refcount_t vm_refcnt;
742 #endif
743 	/*
744 	 * For areas with an address space and backing store,
745 	 * linkage into the address_space->i_mmap interval tree.
746 	 *
747 	 */
748 	struct {
749 		struct rb_node rb;
750 		unsigned long rb_subtree_last;
751 	} shared;
752 #ifdef CONFIG_ANON_VMA_NAME
753 	/*
754 	 * For private and shared anonymous mappings, a pointer to a null
755 	 * terminated string containing the name given to the vma, or NULL if
756 	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
757 	 */
758 	struct anon_vma_name *anon_name;
759 #endif
760 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
761 } __randomize_layout;
762 
763 struct vm_fault {};
764 
765 struct vm_operations_struct {
766 	void (*open)(struct vm_area_struct * area);
767 	/**
768 	 * @close: Called when the VMA is being removed from the MM.
769 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
770 	 */
771 	void (*close)(struct vm_area_struct * area);
772 	/* Called any time before splitting to check if it's allowed */
773 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
774 	int (*mremap)(struct vm_area_struct *area);
775 	/*
776 	 * Called by mprotect() to make driver-specific permission
777 	 * checks before mprotect() is finalised.   The VMA must not
778 	 * be modified.  Returns 0 if mprotect() can proceed.
779 	 */
780 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
781 			unsigned long end, unsigned long newflags);
782 	vm_fault_t (*fault)(struct vm_fault *vmf);
783 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
784 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
785 			pgoff_t start_pgoff, pgoff_t end_pgoff);
786 	unsigned long (*pagesize)(struct vm_area_struct * area);
787 
788 	/* notification that a previously read-only page is about to become
789 	 * writable, if an error is returned it will cause a SIGBUS */
790 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
791 
792 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
793 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
794 
795 	/* called by access_process_vm when get_user_pages() fails, typically
796 	 * for use by special VMAs. See also generic_access_phys() for a generic
797 	 * implementation useful for any iomem mapping.
798 	 */
799 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
800 		      void *buf, int len, int write);
801 
802 	/* Called by the /proc/PID/maps code to ask the vma whether it
803 	 * has a special name.  Returning non-NULL will also cause this
804 	 * vma to be dumped unconditionally. */
805 	const char *(*name)(struct vm_area_struct *vma);
806 
807 #ifdef CONFIG_NUMA
808 	/*
809 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
810 	 * to hold the policy upon return.  Caller should pass NULL @new to
811 	 * remove a policy and fall back to surrounding context--i.e. do not
812 	 * install a MPOL_DEFAULT policy, nor the task or system default
813 	 * mempolicy.
814 	 */
815 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
816 
817 	/*
818 	 * get_policy() op must add reference [mpol_get()] to any policy at
819 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
820 	 * in mm/mempolicy.c will do this automatically.
821 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
822 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
823 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
824 	 * must return NULL--i.e., do not "fallback" to task or system default
825 	 * policy.
826 	 */
827 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
828 					unsigned long addr, pgoff_t *ilx);
829 #endif
830 #ifdef CONFIG_FIND_NORMAL_PAGE
831 	/*
832 	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
833 	 * allows for returning a "normal" page from vm_normal_page() even
834 	 * though the PTE indicates that the "struct page" either does not exist
835 	 * or should not be touched: "special".
836 	 *
837 	 * Do not add new users: this really only works when a "normal" page
838 	 * was mapped, but then the PTE got changed to something weird (+
839 	 * marked special) that would not make pte_pfn() identify the originally
840 	 * inserted page.
841 	 */
842 	struct page *(*find_normal_page)(struct vm_area_struct *vma,
843 					 unsigned long addr);
844 #endif /* CONFIG_FIND_NORMAL_PAGE */
845 };
846 
847 struct vm_unmapped_area_info {
848 #define VM_UNMAPPED_AREA_TOPDOWN 1
849 	unsigned long flags;
850 	unsigned long length;
851 	unsigned long low_limit;
852 	unsigned long high_limit;
853 	unsigned long align_mask;
854 	unsigned long align_offset;
855 	unsigned long start_gap;
856 };
857 
858 struct pagetable_move_control {
859 	struct vm_area_struct *old; /* Source VMA. */
860 	struct vm_area_struct *new; /* Destination VMA. */
861 	unsigned long old_addr; /* Address from which the move begins. */
862 	unsigned long old_end; /* Exclusive address at which old range ends. */
863 	unsigned long new_addr; /* Address to move page tables to. */
864 	unsigned long len_in; /* Bytes to remap specified by user. */
865 
866 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
867 	bool for_stack; /* Is this an early temp stack being moved? */
868 };
869 
870 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
871 	struct pagetable_move_control name = {				\
872 		.old = old_,						\
873 		.new = new_,						\
874 		.old_addr = old_addr_,					\
875 		.old_end = (old_addr_) + (len_),			\
876 		.new_addr = new_addr_,					\
877 		.len_in = len_,						\
878 	}
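/*
 * Usage sketch (an editorial addition, not part of the original header; the
 * function name is hypothetical): PAGETABLE_MOVE() declares and fills a
 * pagetable_move_control on the stack, deriving old_end from old_addr + len.
 */
static inline void example_init_pmc(struct vm_area_struct *old_vma,
				    struct vm_area_struct *new_vma,
				    unsigned long old_addr,
				    unsigned long new_addr,
				    unsigned long len)
{
	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);

	/* pmc.old_end == old_addr + len, as computed by the macro. */
	(void)pmc;
}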
879 
880 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
881 {
882 	mas_pause(&vmi->mas);
883 }
884 
885 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
886 {
887 	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
888 }
889 
890 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
891 {
892 	return __pgprot(vm_flags);
893 }
894 
895 static inline void vma_flags_clear_all(vma_flags_t *flags)
896 {
897 	bitmap_zero(flags->__vma_flags, NUM_VMA_FLAG_BITS);
898 }
899 
900 static inline void vma_flag_set(vma_flags_t *flags, vma_flag_t bit)
901 {
902 	unsigned long *bitmap = flags->__vma_flags;
903 
904 	__set_bit((__force int)bit, bitmap);
905 }
906 
907 static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits)
908 {
909 	vma_flags_t flags;
910 	int i;
911 
912 	vma_flags_clear_all(&flags);
913 	for (i = 0; i < count; i++)
914 		vma_flag_set(&flags, bits[i]);
915 	return flags;
916 }
917 
918 #define mk_vma_flags(...) __mk_vma_flags(COUNT_ARGS(__VA_ARGS__), \
919 					 (const vma_flag_t []){__VA_ARGS__})
920 
921 static __always_inline bool vma_flags_test_mask(const vma_flags_t *flags,
922 		vma_flags_t to_test)
923 {
924 	const unsigned long *bitmap = flags->__vma_flags;
925 	const unsigned long *bitmap_to_test = to_test.__vma_flags;
926 
927 	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
928 }
929 
930 #define vma_flags_test(flags, ...) \
931 	vma_flags_test_mask(flags, mk_vma_flags(__VA_ARGS__))
932 
933 static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
934 		vma_flags_t to_test)
935 {
936 	const unsigned long *bitmap = flags->__vma_flags;
937 	const unsigned long *bitmap_to_test = to_test.__vma_flags;
938 
939 	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
940 }
941 
942 #define vma_flags_test_all(flags, ...) \
943 	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
944 
945 static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
946 {
947 	unsigned long *bitmap = flags->__vma_flags;
948 	const unsigned long *bitmap_to_set = to_set.__vma_flags;
949 
950 	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
951 }
952 
953 #define vma_flags_set(flags, ...) \
954 	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))
955 
956 static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear)
957 {
958 	unsigned long *bitmap = flags->__vma_flags;
959 	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;
960 
961 	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
962 }
963 
964 #define vma_flags_clear(flags, ...) \
965 	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
966 
967 static inline bool vma_test_all_flags_mask(const struct vm_area_struct *vma,
968 					   vma_flags_t flags)
969 {
970 	return vma_flags_test_all_mask(&vma->flags, flags);
971 }
972 
973 #define vma_test_all_flags(vma, ...) \
974 	vma_test_all_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
975 
976 static inline void vma_set_flags_mask(struct vm_area_struct *vma,
977 				      vma_flags_t flags)
978 {
979 	vma_flags_set_mask(&vma->flags, flags);
980 }
981 
982 #define vma_set_flags(vma, ...) \
983 	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
984 
985 static inline bool vma_desc_test_flags_mask(const struct vm_area_desc *desc,
986 					    vma_flags_t flags)
987 {
988 	return vma_flags_test_mask(&desc->vma_flags, flags);
989 }
990 
991 #define vma_desc_test_flags(desc, ...) \
992 	vma_desc_test_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
993 
994 static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
995 					   vma_flags_t flags)
996 {
997 	vma_flags_set_mask(&desc->vma_flags, flags);
998 }
999 
1000 #define vma_desc_set_flags(desc, ...) \
1001 	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
1002 
1003 static inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
1004 					     vma_flags_t flags)
1005 {
1006 	vma_flags_clear_mask(&desc->vma_flags, flags);
1007 }
1008 
1009 #define vma_desc_clear_flags(desc, ...) \
1010 	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
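/*
 * Usage sketch (an editorial addition, not part of the original header; the
 * function name is hypothetical): the opaque flag types are manipulated via
 * the helpers above by naming individual VMA_*_BIT values.
 */
static inline bool example_desc_mark_io(struct vm_area_desc *desc)
{
	/* Set VM_IO | VM_DONTEXPAND on the descriptor... */
	vma_desc_set_flags(desc, VMA_IO_BIT, VMA_DONTEXPAND_BIT);

	/* ...and report whether either bit is now present. */
	return vma_desc_test_flags(desc, VMA_IO_BIT, VMA_DONTEXPAND_BIT);
}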
1011 
1012 static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
1013 {
1014 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
1015 		(VM_SHARED | VM_MAYWRITE);
1016 }
1017 
1018 static inline bool is_shared_maywrite(const vma_flags_t *flags)
1019 {
1020 	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
1021 }
1022 
1023 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
1024 {
1025 	return is_shared_maywrite(&vma->flags);
1026 }
1027 
1028 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
1029 {
1030 	/*
1031 	 * Uses mas_find() to get the first VMA when the iterator starts.
1032 	 * Calling mas_next() could skip the first entry.
1033 	 */
1034 	return mas_find(&vmi->mas, ULONG_MAX);
1035 }
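/*
 * Usage sketch (an editorial addition, not part of the original header; the
 * function name is hypothetical): VMA_ITERATOR() plus for_each_vma() walk
 * every VMA in an mm in address order.
 */
static inline unsigned long example_total_vma_bytes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long bytes = 0;

	for_each_vma(vmi, vma)
		bytes += vma->vm_end - vma->vm_start;

	return bytes;
}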
1036 
1037 /*
1038  * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
1039  * assertions should be made either under mmap_write_lock or when the object
1040  * has been isolated under mmap_write_lock, ensuring no competing writers.
1041  */
1042 static inline void vma_assert_attached(struct vm_area_struct *vma)
1043 {
1044 	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
1045 }
1046 
1047 static inline void vma_assert_detached(struct vm_area_struct *vma)
1048 {
1049 	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
1050 }
1051 
1052 static inline void vma_assert_write_locked(struct vm_area_struct *);
1053 static inline void vma_mark_attached(struct vm_area_struct *vma)
1054 {
1055 	vma_assert_write_locked(vma);
1056 	vma_assert_detached(vma);
1057 	refcount_set_release(&vma->vm_refcnt, 1);
1058 }
1059 
1060 static inline void vma_mark_detached(struct vm_area_struct *vma)
1061 {
1062 	vma_assert_write_locked(vma);
1063 	vma_assert_attached(vma);
1064 	/* We are the only writer, so no need to use vma_refcount_put(). */
1065 	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
1066 		/*
1067 		 * Reader must have temporarily raised vm_refcnt but it will
1068 		 * drop it without using the vma since vma is write-locked.
1069 		 */
1070 	}
1071 }
1072 
1073 extern const struct vm_operations_struct vma_dummy_vm_ops;
1074 
1075 extern unsigned long rlimit(unsigned int limit);
1076 
1077 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
1078 {
1079 	memset(vma, 0, sizeof(*vma));
1080 	vma->vm_mm = mm;
1081 	vma->vm_ops = &vma_dummy_vm_ops;
1082 	INIT_LIST_HEAD(&vma->anon_vma_chain);
1083 	vma->vm_lock_seq = UINT_MAX;
1084 }
1085 
1086 /*
1087  * These are defined in vma.h, but sadly vm_stat_account() is referenced by
1088  * kernel/fork.c, so we have to make these broadly available there, and temporarily
1089  * define them here to resolve the dependency cycle.
1090  */
1091 
1092 #define is_exec_mapping(flags) \
1093 	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)
1094 
1095 #define is_stack_mapping(flags) \
1096 	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))
1097 
1098 #define is_data_mapping(flags) \
1099 	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
1100 
1101 static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
1102 				   long npages)
1103 {
1104 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
1105 
1106 	if (is_exec_mapping(flags))
1107 		mm->exec_vm += npages;
1108 	else if (is_stack_mapping(flags))
1109 		mm->stack_vm += npages;
1110 	else if (is_data_mapping(flags))
1111 		mm->data_vm += npages;
1112 }
1113 
1114 #undef is_exec_mapping
1115 #undef is_stack_mapping
1116 #undef is_data_mapping
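/*
 * Worked example (an editorial addition, not part of the original header): a
 * VMA with VM_READ | VM_WRITE and neither VM_SHARED nor VM_STACK set counts
 * as a data mapping, so vm_stat_account(mm, flags, npages) increments both
 * mm->total_vm and mm->data_vm by npages.
 */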
1117 
1118 /* Currently stubbed but we may later wish to un-stub. */
1119 static inline void vm_acct_memory(long pages);
1120 static inline void vm_unacct_memory(long pages)
1121 {
1122 	vm_acct_memory(-pages);
1123 }
1124 
1125 static inline void mapping_allow_writable(struct address_space *mapping)
1126 {
1127 	atomic_inc(&mapping->i_mmap_writable);
1128 }
1129 
1130 static inline
1131 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
1132 {
1133 	return mas_find(&vmi->mas, max - 1);
1134 }
1135 
1136 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1137 			unsigned long start, unsigned long end, gfp_t gfp)
1138 {
1139 	__mas_set_range(&vmi->mas, start, end - 1);
1140 	mas_store_gfp(&vmi->mas, NULL, gfp);
1141 	if (unlikely(mas_is_err(&vmi->mas)))
1142 		return -ENOMEM;
1143 
1144 	return 0;
1145 }
1146 
1147 static inline void mmap_assert_locked(struct mm_struct *);
1148 static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1149 						unsigned long start_addr,
1150 						unsigned long end_addr)
1151 {
1152 	unsigned long index = start_addr;
1153 
1154 	mmap_assert_locked(mm);
1155 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
1156 }
1157 
1158 static inline
1159 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
1160 {
1161 	return mtree_load(&mm->mm_mt, addr);
1162 }
1163 
1164 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
1165 {
1166 	return mas_prev(&vmi->mas, 0);
1167 }
1168 
1169 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1170 {
1171 	mas_set(&vmi->mas, addr);
1172 }
1173 
1174 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1175 {
1176 	return !vma->vm_ops;
1177 }
1178 
1179 /* Defined in vma.h, so temporarily define here to avoid circular dependency. */
1180 #define vma_iter_load(vmi) \
1181 	mas_walk(&(vmi)->mas)
1182 
1183 static inline struct vm_area_struct *
1184 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1185 			struct vm_area_struct **pprev)
1186 {
1187 	struct vm_area_struct *vma;
1188 	VMA_ITERATOR(vmi, mm, addr);
1189 
1190 	vma = vma_iter_load(&vmi);
1191 	*pprev = vma_prev(&vmi);
1192 	if (!vma)
1193 		vma = vma_next(&vmi);
1194 	return vma;
1195 }
1196 
1197 #undef vma_iter_load
1198 
1199 static inline void vma_iter_init(struct vma_iterator *vmi,
1200 		struct mm_struct *mm, unsigned long addr)
1201 {
1202 	mas_init(&vmi->mas, &mm->mm_mt, addr);
1203 }
1204 
1205 /* Stubbed functions. */
1206 
1207 static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
1208 {
1209 	return NULL;
1210 }
1211 
1212 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
1213 					struct vm_userfaultfd_ctx vm_ctx)
1214 {
1215 	return true;
1216 }
1217 
1218 static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
1219 				    struct anon_vma_name *anon_name2)
1220 {
1221 	return true;
1222 }
1223 
1224 static inline void might_sleep(void)
1225 {
1226 }
1227 
1228 static inline unsigned long vma_pages(struct vm_area_struct *vma)
1229 {
1230 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1231 }
1232 
1233 static inline void fput(struct file *file)
1234 {
1235 }
1236 
1237 static inline void mpol_put(struct mempolicy *pol)
1238 {
1239 }
1240 
1241 static inline void lru_add_drain(void)
1242 {
1243 }
1244 
1245 static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
1246 {
1247 }
1248 
1249 static inline void update_hiwater_rss(struct mm_struct *mm)
1250 {
1251 }
1252 
1253 static inline void update_hiwater_vm(struct mm_struct *mm)
1254 {
1255 }
1256 
1257 struct unmap_desc;
1258 
1259 static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
1260 {
1261 }
1262 
1263 static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc)
1264 {
1265 	(void)tlb;
1266 	(void)desc;
1267 }
1268 
1269 static inline void mapping_unmap_writable(struct address_space *mapping)
1270 {
1271 }
1272 
1273 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
1274 {
1275 }
1276 
1277 static inline void tlb_finish_mmu(struct mmu_gather *tlb)
1278 {
1279 }
1280 
1281 static inline struct file *get_file(struct file *f)
1282 {
1283 	return f;
1284 }
1285 
1286 static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
1287 {
1288 	return 0;
1289 }
1290 
1291 static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
1292 				 enum vma_operation operation)
1293 {
1294 	/* For testing purposes, we indicate that an anon_vma has been cloned. */
1295 	if (src->anon_vma != NULL) {
1296 		dst->anon_vma = src->anon_vma;
1297 		dst->anon_vma->was_cloned = true;
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 static inline void vma_start_write(struct vm_area_struct *vma)
1304 {
1305 	/* Used to indicate to tests that a write operation has begun. */
1306 	vma->vm_lock_seq++;
1307 }
1308 
1309 static inline __must_check
1310 int vma_start_write_killable(struct vm_area_struct *vma)
1311 {
1312 	/* Used to indicate to tests that a write operation has begun. */
1313 	vma->vm_lock_seq++;
1314 	return 0;
1315 }
1316 
1317 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
1318 					 unsigned long start,
1319 					 unsigned long end,
1320 					 struct vm_area_struct *next)
1321 {
1322 }
1323 
1324 static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
1325 
1326 static inline void vma_iter_free(struct vma_iterator *vmi)
1327 {
1328 	mas_destroy(&vmi->mas);
1329 }
1330 
1331 static inline
1332 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
1333 {
1334 	return mas_next_range(&vmi->mas, ULONG_MAX);
1335 }
1336 
1337 static inline void vm_acct_memory(long pages)
1338 {
1339 }
1340 
1341 static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
1342 					    struct rb_root_cached *rb)
1343 {
1344 }
1345 
1346 static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
1347 					    struct rb_root_cached *rb)
1348 {
1349 }
1350 
1351 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
1352 {
1353 }
1354 
1355 static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
1356 						 struct rb_root_cached *rb)
1357 {
1358 }
1359 
1360 static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
1361 						 struct rb_root_cached *rb)
1362 {
1363 }
1364 
1365 static inline void uprobe_mmap(struct vm_area_struct *vma)
1366 {
1367 }
1368 
1369 static inline void uprobe_munmap(struct vm_area_struct *vma,
1370 				 unsigned long start, unsigned long end)
1371 {
1372 }
1373 
1374 static inline void i_mmap_lock_write(struct address_space *mapping)
1375 {
1376 }
1377 
1378 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
1379 {
1380 }
1381 
1382 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
1383 {
1384 }
1385 
1386 static inline void unlink_anon_vmas(struct vm_area_struct *vma)
1387 {
1388 	/* For testing purposes, indicate that the anon_vma was unlinked. */
1389 	vma->anon_vma->was_unlinked = true;
1390 }
1391 
1392 static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
1393 {
1394 }
1395 
1396 static inline void i_mmap_unlock_write(struct address_space *mapping)
1397 {
1398 }
1399 
1400 static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
1401 					 unsigned long start,
1402 					 unsigned long end,
1403 					 struct list_head *unmaps)
1404 {
1405 	return 0;
1406 }
1407 
1408 static inline void mmap_write_downgrade(struct mm_struct *mm)
1409 {
1410 }
1411 
1412 static inline void mmap_read_unlock(struct mm_struct *mm)
1413 {
1414 }
1415 
1416 static inline void mmap_write_unlock(struct mm_struct *mm)
1417 {
1418 }
1419 
1420 static inline int mmap_write_lock_killable(struct mm_struct *mm)
1421 {
1422 	return 0;
1423 }
1424 
1425 static inline bool can_modify_mm(struct mm_struct *mm,
1426 				 unsigned long start,
1427 				 unsigned long end)
1428 {
1429 	return true;
1430 }
1431 
1432 static inline void arch_unmap(struct mm_struct *mm,
1433 				 unsigned long start,
1434 				 unsigned long end)
1435 {
1436 }
1437 
1438 static inline void mmap_assert_locked(struct mm_struct *mm)
1439 {
1440 }
1441 
1442 static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
1443 {
1444 	return true;
1445 }
1446 
1447 static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
1448 			  vm_flags_t vm_flags)
1449 {
1450 }
1451 
1452 static inline bool mapping_can_writeback(struct address_space *mapping)
1453 {
1454 	return true;
1455 }
1456 
1457 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
1458 {
1459 	return false;
1460 }
1461 
1462 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1463 {
1464 	return false;
1465 }
1466 
1467 static inline bool userfaultfd_wp(struct vm_area_struct *vma)
1468 {
1469 	return false;
1470 }
1471 
1472 static inline void mmap_assert_write_locked(struct mm_struct *mm)
1473 {
1474 }
1475 
1476 static inline void mutex_lock(struct mutex *lock)
1477 {
1478 }
1479 
1480 static inline void mutex_unlock(struct mutex *lock)
1481 {
1482 }
1483 
1484 static inline bool mutex_is_locked(struct mutex *lock)
1485 {
1486 	return true;
1487 }
1488 
1489 static inline bool signal_pending(void *p)
1490 {
1491 	return false;
1492 }
1493 
1494 static inline bool is_file_hugepages(struct file *file)
1495 {
1496 	return false;
1497 }
1498 
1499 static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
1500 {
1501 	return 0;
1502 }
1503 
1504 static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
1505 				 unsigned long npages)
1506 {
1507 	return true;
1508 }
1509 
1510 static inline int shmem_zero_setup(struct vm_area_struct *vma)
1511 {
1512 	return 0;
1513 }
1514 
1515 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1516 {
1517 	vma->vm_ops = NULL;
1518 }
1519 
1520 static inline void ksm_add_vma(struct vm_area_struct *vma)
1521 {
1522 }
1523 
1524 static inline void perf_event_mmap(struct vm_area_struct *vma)
1525 {
1526 }
1527 
1528 static inline bool vma_is_dax(struct vm_area_struct *vma)
1529 {
1530 	return false;
1531 }
1532 
1533 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
1534 {
1535 	return NULL;
1536 }
1537 
1538 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1539 
1540 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
1541 static inline void vma_set_page_prot(struct vm_area_struct *vma)
1542 {
1543 	vm_flags_t vm_flags = vma->vm_flags;
1544 	pgprot_t vm_page_prot;
1545 
1546 	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1547 	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));
1548 
1549 	if (vma_wants_writenotify(vma, vm_page_prot)) {
1550 		vm_flags &= ~VM_SHARED;
1551 		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1552 		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
1553 	}
1554 	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
1555 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
1556 }
1557 
1558 static inline bool arch_validate_flags(vm_flags_t flags)
1559 {
1560 	return true;
1561 }
1562 
1563 static inline void vma_close(struct vm_area_struct *vma)
1564 {
1565 }
1566 
1567 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
1568 {
1569 	return 0;
1570 }
1571 
1572 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
1573 {
1574 	if (vma->vm_flags & VM_GROWSDOWN)
1575 		return stack_guard_gap;
1576 
1577 	/* See reasoning around the VM_SHADOW_STACK definition */
1578 	if (vma->vm_flags & VM_SHADOW_STACK)
1579 		return PAGE_SIZE;
1580 
1581 	return 0;
1582 }
1583 
1584 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
1585 {
1586 	unsigned long gap = stack_guard_start_gap(vma);
1587 	unsigned long vm_start = vma->vm_start;
1588 
1589 	vm_start -= gap;
1590 	if (vm_start > vma->vm_start)
1591 		vm_start = 0;
1592 	return vm_start;
1593 }
1594 
1595 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
1596 {
1597 	unsigned long vm_end = vma->vm_end;
1598 
1599 	if (vma->vm_flags & VM_GROWSUP) {
1600 		vm_end += stack_guard_gap;
1601 		if (vm_end < vma->vm_end)
1602 			vm_end = -PAGE_SIZE;
1603 	}
1604 	return vm_end;
1605 }
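/*
 * Worked example (an editorial addition, not part of the original header):
 * with the kernel's default stack_guard_gap of 256 pages (1 MiB with 4 KiB
 * pages), a VM_GROWSDOWN stack spanning [0x7f0000100000, 0x7f0000200000)
 * yields vm_start_gap() == 0x7f0000000000, i.e. the guard gap is treated as
 * part of the range other mappings must avoid.
 */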
1606 
1607 static inline int is_hugepage_only_range(struct mm_struct *mm,
1608 					unsigned long addr, unsigned long len)
1609 {
1610 	return 0;
1611 }
1612 
1613 static inline bool vma_is_accessible(struct vm_area_struct *vma)
1614 {
1615 	return vma->vm_flags & VM_ACCESS_FLAGS;
1616 }
1617 
1618 static inline bool capable(int cap)
1619 {
1620 	return true;
1621 }
1622 
1623 static inline bool mlock_future_ok(const struct mm_struct *mm,
1624 		vm_flags_t vm_flags, unsigned long bytes)
1625 {
1626 	unsigned long locked_pages, limit_pages;
1627 
1628 	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1629 		return true;
1630 
1631 	locked_pages = bytes >> PAGE_SHIFT;
1632 	locked_pages += mm->locked_vm;
1633 
1634 	limit_pages = rlimit(RLIMIT_MEMLOCK);
1635 	limit_pages >>= PAGE_SHIFT;
1636 
1637 	return locked_pages <= limit_pages;
1638 }
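/*
 * Worked example (an editorial addition, not part of the original header):
 * note that the capable() stub above always returns true, so in this shim the
 * early return is always taken. In the kernel proper, with RLIMIT_MEMLOCK at
 * 64 KiB and 4 KiB pages, limit_pages is 16, and locking a further 32 KiB
 * (8 pages) on an mm with locked_vm == 10 gives locked_pages == 18 > 16, so
 * mlock_future_ok() would return false.
 */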
1639 
1640 static inline int __anon_vma_prepare(struct vm_area_struct *vma)
1641 {
1642 	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));
1643 
1644 	if (!anon_vma)
1645 		return -ENOMEM;
1646 
1647 	anon_vma->root = anon_vma;
1648 	vma->anon_vma = anon_vma;
1649 
1650 	return 0;
1651 }
1652 
1653 static inline int anon_vma_prepare(struct vm_area_struct *vma)
1654 {
1655 	if (likely(vma->anon_vma))
1656 		return 0;
1657 
1658 	return __anon_vma_prepare(vma);
1659 }
1660 
1661 static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
1662 					      struct list_head *uf)
1663 {
1664 }
1665 
1666 static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
1667 {
1668 	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
1669 }
1670 
1671 /*
1672  * Copy value to the first system word of VMA flags, non-atomically.
1673  *
1674  * IMPORTANT: This does not overwrite bytes past the first system word. The
1675  * caller must account for this.
1676  */
1677 static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
1678 {
1679 	*ACCESS_PRIVATE(flags, __vma_flags) = value;
1680 }
1681 
1682 /*
1683  * Copy value to the first system word of VMA flags ONCE, non-atomically.
1684  *
1685  * IMPORTANT: This does not overwrite bytes past the first system word. The
1686  * caller must account for this.
1687  */
1688 static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
1689 {
1690 	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
1691 
1692 	WRITE_ONCE(*bitmap, value);
1693 }
1694 
1695 /* Update the first system word of VMA flags setting bits, non-atomically. */
1696 static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
1697 {
1698 	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
1699 
1700 	*bitmap |= value;
1701 }
1702 
1703 /* Update the first system word of VMA flags clearing bits, non-atomically. */
1704 static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
1705 {
1706 	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
1707 
1708 	*bitmap &= ~value;
1709 }
1710 
1711 
1712 /* Use when VMA is not part of the VMA tree and needs no locking */
1713 static inline void vm_flags_init(struct vm_area_struct *vma,
1714 				 vm_flags_t flags)
1715 {
1716 	vma_flags_clear_all(&vma->flags);
1717 	vma_flags_overwrite_word(&vma->flags, flags);
1718 }
1719 
1720 /*
1721  * Use when the VMA is part of the VMA tree and modifications need coordination.
1722  * Note: vm_flags_reset() and vm_flags_reset_once() do not lock the vma;
1723  * it should be locked explicitly beforehand.
1724  */
1725 static inline void vm_flags_reset(struct vm_area_struct *vma,
1726 				  vm_flags_t flags)
1727 {
1728 	vma_assert_write_locked(vma);
1729 	vm_flags_init(vma, flags);
1730 }
1731 
1732 static inline void vm_flags_reset_once(struct vm_area_struct *vma,
1733 				       vm_flags_t flags)
1734 {
1735 	vma_assert_write_locked(vma);
1736 	/*
1737 	 * The user should only be interested in avoiding reordering of
1738 	 * assignment to the first word.
1739 	 */
1740 	vma_flags_clear_all(&vma->flags);
1741 	vma_flags_overwrite_word_once(&vma->flags, flags);
1742 }
1743 
1744 static inline void vm_flags_set(struct vm_area_struct *vma,
1745 				vm_flags_t flags)
1746 {
1747 	vma_start_write(vma);
1748 	vma_flags_set_word(&vma->flags, flags);
1749 }
1750 
1751 static inline void vm_flags_clear(struct vm_area_struct *vma,
1752 				  vm_flags_t flags)
1753 {
1754 	vma_start_write(vma);
1755 	vma_flags_clear_word(&vma->flags, flags);
1756 }
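/*
 * Usage sketch (an editorial addition, not part of the original header; the
 * function name is hypothetical): initialise a detached VMA's flags, then
 * toggle VM_LOCKED via the helpers that write-lock the VMA (a counter bump in
 * this shim).
 */
static inline void example_toggle_locked(struct vm_area_struct *vma)
{
	vm_flags_init(vma, VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE);
	vm_flags_set(vma, VM_LOCKED);
	vm_flags_clear(vma, VM_LOCKED);
}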
1757 
1758 /*
1759  * Denies creating a writable executable mapping or gaining executable permissions.
1760  *
1761  * This denies the following:
1762  *
1763  *     a)      mmap(PROT_WRITE | PROT_EXEC)
1764  *
1765  *     b)      mmap(PROT_WRITE)
1766  *             mprotect(PROT_EXEC)
1767  *
1768  *     c)      mmap(PROT_WRITE)
1769  *             mprotect(PROT_READ)
1770  *             mprotect(PROT_EXEC)
1771  *
1772  * But allows the following:
1773  *
1774  *     d)      mmap(PROT_READ | PROT_EXEC)
1775  *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
1776  *
1777  * This is only applicable if the user has set the Memory-Deny-Write-Execute
1778  * (MDWE) protection mask for the current process.
1779  *
1780  * @old specifies the VMA flags the VMA originally possessed, and @new the ones
1781  * we propose to set.
1782  *
1783  * Return: false if the proposed change is OK, true if it is not and should be denied.
1784  */
1785 static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
1786 {
1787 	/* If MDWE is disabled, we have nothing to deny. */
1788 	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
1789 		return false;
1790 
1791 	/* If the new VMA is not executable, we have nothing to deny. */
1792 	if (!(new & VM_EXEC))
1793 		return false;
1794 
1795 	/* Under MDWE we do not accept newly writably executable VMAs... */
1796 	if (new & VM_WRITE)
1797 		return true;
1798 
1799 	/* ...nor previously non-executable VMAs becoming executable. */
1800 	if (!(old & VM_EXEC))
1801 		return true;
1802 
1803 	return false;
1804 }
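/*
 * Usage sketch (an editorial addition, not part of the original header; the
 * function name and error code are hypothetical): an mprotect()-style caller
 * compares the old and proposed flags before committing them.
 */
static inline int example_check_mdwe(const struct vm_area_struct *vma,
				     vm_flags_t newflags)
{
	if (map_deny_write_exec(vma->vm_flags, newflags))
		return -EPERM;

	return 0;
}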
1805 
1806 static inline int mapping_map_writable(struct address_space *mapping)
1807 {
1808 	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
1809 		0 : -EPERM;
1810 }
1811 
1812 static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
1813 {
1814 	return 0;
1815 }
1816 
1817 static inline void free_pgd_range(struct mmu_gather *tlb,
1818 			unsigned long addr, unsigned long end,
1819 			unsigned long floor, unsigned long ceiling)
1820 {
1821 }
1822 
1823 static inline int ksm_execve(struct mm_struct *mm)
1824 {
1825 	return 0;
1826 }
1827 
1828 static inline void ksm_exit(struct mm_struct *mm)
1829 {
1830 }
1831 
1832 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
1833 {
1834 	if (reset_refcnt)
1835 		refcount_set(&vma->vm_refcnt, 0);
1836 }
1837 
1838 static inline void vma_numab_state_init(struct vm_area_struct *vma)
1839 {
1840 }
1841 
1842 static inline void vma_numab_state_free(struct vm_area_struct *vma)
1843 {
1844 }
1845 
1846 static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
1847 				     struct vm_area_struct *new_vma)
1848 {
1849 }
1850 
1851 static inline void free_anon_vma_name(struct vm_area_struct *vma)
1852 {
1853 }
1854 
1855 /* Declared in vma.h. */
1856 static inline void set_vma_from_desc(struct vm_area_struct *vma,
1857 		struct vm_area_desc *desc);
1858 
1859 static inline void mmap_action_prepare(struct mmap_action *action,
1860 					   struct vm_area_desc *desc)
1861 {
1862 }
1863 
1864 static inline int mmap_action_complete(struct mmap_action *action,
1865 					   struct vm_area_struct *vma)
1866 {
1867 	return 0;
1868 }
1869 
1870 static inline int __compat_vma_mmap(const struct file_operations *f_op,
1871 		struct file *file, struct vm_area_struct *vma)
1872 {
1873 	struct vm_area_desc desc = {
1874 		.mm = vma->vm_mm,
1875 		.file = file,
1876 		.start = vma->vm_start,
1877 		.end = vma->vm_end,
1878 
1879 		.pgoff = vma->vm_pgoff,
1880 		.vm_file = vma->vm_file,
1881 		.vm_flags = vma->vm_flags,
1882 		.page_prot = vma->vm_page_prot,
1883 
1884 		.action.type = MMAP_NOTHING, /* Default */
1885 	};
1886 	int err;
1887 
1888 	err = f_op->mmap_prepare(&desc);
1889 	if (err)
1890 		return err;
1891 
1892 	mmap_action_prepare(&desc.action, &desc);
1893 	set_vma_from_desc(vma, &desc);
1894 	return mmap_action_complete(&desc.action, vma);
1895 }
1896 
1897 static inline int compat_vma_mmap(struct file *file,
1898 		struct vm_area_struct *vma)
1899 {
1900 	return __compat_vma_mmap(file->f_op, file, vma);
1901 }
1902 
1903 /* Did the driver provide valid mmap hook configuration? */
1904 static inline bool can_mmap_file(struct file *file)
1905 {
1906 	bool has_mmap = file->f_op->mmap;
1907 	bool has_mmap_prepare = file->f_op->mmap_prepare;
1908 
1909 	/* Hooks are mutually exclusive. */
1910 	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
1911 		return false;
1912 	if (!has_mmap && !has_mmap_prepare)
1913 		return false;
1914 
1915 	return true;
1916 }
1917 
1918 static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
1919 {
1920 	if (file->f_op->mmap_prepare)
1921 		return compat_vma_mmap(file, vma);
1922 
1923 	return file->f_op->mmap(file, vma);
1924 }
1925 
1926 static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
1927 {
1928 	return file->f_op->mmap_prepare(desc);
1929 }
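/*
 * Usage sketch (an editorial addition, not part of the original header; the
 * function name and error code are hypothetical): an mmap()-style caller
 * first validates the file's hook configuration, then lets vfs_mmap()
 * dispatch to either .mmap_prepare (via compat_vma_mmap()) or the legacy
 * .mmap hook.
 */
static inline int example_mmap_dispatch(struct file *file,
					struct vm_area_struct *vma)
{
	if (!can_mmap_file(file))
		return -EPERM;

	return vfs_mmap(file, vma);
}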
1930 
1931 static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
1932 {
1933 }
1934 
1935 static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
1936 {
1937 	/* Changing an anonymous vma with this is illegal */
1938 	get_file(file);
1939 	swap(vma->vm_file, file);
1940 	fput(file);
1941 }
1942 
1943 static inline bool shmem_file(struct file *file)
1944 {
1945 	return false;
1946 }
1947 
1948 static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
1949 		const struct file *file, vm_flags_t vm_flags)
1950 {
1951 	return vm_flags;
1952 }
1953 
1954 static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
1955 {
1956 }
1957 
1958 static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
1959 		unsigned long pfn, unsigned long size, pgprot_t pgprot)
1960 {
1961 	return 0;
1962 }
1963 
1964 static inline int do_munmap(struct mm_struct *, unsigned long, size_t,
1965 		struct list_head *uf)
1966 {
1967 	return 0;
1968 }
1969 
1970 #endif	/* __MM_VMA_INTERNAL_H */
1971