/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>

extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr		0UL
#define dac_mmap_min_addr	0UL
#endif

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define MMF_HAS_MDWE	28

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_GROWSUP	VM_NONE

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#define VM_STACK_EARLY	VM_GROWSDOWN
#else
#define VM_STACK	VM_GROWSDOWN
#define VM_STACK_EARLY	0
#endif

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define RLIMIT_STACK		3	/* max stack size */
#define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK         14

#ifdef CONFIG_64BIT
#define VM_SEALED_BIT	42
#define VM_SEALED	BIT(VM_SEALED_BIT)
#else
#define VM_SEALED	VM_NONE
#endif

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; it amounts to an fprintf(STDERR,...)
 * either way :)
 */
#define pr_warn_once pr_err

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}

struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define NUM_MM_FLAG_BITS (64)
typedef struct {
	__private DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
} mm_flags_t;

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;	   /* Total pages mapped */
	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;	   /* VM_STACK */

	unsigned long def_flags;

	mm_flags_t flags; /* Must use mm_flags_* helpers to access */
};

struct vm_area_struct;

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *file;
	vm_flags_t vm_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;
};

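/*
 * Usage sketch (illustrative only, not part of the shimmed API): a
 * hypothetical driver's mmap_prepare hook mutating the descriptor above. The
 * function name and the chosen flag are assumptions for the example; the
 * fields written here are propagated into the resultant VMA.
 */
static inline int example_mmap_prepare(struct vm_area_desc *desc)
{
	/* Mutable field: forbid mremap()-expansion of the mapping. */
	desc->vm_flags |= VM_DONTEXPAND;

	/* Write-only fields: ops and private state for the resultant VMA. */
	desc->vm_ops = NULL;
	desc->private_data = NULL;

	return 0;
}
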
struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};

struct file {
	struct address_space	*f_mapping;
	const struct file_operations	*f_op;
};

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.   The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not exist
	 * or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the originally
	 * inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old; /* Source VMA. */
	struct vm_area_struct *new; /* Destination VMA. */
	unsigned long old_addr; /* Address from which the move begins. */
	unsigned long old_end; /* Exclusive address at which old range ends. */
	unsigned long new_addr; /* Address to move page tables to. */
	unsigned long len_in; /* Bytes to remap specified by user. */

	bool need_rmap_locks; /* Do rmap locks need to be taken? */
	bool for_stack; /* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}

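/*
 * Illustrative sketch only: how a caller might use the PAGETABLE_MOVE()
 * initialiser above to describe moving one page's worth of page tables
 * between two VMAs. The helper name is an assumption for the example.
 */
static inline unsigned long example_pagetable_move_len(
		struct vm_area_struct *old_vma, struct vm_area_struct *new_vma)
{
	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_vma->vm_start,
		       new_vma->vm_start, PAGE_SIZE);

	/* old_end is derived by the initialiser as old_addr + len_in. */
	return pmc.old_end - pmc.old_addr;
}
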
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
577 	 * The constructor is invoked for each object in a newly allocated slab
578 	 * page. It is the cache user's responsibility to free object in the
579 	 * same state as after calling the constructor, or deal appropriately
580 	 * with any differences between a freshly constructed and a reallocated
581 	 * object.
582 	 *
583 	 * %NULL means no constructor.
584 	 */
585 	void (*ctor)(void *);
586 };
587 
588 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
589 {
590 	mas_pause(&vmi->mas);
591 }
592 
593 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
594 {
595 	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
596 }
597 
598 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
599 {
600 	return __pgprot(vm_flags);
601 }
602 
603 static inline bool is_shared_maywrite(vm_flags_t vm_flags)
604 {
605 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
606 		(VM_SHARED | VM_MAYWRITE);
607 }
608 
609 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
610 {
611 	return is_shared_maywrite(vma->vm_flags);
612 }
613 
614 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
615 {
616 	/*
617 	 * Uses mas_find() to get the first VMA when the iterator starts.
618 	 * Calling mas_next() could skip the first entry.
619 	 */
620 	return mas_find(&vmi->mas, ULONG_MAX);
621 }
622 
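/*
 * A small sketch (illustration only) combining VMA_ITERATOR() with the
 * for_each_vma() macro defined earlier: walk every VMA in an mm and count
 * them. The function name is an assumption for the example.
 */
static inline int example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int count = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		count++;

	return count;
}
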
/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

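/*
 * Illustration (example only) of the attach/detach protocol described above:
 * with the VMA write-locked (a no-op in this shim) and vm_refcnt still zero,
 * the VMA is marked attached, then detached again.
 */
static inline void example_attach_detach_cycle(struct vm_area_struct *vma)
{
	vma_mark_attached(vma);	/* asserts detached, sets vm_refcnt to 1 */
	/* ... the VMA is now visible to readers ... */
	vma_mark_detached(vma);	/* asserts attached, drops vm_refcnt to 0 */
}
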
extern const struct vm_operations_struct vma_dummy_vm_ops;

extern unsigned long rlimit(unsigned int limit);

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

struct kmem_cache {
	const char *name;
	size_t object_size;
	struct kmem_cache_args *args;
};

static inline struct kmem_cache *__kmem_cache_create(const char *name,
						     size_t object_size,
						     struct kmem_cache_args *args)
{
	struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));

	ret->name = name;
	ret->object_size = object_size;
	ret->args = args;

	return ret;
}

#define kmem_cache_create(__name, __object_size, __args, ...)           \
	__kmem_cache_create((__name), (__object_size), (__args))

static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	(void)gfpflags;

	return calloc(s->object_size, 1);
}

static inline void kmem_cache_free(struct kmem_cache *s, void *x)
{
	free(x);
}

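/*
 * Usage sketch (illustrative only) for the kmem_cache shim above, mirroring
 * how a cache of vm_area_struct objects might be created. The cache name and
 * the alignment choice are assumptions for the example.
 */
static inline struct kmem_cache *example_vma_cache_create(void)
{
	static struct kmem_cache_args args = {
		.align = __alignof__(struct vm_area_struct),
	};

	return kmem_cache_create("vm_area_struct",
				 sizeof(struct vm_area_struct), &args);
}
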
/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping

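/*
 * Worked example (illustration only) of the accounting above: a VM_STACK
 * mapping is counted in stack_vm, a plain writable private mapping in
 * data_vm, and both contribute to total_vm.
 */
static inline void example_vm_stat_account(struct mm_struct *mm)
{
	vm_stat_account(mm, VM_STACK | VM_READ | VM_WRITE, 4); /* stack_vm += 4 */
	vm_stat_account(mm, VM_READ | VM_WRITE, 4);	       /* data_vm += 4 */
}
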
/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end,
		      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)next;
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  vm_flags_t vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

static inline bool is_file_hugepages(struct file *)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}

static inline int shmem_zero_setup(struct vm_area_struct *)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *)
{
}

static inline void perf_event_mmap(struct vm_area_struct *)
{
}

static inline bool vma_is_dax(struct vm_area_struct *)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(vm_flags_t)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *)
{
}

static inline int mmap_file(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool capable(int cap)
{
	return true;
}

static inline bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
			unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

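/*
 * Worked example (illustration only): with RLIMIT_MEMLOCK at 8 MiB and 4 KiB
 * pages the limit is 2048 pages, so locking 8 MiB into an mm with no pages
 * locked yet would pass the check. Note that in this shim capable() always
 * returns true, which short-circuits the limit check entirely.
 */
static inline bool example_mlock_check(struct mm_struct *mm)
{
	return mlock_future_ok(mm, VM_LOCKED, 8UL << 20);
}
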
static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));

	if (!anon_vma)
		return -ENOMEM;

	anon_vma->root = anon_vma;
	vma->anon_vma = anon_vma;

	return 0;
}

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

# define ACCESS_PRIVATE(p, member) ((p)->member)

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Denies creating a writable executable mapping or gaining executable permissions.
 *
 * This denies the following:
 *
 *     a)      mmap(PROT_WRITE | PROT_EXEC)
 *
 *     b)      mmap(PROT_WRITE)
 *             mprotect(PROT_EXEC)
 *
 *     c)      mmap(PROT_WRITE)
 *             mprotect(PROT_READ)
 *             mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *     d)      mmap(PROT_READ | PROT_EXEC)
 *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the ones
 * we propose to set.
 *
 * Return: false if proposed change is OK, true if not ok and should be denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}

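/*
 * Illustration (example only, assuming MMF_HAS_MDWE is set on current->mm)
 * of the scenarios documented above: gaining execute on a formerly writable
 * mapping is denied, while a mapping executable from the outset is allowed.
 */
static inline void example_mdwe_checks(void)
{
	/* Cases (b)/(c): was writable, now wants PROT_EXEC -> denied. */
	bool denied = map_deny_write_exec(VM_READ | VM_WRITE,
					  VM_READ | VM_EXEC);
	/* Case (d): executable from the outset -> allowed. */
	bool allowed = !map_deny_write_exec(VM_READ | VM_EXEC,
					    VM_READ | VM_EXEC);

	(void)denied;
	(void)allowed;
}
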
static inline int mapping_map_writable(struct address_space *mapping)
{
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	(void)pmc;

	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	(void)tlb;
	(void)addr;
	(void)end;
	(void)floor;
	(void)ceiling;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	(void)mm;

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	(void)mm;
}

static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
	(void)vma;
	(void)reset_refcnt;
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	(void)orig_vma;
	(void)new_vma;
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	(void)vma;
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc);

static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc);

static int compat_vma_mmap_prepare(struct file *file,
		struct vm_area_struct *vma)
{
	struct vm_area_desc desc;
	int err;

	err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
	if (err)
		return err;
	set_vma_from_desc(vma, &desc);

	return 0;
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap_prepare(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

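/*
 * Sketch (illustrative only) of how the hook helpers above combine: validate
 * the driver's mmap hook configuration, then dispatch through vfs_mmap(),
 * which routes to ->mmap_prepare() via the compat shim when present. The
 * function name and error code are assumptions for the example.
 */
static inline int example_do_mmap_file(struct file *file,
				       struct vm_area_struct *vma)
{
	if (!can_mmap_file(file))
		return -EINVAL;

	return vfs_mmap(file, vma);
}
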
static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

static inline bool shmem_file(struct file *)
{
	return false;
}

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *, const struct file *,
			 vm_flags_t vm_flags)
{
	return vm_flags;
}

#endif	/* __MM_VMA_INTERNAL_H */