/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>

extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr		0UL
#define dac_mmap_min_addr	0UL
#endif

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define MMF_HAS_MDWE	28

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_GROWSUP	VM_NONE

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define RLIMIT_STACK		3	/* max stack size */
#define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK	14

#ifdef CONFIG_64BIT
/* VM is sealed, in vm_flags */
#define VM_SEALED	_BITUL(63)
#endif

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) })

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47) - PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;
/*
 * The shared stubs do not implement this; it amounts to an fprintf(stderr, ...)
 * either way :)
 */
#define pr_warn_once pr_err

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as an enum, so it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
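
/*
 * Illustrative sketch only (not part of the kernel API): a test might walk
 * every VMA in an mm using the iterator macros above. The test_mm name below
 * is a hypothetical local, not a symbol defined in this header:
 *
 *	struct mm_struct test_mm = {};
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, &test_mm, 0);
 *
 *	for_each_vma(vmi, vma)
 *		printf("[0x%lx, 0x%lx)\n", vma->vm_start, vma->vm_end);
 *
 * for_each_vma_range() is the same walk bounded by an exclusive end address.
 */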

struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;	   /* Total pages mapped */
	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;	   /* VM_STACK */

	unsigned long def_flags;

	unsigned long flags; /* Must use atomic bitops to access */
};

struct vma_lock {
	struct rw_semaphore lock;
};

struct file {
	struct address_space	*f_mapping;
};

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
#ifdef CONFIG_PER_VMA_LOCK
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/* Flag to indicate areas detached from the mm->mm_mt tree */
	bool detached;

	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_lock->lock (in write mode)
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_lock->lock (in read or write mode)
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
	struct vma_lock *vm_lock;
#endif

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.  The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);
	/* Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS. */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

static inline bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = calloc(1, sizeof(struct vma_lock));

	if (!vma->vm_lock)
		return false;

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = UINT_MAX;

	return true;
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
{
	/* When detaching, the VMA should be write-locked. */
	if (detached)
		vma_assert_write_locked(vma);
	vma->detached = detached;
}

extern const struct vm_operations_struct vma_dummy_vm_ops;

extern unsigned long rlimit(unsigned int limit);

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma_mark_detached(vma, false);
}

static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma = calloc(1, sizeof(struct vm_area_struct));

	if (!vma)
		return NULL;

	vma_init(vma, mm);
	if (!vma_lock_alloc(vma)) {
		free(vma);
		return NULL;
	}

	return vma;
}

static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = calloc(1, sizeof(struct vm_area_struct));

	if (!new)
		return NULL;

	memcpy(new, orig, sizeof(*new));
	if (!vma_lock_alloc(new)) {
		free(new);
		return NULL;
	}
	INIT_LIST_HEAD(&new->anon_vma_chain);

	return new;
}
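
/*
 * A minimal lifecycle sketch (illustrative only; the mm variable is assumed
 * to be set up by the caller, and vma_set_range() is defined below):
 *
 *	struct vm_area_struct *vma = vm_area_alloc(&mm);
 *
 *	if (vma) {
 *		vma_set_range(vma, 0x1000, 0x2000, 0);
 *		...
 *		vm_area_free(vma);
 *	}
 *
 * Note that vm_area_dup() copies the source wholesale, then replaces the
 * copied vm_lock via vma_lock_alloc() and reinitialises anon_vma_chain.
 */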

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there; we
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
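
/*
 * Worked example of the classification above (illustrative values; the
 * helper macros are #undef'd so the vma.h definitions can take over):
 *
 *	vm_stat_account(&mm, VM_READ | VM_WRITE, 4);	-> data_vm += 4
 *	vm_stat_account(&mm, VM_READ | VM_EXEC, 4);	-> exec_vm += 4
 *	vm_stat_account(&mm, VM_STACK, 4);		-> stack_vm += 4
 *
 * total_vm is incremented unconditionally in every case.
 */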

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
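
/*
 * Note on the end-address convention (see also the comment above
 * for_each_vma_range()): VMA ranges exclude their end address, while maple
 * tree ranges include theirs, hence the "end - 1" / "max - 1" conversions
 * in vma_iter_clear_gfp() and vma_find() above and find_vma_intersection()
 * below. For example, clearing the VMA range [0x1000, 0x3000) stores NULL
 * over the maple tree range 0x1000-0x2fff:
 *
 *	vma_iter_clear_gfp(&vmi, 0x1000, 0x3000, GFP_KERNEL);
 */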

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void vma_lock_free(struct vm_area_struct *vma)
{
	free(vma->vm_lock);
}

static inline void __vm_area_free(struct vm_area_struct *vma)
{
	vma_lock_free(vma);
	free(vma);
}

static inline void vm_area_free(struct vm_area_struct *vma)
{
	__vm_area_free(vma);
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end,
		      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)adjust_next;
}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

static inline bool is_file_hugepages(struct file *)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}

static inline int call_mmap(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline int shmem_zero_setup(struct vm_area_struct *)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *)
{
}

static inline void perf_event_mmap(struct vm_area_struct *)
{
}

static inline bool vma_is_dax(struct vm_area_struct *)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(unsigned long)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *)
{
}

static inline int mmap_file(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}
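
/*
 * Worked example of the guard gap arithmetic above (illustrative values):
 * with stack_guard_gap = 0x100000 and a VM_GROWSDOWN stack starting at
 * 0x7fff00000000, vm_start_gap() returns 0x7ffefff00000. The
 * "vm_start > vma->vm_start" test catches unsigned underflow: a stack
 * starting at 0x1000 with the same gap would wrap around, so the result
 * clamps to 0. Since VM_GROWSUP is VM_NONE in this shim, vm_end_gap()
 * never actually extends the range here; its -PAGE_SIZE clamp similarly
 * guards against overflow at the top of the address space.
 */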

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool capable(int cap)
{
	return true;
}

static inline bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}
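
/*
 * Worked example of the limit arithmetic (illustrative numbers): with an
 * RLIMIT_MEMLOCK of 8 MiB and 4 KiB pages, limit_pages is 2048. If
 * mm->locked_vm already holds 1024 pages, a VM_LOCKED request for 4 MiB
 * (1024 pages) passes (2048 <= 2048), while one more page fails. Note that
 * because the capable() stub above always returns true, the limit check is
 * currently unreachable in this harness.
 */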

static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));

	if (!anon_vma)
		return -ENOMEM;

	anon_vma->root = anon_vma;
	vma->anon_vma = anon_vma;

	return 0;
}

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

/*
 * Denies creating a writable executable mapping or gaining executable permissions.
 *
 * This denies the following:
 *
 *     a)      mmap(PROT_WRITE | PROT_EXEC)
 *
 *     b)      mmap(PROT_WRITE)
 *             mprotect(PROT_EXEC)
 *
 *     c)      mmap(PROT_WRITE)
 *             mprotect(PROT_READ)
 *             mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *     d)      mmap(PROT_READ | PROT_EXEC)
 *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the ones
 * we propose to set.
 *
 * Return: false if the proposed change is OK, true if it is not and should be
 * denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
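
/*
 * Illustrative calls mapping to the cases documented above (assuming
 * MMF_HAS_MDWE is set in current->mm->flags):
 *
 *	map_deny_write_exec(VM_NONE, VM_WRITE | VM_EXEC)	-> true  (case a)
 *	map_deny_write_exec(VM_WRITE, VM_EXEC)			-> true  (case b)
 *	map_deny_write_exec(VM_READ | VM_EXEC, VM_READ | VM_EXEC) -> false (case d)
 *
 * With MMF_HAS_MDWE clear, every call returns false.
 */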

static inline int mapping_map_writable(struct address_space *mapping)
{
	int c;

	/* Derived from the raw_atomic_inc_unless_negative() implementation. */
	do {
		/* Re-read on each attempt so a failed CAS retries with a fresh value. */
		c = atomic_read(&mapping->i_mmap_writable);
		if (c < 0)
			return -EPERM;
	} while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c + 1));

	return 0;
}
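
/*
 * Sketch of the intended pairing (illustrative only; the error-handling
 * shape below is an assumption, not code from this header): a shared
 * writable mapping takes a writable reference for its lifetime, released
 * via the mapping_unmap_writable() stub above:
 *
 *	if (vma_is_shared_maywrite(vma) &&
 *	    mapping_map_writable(file->f_mapping))
 *		return -EPERM;
 *	...
 *	mapping_unmap_writable(file->f_mapping);
 *
 * The CAS loop emulates atomic_inc_unless_negative(): a negative count
 * means writable mappings have been denied, so new ones must fail.
 */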

#endif	/* __MM_VMA_INTERNAL_H */