/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>

extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr		0UL
#define dac_mmap_min_addr	0UL
#endif

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define MMF_HAS_MDWE	28

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_GROWSUP	VM_NONE

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#define VM_STACK_EARLY	VM_GROWSDOWN
#else
#define VM_STACK	VM_GROWSDOWN
#define VM_STACK_EARLY	0
#endif

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define RLIMIT_STACK		3	/* max stack size */
#define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK         14

#ifdef CONFIG_64BIT
/* VM is sealed, in vm_flags */
#define VM_SEALED	_BITUL(63)
#endif

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
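
/*
 * Illustrative usage of the iteration helpers above; a sketch only, not part
 * of the original header ("mm" is an assumed, already-populated mm_struct,
 * and VMA_ITERATOR is defined further down in this file):
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	for_each_vma(vmi, vma)
 *		printf("VMA [0x%lx, 0x%lx)\n", vma->vm_start, vma->vm_end);
 */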

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; it amounts to an fprintf(stderr, ...)
 * either way :)
 */
#define pr_warn_once pr_err

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
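
/*
 * A minimal sketch of driving the iterator directly (illustrative only;
 * "mm" is an assumed mm_struct):
 *
 *	VMA_ITERATOR(vmi, mm, 0x1000);
 *	struct vm_area_struct *vma = vma_next(&vmi); // first VMA at or above 0x1000
 *
 *	vma_iter_set(&vmi, 0x2000);	// reposition the underlying maple state
 */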

struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;	   /* Total pages mapped */
	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;	   /* VM_STACK */

	unsigned long def_flags;

	unsigned long flags; /* Must use atomic bitops to access */
};

struct vm_area_struct;

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *file;
	vm_flags_t vm_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;
};

struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};
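
/*
 * A hypothetical mmap_prepare hook, sketching how a driver might adjust the
 * mutable fields and fill the write-only fields of a vm_area_desc (the names
 * my_mmap_prepare/my_vm_ops/my_ctx are illustrative, not from the kernel
 * source):
 *
 *	static int my_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_flags |= VM_DONTEXPAND;  // mutable state
 *		desc->vm_ops = &my_vm_ops;        // write-only fields
 *		desc->private_data = my_ctx;
 *		return 0;
 *	}
 */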

struct file {
	struct address_space	*f_mapping;
	const struct file_operations	*f_op;
};

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 *
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.   The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old; /* Source VMA. */
	struct vm_area_struct *new; /* Destination VMA. */
	unsigned long old_addr; /* Address from which the move begins. */
	unsigned long old_end; /* Exclusive address at which old range ends. */
	unsigned long new_addr; /* Address to move page tables to. */
	unsigned long len_in; /* Bytes to remap specified by user. */

	bool need_rmap_locks; /* Do rmap locks need to be taken? */
	bool for_stack; /* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
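
/*
 * Illustrative use of PAGETABLE_MOVE (a sketch; "old_vma", "new_vma" and
 * "len" are assumed to exist, with len a byte length):
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_vma->vm_start,
 *		       new_vma->vm_start, len);
 *	moved_bytes = move_page_tables(&pmc);
 */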

struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free objects in the
	 * same state as after calling the constructor, or deal appropriately
	 * with any differences between a freshly constructed and a reallocated
	 * object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

extern const struct vm_operations_struct vma_dummy_vm_ops;

extern unsigned long rlimit(unsigned int limit);

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

struct kmem_cache {
	const char *name;
	size_t object_size;
	struct kmem_cache_args *args;
};

static inline struct kmem_cache *__kmem_cache_create(const char *name,
						     size_t object_size,
						     struct kmem_cache_args *args)
{
	struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));

	ret->name = name;
	ret->object_size = object_size;
	ret->args = args;

	return ret;
}

#define kmem_cache_create(__name, __object_size, __args, ...)           \
	__kmem_cache_create((__name), (__object_size), (__args))

static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	(void)gfpflags;

	return calloc(s->object_size, 1);
}

static inline void kmem_cache_free(struct kmem_cache *s, void *x)
{
	free(x);
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end,
		      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)next;
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

static inline bool is_file_hugepages(struct file *)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}

static inline int shmem_zero_setup(struct vm_area_struct *)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *)
{
}

static inline void perf_event_mmap(struct vm_area_struct *)
{
}

static inline bool vma_is_dax(struct vm_area_struct *)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(unsigned long)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *)
{
}

static inline int mmap_file(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool capable(int cap)
{
	return true;
}

static inline bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}
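
/*
 * Worked example (illustrative): with 4 KiB pages and RLIMIT_MEMLOCK of
 * 64 KiB, limit_pages is 16. Locking 8 KiB (2 pages) then succeeds while
 * mm->locked_vm <= 14 and fails beyond that.
 */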

static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));

	if (!anon_vma)
		return -ENOMEM;

	anon_vma->root = anon_vma;
	vma->anon_vma = anon_vma;

	return 0;
}

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

/*
 * Denies creating a writable executable mapping or gaining executable permissions.
 *
 * This denies the following:
 *
 *     a)      mmap(PROT_WRITE | PROT_EXEC)
 *
 *     b)      mmap(PROT_WRITE)
 *             mprotect(PROT_EXEC)
 *
 *     c)      mmap(PROT_WRITE)
 *             mprotect(PROT_READ)
 *             mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *     d)      mmap(PROT_READ | PROT_EXEC)
 *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the ones
 * we propose to set.
 *
 * Return: false if proposed change is OK, true if not ok and should be denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
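
/*
 * Example (illustrative): with MDWE enabled, going from
 * old = VM_READ | VM_WRITE to new = VM_READ | VM_EXEC returns true (denied),
 * since the VMA was not previously executable; old = new = VM_READ | VM_EXEC
 * returns false (allowed).
 */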

static inline int mapping_map_writable(struct address_space *mapping)
{
	int c = atomic_read(&mapping->i_mmap_writable);

	/* Derived from the raw_atomic_inc_unless_negative() implementation. */
	do {
		if (c < 0)
			return -EPERM;
	} while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c+1));

	return 0;
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	(void)pmc;

	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	(void)tlb;
	(void)addr;
	(void)end;
	(void)floor;
	(void)ceiling;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	(void)mm;

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	(void)mm;
}

static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
	(void)vma;
	(void)reset_refcnt;
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	(void)orig_vma;
	(void)new_vma;
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	(void)vma;
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool file_has_valid_mmap_hooks(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (WARN_ON_ONCE(!has_mmap && !has_mmap_prepare))
		return false;

	return true;
}

static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (WARN_ON_ONCE(file->f_op->mmap_prepare))
		return -EINVAL;

	return file->f_op->mmap(file, vma);
}

static inline int __call_mmap_prepare(struct file *file,
		struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

#endif	/* __MM_VMA_INTERNAL_H */