1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * vma_internal.h
4  *
5  * Header providing userland wrappers and shims for the functionality provided
6  * by mm/vma_internal.h.
7  *
8  * We make the header guard the same as mm/vma_internal.h, so if this shim
9  * header is included, it precludes the inclusion of the kernel one.
10  */
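
/*
 * Illustrative usage (a sketch; the exact file list depends on the test
 * harness): a userland test translation unit includes this shim first and
 * then the kernel source under test, e.g.:
 *
 *	#include "vma_internal.h"
 *	#include "../../../mm/vma.c"
 *
 * Because the guard below matches mm/vma_internal.h, the kernel-side header
 * is never pulled in.
 */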
11 
12 #ifndef __MM_VMA_INTERNAL_H
13 #define __MM_VMA_INTERNAL_H
14 
15 #define __private
16 #define __bitwise
17 #define __randomize_layout
18 
19 #define CONFIG_MMU
20 #define CONFIG_PER_VMA_LOCK
21 
22 #include <stdlib.h>
23 
24 #include <linux/atomic.h>
25 #include <linux/list.h>
26 #include <linux/maple_tree.h>
27 #include <linux/mm.h>
28 #include <linux/rbtree.h>
29 #include <linux/refcount.h>
30 #include <linux/slab.h>
31 
32 extern unsigned long stack_guard_gap;
33 #ifdef CONFIG_MMU
34 extern unsigned long mmap_min_addr;
35 extern unsigned long dac_mmap_min_addr;
36 #else
37 #define mmap_min_addr		0UL
38 #define dac_mmap_min_addr	0UL
39 #endif
40 
41 #define VM_WARN_ON(_expr) (WARN_ON(_expr))
42 #define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
43 #define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
44 #define VM_BUG_ON(_expr) (BUG_ON(_expr))
45 #define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
46 
47 #define MMF_HAS_MDWE	28
48 
49 #define VM_NONE		0x00000000
50 #define VM_READ		0x00000001
51 #define VM_WRITE	0x00000002
52 #define VM_EXEC		0x00000004
53 #define VM_SHARED	0x00000008
54 #define VM_MAYREAD	0x00000010
55 #define VM_MAYWRITE	0x00000020
56 #define VM_MAYEXEC	0x00000040
57 #define VM_GROWSDOWN	0x00000100
58 #define VM_PFNMAP	0x00000400
59 #define VM_LOCKED	0x00002000
60 #define VM_IO           0x00004000
61 #define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
62 #define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
63 #define VM_DONTEXPAND	0x00040000
64 #define VM_LOCKONFAULT	0x00080000
65 #define VM_ACCOUNT	0x00100000
66 #define VM_NORESERVE	0x00200000
67 #define VM_MIXEDMAP	0x10000000
68 #define VM_STACK	VM_GROWSDOWN
69 #define VM_SHADOW_STACK	VM_NONE
70 #define VM_SOFTDIRTY	0
71 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
72 #define VM_GROWSUP	VM_NONE
73 
74 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
75 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
76 
77 #ifdef CONFIG_STACK_GROWSUP
78 #define VM_STACK	VM_GROWSUP
79 #define VM_STACK_EARLY	VM_GROWSDOWN
80 #else
81 #define VM_STACK	VM_GROWSDOWN
82 #define VM_STACK_EARLY	0
83 #endif
84 
85 #define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
86 #define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
87 #define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
88 #define STACK_TOP		TASK_SIZE_LOW
89 #define STACK_TOP_MAX		TASK_SIZE_MAX
90 
91 /* This mask represents all the VMA flag bits used by mlock */
92 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
93 
94 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
95 
96 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
97 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
98 
99 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC
100 
101 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
102 
103 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
104 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
105 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
106 
107 #define RLIMIT_STACK		3	/* max stack size */
108 #define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */
109 
110 #define CAP_IPC_LOCK         14
111 
112 #ifdef CONFIG_64BIT
113 #define VM_SEALED_BIT	42
114 #define VM_SEALED	BIT(VM_SEALED_BIT)
115 #else
116 #define VM_SEALED	VM_NONE
117 #endif
118 
119 #define FIRST_USER_ADDRESS	0UL
120 #define USER_PGTABLES_CEILING	0UL
121 
122 #define vma_policy(vma) NULL
123 
124 #define down_write_nest_lock(sem, nest_lock)
125 
126 #define pgprot_val(x)		((x).pgprot)
127 #define __pgprot(x)		((pgprot_t) { (x) } )
128 
129 #define for_each_vma(__vmi, __vma)					\
130 	while (((__vma) = vma_next(&(__vmi))) != NULL)
131 
132 /* The MM code likes to work with exclusive end addresses */
133 #define for_each_vma_range(__vmi, __vma, __end)				\
134 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
135 
136 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
137 
138 #define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
139 
140 #define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
141 #define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)
142 
143 #define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
144 
145 #define AS_MM_ALL_LOCKS 2
146 
147 /* We hardcode this for now. */
148 #define sysctl_max_map_count 0x1000000UL
149 
150 #define pgoff_t unsigned long
151 typedef unsigned long	pgprotval_t;
152 typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
153 typedef unsigned long vm_flags_t;
154 typedef __bitwise unsigned int vm_fault_t;
155 
156 /*
157  * The shared stubs do not implement this, it amounts to an fprintf(STDERR,...)
158  * either way :)
159  */
160 #define pr_warn_once pr_err
161 
162 #define data_race(expr) expr
163 
164 #define ASSERT_EXCLUSIVE_WRITER(x)
165 
166 /**
167  * swap - swap values of @a and @b
168  * @a: first value
169  * @b: second value
170  */
171 #define swap(a, b) \
172 	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
173 
174 struct kref {
175 	refcount_t refcount;
176 };
177 
178 /*
179  * Define the task command name length as enum, then it can be visible to
180  * BPF programs.
181  */
182 enum {
183 	TASK_COMM_LEN = 16,
184 };
185 
186 /*
187  * Flags for bug emulation.
188  *
189  * These occupy the top three bytes.
190  */
191 enum {
192 	READ_IMPLIES_EXEC =	0x0400000,
193 };
194 
195 struct task_struct {
196 	char comm[TASK_COMM_LEN];
197 	pid_t pid;
198 	struct mm_struct *mm;
199 
200 	/* Used for emulating ABI behavior of previous Linux versions: */
201 	unsigned int			personality;
202 };
203 
204 struct task_struct *get_current(void);
205 #define current get_current()
206 
207 struct anon_vma {
208 	struct anon_vma *root;
209 	struct rb_root_cached rb_root;
210 
211 	/* Test fields. */
212 	bool was_cloned;
213 	bool was_unlinked;
214 };
215 
216 struct anon_vma_chain {
217 	struct anon_vma *anon_vma;
218 	struct list_head same_vma;
219 };
220 
221 struct anon_vma_name {
222 	struct kref kref;
223 	/* The name needs to be at the end because it is dynamically sized. */
224 	char name[];
225 };
226 
227 struct vma_iterator {
228 	struct ma_state mas;
229 };
230 
231 #define VMA_ITERATOR(name, __mm, __addr)				\
232 	struct vma_iterator name = {					\
233 		.mas = {						\
234 			.tree = &(__mm)->mm_mt,				\
235 			.index = __addr,				\
236 			.node = NULL,					\
237 			.status = ma_start,				\
238 		},							\
239 	}
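
/*
 * Example (illustrative), assuming @mm points to an initialised mm_struct
 * whose maple tree already holds VMAs: walk every VMA in address order.
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	for_each_vma(vmi, vma)
 *		printf("%lx-%lx\n", vma->vm_start, vma->vm_end);
 */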
240 
241 struct address_space {
242 	struct rb_root_cached	i_mmap;
243 	unsigned long		flags;
244 	atomic_t		i_mmap_writable;
245 };
246 
247 struct vm_userfaultfd_ctx {};
248 struct mempolicy {};
249 struct mmu_gather {};
250 struct mutex {};
251 #define DEFINE_MUTEX(mutexname) \
252 	struct mutex mutexname = {}
253 
254 #define DECLARE_BITMAP(name, bits) \
255 	unsigned long name[BITS_TO_LONGS(bits)]
256 
257 #define NUM_MM_FLAG_BITS (64)
258 typedef struct {
259 	__private DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
260 } mm_flags_t;
261 
262 struct mm_struct {
263 	struct maple_tree mm_mt;
264 	int map_count;			/* number of VMAs */
265 	unsigned long total_vm;	   /* Total pages mapped */
266 	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
267 	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
268 	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
269 	unsigned long stack_vm;	   /* VM_STACK */
270 
271 	unsigned long def_flags;
272 
273 	mm_flags_t flags; /* Must use mm_flags_* helpers to access */
274 };
275 
276 struct vm_area_struct;
277 
278 /*
279  * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
280  * manipulate mutable fields which will cause those fields to be updated in the
281  * resultant VMA.
282  *
283  * Helper functions are not required for manipulating any field.
284  */
285 struct vm_area_desc {
286 	/* Immutable state. */
287 	const struct mm_struct *const mm;
288 	struct file *const file; /* May vary from vm_file in stacked callers. */
289 	unsigned long start;
290 	unsigned long end;
291 
292 	/* Mutable fields. Populated with initial state. */
293 	pgoff_t pgoff;
294 	struct file *vm_file;
295 	vm_flags_t vm_flags;
296 	pgprot_t page_prot;
297 
298 	/* Write-only fields. */
299 	const struct vm_operations_struct *vm_ops;
300 	void *private_data;
301 };
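
/*
 * Sketch of a driver-style .mmap_prepare hook operating on the descriptor
 * above (my_mmap_prepare, my_vm_ops and my_ctx are hypothetical names):
 *
 *	static int my_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_flags |= VM_DONTEXPAND;
 *		desc->vm_ops = &my_vm_ops;
 *		desc->private_data = my_ctx;
 *		return 0;
 *	}
 */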
302 
303 struct file_operations {
304 	int (*mmap)(struct file *, struct vm_area_struct *);
305 	int (*mmap_prepare)(struct vm_area_desc *);
306 };
307 
308 struct file {
309 	struct address_space	*f_mapping;
310 	const struct file_operations	*f_op;
311 };
312 
313 #define VMA_LOCK_OFFSET	0x40000000
314 
315 typedef struct { unsigned long v; } freeptr_t;
316 
317 struct vm_area_struct {
318 	/* The first cache line has the info for VMA tree walking. */
319 
320 	union {
321 		struct {
322 			/* VMA covers [vm_start; vm_end) addresses within mm */
323 			unsigned long vm_start;
324 			unsigned long vm_end;
325 		};
326 		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
327 	};
328 
329 	struct mm_struct *vm_mm;	/* The address space we belong to. */
330 	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
331 
332 	/*
333 	 * Flags, see mm.h.
334 	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
335 	 */
336 	union {
337 		const vm_flags_t vm_flags;
338 		vm_flags_t __private __vm_flags;
339 	};
340 
341 #ifdef CONFIG_PER_VMA_LOCK
342 	/*
343 	 * Can only be written (using WRITE_ONCE()) while holding both:
344 	 *  - mmap_lock (in write mode)
345 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
346 	 * Can be read reliably while holding one of:
347 	 *  - mmap_lock (in read or write mode)
348 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
349 	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
350 	 * while holding nothing (except RCU to keep the VMA struct allocated).
351 	 *
352 	 * This sequence counter is explicitly allowed to overflow; sequence
353 	 * counter reuse can only lead to occasional unnecessary use of the
354 	 * slowpath.
355 	 */
356 	unsigned int vm_lock_seq;
357 #endif
358 
359 	/*
360 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
361 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
362 	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
363 	 * or brk vma (with NULL file) can only be in an anon_vma list.
364 	 */
365 	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
366 					  * page_table_lock */
367 	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
368 
369 	/* Function pointers to deal with this struct. */
370 	const struct vm_operations_struct *vm_ops;
371 
372 	/* Information about our backing store: */
373 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
374 					   units */
375 	struct file * vm_file;		/* File we map to (can be NULL). */
376 	void * vm_private_data;		/* was vm_pte (shared mem) */
377 
378 #ifdef CONFIG_SWAP
379 	atomic_long_t swap_readahead_info;
380 #endif
381 #ifndef CONFIG_MMU
382 	struct vm_region *vm_region;	/* NOMMU mapping region */
383 #endif
384 #ifdef CONFIG_NUMA
385 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
386 #endif
387 #ifdef CONFIG_NUMA_BALANCING
388 	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
389 #endif
390 #ifdef CONFIG_PER_VMA_LOCK
391 	/* Unstable RCU readers are allowed to read this. */
392 	refcount_t vm_refcnt;
393 #endif
394 	/*
395 	 * For areas with an address space and backing store,
396 	 * linkage into the address_space->i_mmap interval tree.
397 	 *
398 	 */
399 	struct {
400 		struct rb_node rb;
401 		unsigned long rb_subtree_last;
402 	} shared;
403 #ifdef CONFIG_ANON_VMA_NAME
404 	/*
405 	 * For private and shared anonymous mappings, a pointer to a null
406 	 * terminated string containing the name given to the vma, or NULL if
407 	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
408 	 */
409 	struct anon_vma_name *anon_name;
410 #endif
411 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
412 } __randomize_layout;
413 
414 struct vm_fault {};
415 
416 struct vm_operations_struct {
417 	void (*open)(struct vm_area_struct * area);
418 	/**
419 	 * @close: Called when the VMA is being removed from the MM.
420 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
421 	 */
422 	void (*close)(struct vm_area_struct * area);
423 	/* Called any time before splitting to check if it's allowed */
424 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
425 	int (*mremap)(struct vm_area_struct *area);
426 	/*
427 	 * Called by mprotect() to make driver-specific permission
428 	 * checks before mprotect() is finalised.   The VMA must not
429 	 * be modified.  Returns 0 if mprotect() can proceed.
430 	 */
431 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
432 			unsigned long end, unsigned long newflags);
433 	vm_fault_t (*fault)(struct vm_fault *vmf);
434 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
435 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
436 			pgoff_t start_pgoff, pgoff_t end_pgoff);
437 	unsigned long (*pagesize)(struct vm_area_struct * area);
438 
439 	/* notification that a previously read-only page is about to become
440 	 * writable, if an error is returned it will cause a SIGBUS */
441 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
442 
443 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
444 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
445 
446 	/* called by access_process_vm when get_user_pages() fails, typically
447 	 * for use by special VMAs. See also generic_access_phys() for a generic
448 	 * implementation useful for any iomem mapping.
449 	 */
450 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
451 		      void *buf, int len, int write);
452 
453 	/* Called by the /proc/PID/maps code to ask the vma whether it
454 	 * has a special name.  Returning non-NULL will also cause this
455 	 * vma to be dumped unconditionally. */
456 	const char *(*name)(struct vm_area_struct *vma);
457 
458 #ifdef CONFIG_NUMA
459 	/*
460 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
461 	 * to hold the policy upon return.  Caller should pass NULL @new to
462 	 * remove a policy and fall back to surrounding context--i.e. do not
463 	 * install a MPOL_DEFAULT policy, nor the task or system default
464 	 * mempolicy.
465 	 */
466 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
467 
468 	/*
469 	 * get_policy() op must add reference [mpol_get()] to any policy at
470 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
471 	 * in mm/mempolicy.c will do this automatically.
472 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
473 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
474 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
475 	 * must return NULL--i.e., do not "fallback" to task or system default
476 	 * policy.
477 	 */
478 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
479 					unsigned long addr, pgoff_t *ilx);
480 #endif
481 #ifdef CONFIG_FIND_NORMAL_PAGE
482 	/*
483 	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
484 	 * allows for returning a "normal" page from vm_normal_page() even
485 	 * though the PTE indicates that the "struct page" either does not exist
486 	 * or should not be touched: "special".
487 	 *
488 	 * Do not add new users: this really only works when a "normal" page
489 	 * was mapped, but then the PTE got changed to something weird (+
490 	 * marked special) that would not make pte_pfn() identify the originally
491 	 * inserted page.
492 	 */
493 	struct page *(*find_normal_page)(struct vm_area_struct *vma,
494 					 unsigned long addr);
495 #endif /* CONFIG_FIND_NORMAL_PAGE */
496 };
497 
498 struct vm_unmapped_area_info {
499 #define VM_UNMAPPED_AREA_TOPDOWN 1
500 	unsigned long flags;
501 	unsigned long length;
502 	unsigned long low_limit;
503 	unsigned long high_limit;
504 	unsigned long align_mask;
505 	unsigned long align_offset;
506 	unsigned long start_gap;
507 };
508 
509 struct pagetable_move_control {
510 	struct vm_area_struct *old; /* Source VMA. */
511 	struct vm_area_struct *new; /* Destination VMA. */
512 	unsigned long old_addr; /* Address from which the move begins. */
513 	unsigned long old_end; /* Exclusive address at which old range ends. */
514 	unsigned long new_addr; /* Address to move page tables to. */
515 	unsigned long len_in; /* Bytes to remap specified by user. */
516 
517 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
518 	bool for_stack; /* Is this an early temp stack being moved? */
519 };
520 
521 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
522 	struct pagetable_move_control name = {				\
523 		.old = old_,						\
524 		.new = new_,						\
525 		.old_addr = old_addr_,					\
526 		.old_end = (old_addr_) + (len_),			\
527 		.new_addr = new_addr_,					\
528 		.len_in = len_,						\
529 	}
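
/*
 * Example (illustrative): describe moving @len bytes of page tables from
 * @old_vma at @old_addr to @new_vma at @new_addr; note that in this harness
 * move_page_tables() below is stubbed to return 0.
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 *	moved = move_page_tables(&pmc);
 */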
530 
531 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
532 {
533 	mas_pause(&vmi->mas);
534 }
535 
536 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
537 {
538 	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
539 }
540 
541 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
542 {
543 	return __pgprot(vm_flags);
544 }
545 
546 static inline bool is_shared_maywrite(vm_flags_t vm_flags)
547 {
548 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
549 		(VM_SHARED | VM_MAYWRITE);
550 }
551 
552 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
553 {
554 	return is_shared_maywrite(vma->vm_flags);
555 }
556 
557 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
558 {
559 	/*
560 	 * Uses mas_find() to get the first VMA when the iterator starts.
561 	 * Calling mas_next() could skip the first entry.
562 	 */
563 	return mas_find(&vmi->mas, ULONG_MAX);
564 }
565 
566 /*
567  * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
568  * assertions should be made either under mmap_write_lock or when the object
569  * has been isolated under mmap_write_lock, ensuring no competing writers.
570  */
571 static inline void vma_assert_attached(struct vm_area_struct *vma)
572 {
573 	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
574 }
575 
576 static inline void vma_assert_detached(struct vm_area_struct *vma)
577 {
578 	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
579 }
580 
581 static inline void vma_assert_write_locked(struct vm_area_struct *);
582 static inline void vma_mark_attached(struct vm_area_struct *vma)
583 {
584 	vma_assert_write_locked(vma);
585 	vma_assert_detached(vma);
586 	refcount_set_release(&vma->vm_refcnt, 1);
587 }
588 
589 static inline void vma_mark_detached(struct vm_area_struct *vma)
590 {
591 	vma_assert_write_locked(vma);
592 	vma_assert_attached(vma);
593 	/* We are the only writer, so no need to use vma_refcount_put(). */
594 	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
595 		/*
596 		 * Reader must have temporarily raised vm_refcnt but it will
597 		 * drop it without using the vma since vma is write-locked.
598 		 */
599 	}
600 }
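
/*
 * Typical lifecycle as exercised by tests (illustrative): a VMA is attached
 * once inserted into the tree and detached again before it is freed, with
 * vm_refcnt going 0 -> 1 -> 0:
 *
 *	vma_mark_attached(vma);
 *	...
 *	vma_mark_detached(vma);
 */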
601 
602 extern const struct vm_operations_struct vma_dummy_vm_ops;
603 
604 extern unsigned long rlimit(unsigned int limit);
605 
606 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
607 {
608 	memset(vma, 0, sizeof(*vma));
609 	vma->vm_mm = mm;
610 	vma->vm_ops = &vma_dummy_vm_ops;
611 	INIT_LIST_HEAD(&vma->anon_vma_chain);
612 	vma->vm_lock_seq = UINT_MAX;
613 }
614 
615 /*
616  * These are defined in vma.h, but sadly vm_stat_account() is referenced by
617  * kernel/fork.c, so we have to make these broadly available there, and temporarily
618  * define them here to resolve the dependency cycle.
619  */
620 
621 #define is_exec_mapping(flags) \
622 	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)
623 
624 #define is_stack_mapping(flags) \
625 	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))
626 
627 #define is_data_mapping(flags) \
628 	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
629 
630 static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
631 				   long npages)
632 {
633 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
634 
635 	if (is_exec_mapping(flags))
636 		mm->exec_vm += npages;
637 	else if (is_stack_mapping(flags))
638 		mm->stack_vm += npages;
639 	else if (is_data_mapping(flags))
640 		mm->data_vm += npages;
641 }
642 
643 #undef is_exec_mapping
644 #undef is_stack_mapping
645 #undef is_data_mapping
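
/*
 * Worked example (illustrative): accounting 16 pages of a private
 * VM_READ | VM_WRITE mapping updates two counters:
 *
 *	vm_stat_account(mm, VM_READ | VM_WRITE, 16);
 *
 * total_vm grows by 16 and, because VM_WRITE is set while VM_SHARED and
 * VM_STACK are clear, data_vm grows by 16 as well.
 */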
646 
647 /* Currently stubbed but we may later wish to un-stub. */
648 static inline void vm_acct_memory(long pages);
649 static inline void vm_unacct_memory(long pages)
650 {
651 	vm_acct_memory(-pages);
652 }
653 
654 static inline void mapping_allow_writable(struct address_space *mapping)
655 {
656 	atomic_inc(&mapping->i_mmap_writable);
657 }
658 
659 static inline void vma_set_range(struct vm_area_struct *vma,
660 				 unsigned long start, unsigned long end,
661 				 pgoff_t pgoff)
662 {
663 	vma->vm_start = start;
664 	vma->vm_end = end;
665 	vma->vm_pgoff = pgoff;
666 }
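
/*
 * Example (illustrative): hand-building a detached VMA for a test, assuming
 * @vma points to writable storage and @mm to an initialised mm_struct:
 *
 *	vma_init(vma, mm);
 *	vma_set_range(vma, 0x1000, 0x3000, 0);
 *	vm_flags_init(vma, VM_READ | VM_WRITE);
 */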
667 
668 static inline
669 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
670 {
671 	return mas_find(&vmi->mas, max - 1);
672 }
673 
674 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
675 			unsigned long start, unsigned long end, gfp_t gfp)
676 {
677 	__mas_set_range(&vmi->mas, start, end - 1);
678 	mas_store_gfp(&vmi->mas, NULL, gfp);
679 	if (unlikely(mas_is_err(&vmi->mas)))
680 		return -ENOMEM;
681 
682 	return 0;
683 }
684 
685 static inline void mmap_assert_locked(struct mm_struct *);
686 static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
687 						unsigned long start_addr,
688 						unsigned long end_addr)
689 {
690 	unsigned long index = start_addr;
691 
692 	mmap_assert_locked(mm);
693 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
694 }
695 
696 static inline
697 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
698 {
699 	return mtree_load(&mm->mm_mt, addr);
700 }
701 
702 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
703 {
704 	return mas_prev(&vmi->mas, 0);
705 }
706 
707 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
708 {
709 	mas_set(&vmi->mas, addr);
710 }
711 
712 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
713 {
714 	return !vma->vm_ops;
715 }
716 
717 /* Defined in vma.h, so temporarily define here to avoid circular dependency. */
718 #define vma_iter_load(vmi) \
719 	mas_walk(&(vmi)->mas)
720 
721 static inline struct vm_area_struct *
722 find_vma_prev(struct mm_struct *mm, unsigned long addr,
723 			struct vm_area_struct **pprev)
724 {
725 	struct vm_area_struct *vma;
726 	VMA_ITERATOR(vmi, mm, addr);
727 
728 	vma = vma_iter_load(&vmi);
729 	*pprev = vma_prev(&vmi);
730 	if (!vma)
731 		vma = vma_next(&vmi);
732 	return vma;
733 }
734 
735 #undef vma_iter_load
736 
737 static inline void vma_iter_init(struct vma_iterator *vmi,
738 		struct mm_struct *mm, unsigned long addr)
739 {
740 	mas_init(&vmi->mas, &mm->mm_mt, addr);
741 }
742 
743 /* Stubbed functions. */
744 
745 static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
746 {
747 	return NULL;
748 }
749 
750 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
751 					struct vm_userfaultfd_ctx vm_ctx)
752 {
753 	return true;
754 }
755 
756 static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
757 				    struct anon_vma_name *anon_name2)
758 {
759 	return true;
760 }
761 
762 static inline void might_sleep(void)
763 {
764 }
765 
766 static inline unsigned long vma_pages(struct vm_area_struct *vma)
767 {
768 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
769 }
770 
771 static inline void fput(struct file *file)
772 {
773 }
774 
775 static inline void mpol_put(struct mempolicy *pol)
776 {
777 }
778 
779 static inline void lru_add_drain(void)
780 {
781 }
782 
783 static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
784 {
785 }
786 
787 static inline void update_hiwater_rss(struct mm_struct *mm)
788 {
789 }
790 
791 static inline void update_hiwater_vm(struct mm_struct *mm)
792 {
793 }
794 
795 static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
796 		      struct vm_area_struct *vma, unsigned long start_addr,
797 		      unsigned long end_addr, unsigned long tree_end,
798 		      bool mm_wr_locked)
799 {
800 }
801 
802 static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
803 		   struct vm_area_struct *vma, unsigned long floor,
804 		   unsigned long ceiling, bool mm_wr_locked)
805 {
806 }
807 
808 static inline void mapping_unmap_writable(struct address_space *mapping)
809 {
810 }
811 
812 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
813 {
814 }
815 
816 static inline void tlb_finish_mmu(struct mmu_gather *tlb)
817 {
818 }
819 
820 static inline struct file *get_file(struct file *f)
821 {
822 	return f;
823 }
824 
825 static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
826 {
827 	return 0;
828 }
829 
830 static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
831 {
832 	/* For testing purposes. We indicate that an anon_vma has been cloned. */
833 	if (src->anon_vma != NULL) {
834 		dst->anon_vma = src->anon_vma;
835 		dst->anon_vma->was_cloned = true;
836 	}
837 
838 	return 0;
839 }
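
/*
 * In a test, the was_cloned/was_unlinked fields let assertions observe what
 * merge/split did (illustrative; assumes the caller provides a dummy anon_vma
 * and its own assertion macro):
 *
 *	src->anon_vma = &dummy_anon_vma;
 *	anon_vma_clone(dst, src);
 *	assert(dst->anon_vma->was_cloned);
 */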
840 
841 static inline void vma_start_write(struct vm_area_struct *vma)
842 {
843 	/* Used to indicate to tests that a write operation has begun. */
844 	vma->vm_lock_seq++;
845 }
846 
847 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
848 					 unsigned long start,
849 					 unsigned long end,
850 					 struct vm_area_struct *next)
851 {
852 }
853 
854 static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
855 
856 static inline void vma_iter_free(struct vma_iterator *vmi)
857 {
858 	mas_destroy(&vmi->mas);
859 }
860 
861 static inline
862 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
863 {
864 	return mas_next_range(&vmi->mas, ULONG_MAX);
865 }
866 
867 static inline void vm_acct_memory(long pages)
868 {
869 }
870 
871 static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
872 					    struct rb_root_cached *rb)
873 {
874 }
875 
876 static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
877 					    struct rb_root_cached *rb)
878 {
879 }
880 
881 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
882 {
883 }
884 
885 static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
886 						 struct rb_root_cached *rb)
887 {
888 }
889 
890 static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
891 						 struct rb_root_cached *rb)
892 {
893 }
894 
895 static inline void uprobe_mmap(struct vm_area_struct *vma)
896 {
897 }
898 
899 static inline void uprobe_munmap(struct vm_area_struct *vma,
900 				 unsigned long start, unsigned long end)
901 {
902 }
903 
904 static inline void i_mmap_lock_write(struct address_space *mapping)
905 {
906 }
907 
908 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
909 {
910 }
911 
912 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
913 {
914 }
915 
916 static inline void unlink_anon_vmas(struct vm_area_struct *vma)
917 {
918 	/* For testing purposes, indicate that the anon_vma was unlinked. */
919 	vma->anon_vma->was_unlinked = true;
920 }
921 
922 static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
923 {
924 }
925 
926 static inline void i_mmap_unlock_write(struct address_space *mapping)
927 {
928 }
929 
930 static inline void anon_vma_merge(struct vm_area_struct *vma,
931 				  struct vm_area_struct *next)
932 {
933 }
934 
935 static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
936 					 unsigned long start,
937 					 unsigned long end,
938 					 struct list_head *unmaps)
939 {
940 	return 0;
941 }
942 
943 static inline void mmap_write_downgrade(struct mm_struct *mm)
944 {
945 }
946 
947 static inline void mmap_read_unlock(struct mm_struct *mm)
948 {
949 }
950 
951 static inline void mmap_write_unlock(struct mm_struct *mm)
952 {
953 }
954 
955 static inline int mmap_write_lock_killable(struct mm_struct *mm)
956 {
957 	return 0;
958 }
959 
960 static inline bool can_modify_mm(struct mm_struct *mm,
961 				 unsigned long start,
962 				 unsigned long end)
963 {
964 	return true;
965 }
966 
967 static inline void arch_unmap(struct mm_struct *mm,
968 				 unsigned long start,
969 				 unsigned long end)
970 {
971 }
972 
973 static inline void mmap_assert_locked(struct mm_struct *mm)
974 {
975 }
976 
977 static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
978 {
979 	return true;
980 }
981 
982 static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
983 			  vm_flags_t vm_flags)
984 {
985 }
986 
987 static inline bool mapping_can_writeback(struct address_space *mapping)
988 {
989 	return true;
990 }
991 
992 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
993 {
994 	return false;
995 }
996 
997 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
998 {
999 	return false;
1000 }
1001 
1002 static inline bool userfaultfd_wp(struct vm_area_struct *vma)
1003 {
1004 	return false;
1005 }
1006 
1007 static inline void mmap_assert_write_locked(struct mm_struct *mm)
1008 {
1009 }
1010 
1011 static inline void mutex_lock(struct mutex *lock)
1012 {
1013 }
1014 
1015 static inline void mutex_unlock(struct mutex *lock)
1016 {
1017 }
1018 
1019 static inline bool mutex_is_locked(struct mutex *lock)
1020 {
1021 	return true;
1022 }
1023 
1024 static inline bool signal_pending(void *p)
1025 {
1026 	return false;
1027 }
1028 
1029 static inline bool is_file_hugepages(struct file *file)
1030 {
1031 	return false;
1032 }
1033 
1034 static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
1035 {
1036 	return 0;
1037 }
1038 
1039 static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
1040 				 unsigned long npages)
1041 {
1042 	return true;
1043 }
1044 
1045 static inline void vm_flags_init(struct vm_area_struct *vma,
1046 				 vm_flags_t flags)
1047 {
1048 	vma->__vm_flags = flags;
1049 }
1050 
1051 static inline void vm_flags_set(struct vm_area_struct *vma,
1052 				vm_flags_t flags)
1053 {
1054 	vma_start_write(vma);
1055 	vma->__vm_flags |= flags;
1056 }
1057 
1058 static inline void vm_flags_clear(struct vm_area_struct *vma,
1059 				  vm_flags_t flags)
1060 {
1061 	vma_start_write(vma);
1062 	vma->__vm_flags &= ~flags;
1063 }
1064 
1065 static inline int shmem_zero_setup(struct vm_area_struct *vma)
1066 {
1067 	return 0;
1068 }
1069 
1070 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1071 {
1072 	vma->vm_ops = NULL;
1073 }
1074 
1075 static inline void ksm_add_vma(struct vm_area_struct *vma)
1076 {
1077 }
1078 
1079 static inline void perf_event_mmap(struct vm_area_struct *vma)
1080 {
1081 }
1082 
1083 static inline bool vma_is_dax(struct vm_area_struct *vma)
1084 {
1085 	return false;
1086 }
1087 
1088 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
1089 {
1090 	return NULL;
1091 }
1092 
1093 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1094 
1095 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
1096 static inline void vma_set_page_prot(struct vm_area_struct *vma)
1097 {
1098 	vm_flags_t vm_flags = vma->vm_flags;
1099 	pgprot_t vm_page_prot;
1100 
1101 	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1102 	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));
1103 
1104 	if (vma_wants_writenotify(vma, vm_page_prot)) {
1105 		vm_flags &= ~VM_SHARED;
1106 		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1107 		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
1108 	}
1109 	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
1110 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
1111 }
1112 
1113 static inline bool arch_validate_flags(vm_flags_t flags)
1114 {
1115 	return true;
1116 }
1117 
1118 static inline void vma_close(struct vm_area_struct *vma)
1119 {
1120 }
1121 
1122 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
1123 {
1124 	return 0;
1125 }
1126 
1127 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
1128 {
1129 	if (vma->vm_flags & VM_GROWSDOWN)
1130 		return stack_guard_gap;
1131 
1132 	/* See reasoning around the VM_SHADOW_STACK definition */
1133 	if (vma->vm_flags & VM_SHADOW_STACK)
1134 		return PAGE_SIZE;
1135 
1136 	return 0;
1137 }
1138 
1139 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
1140 {
1141 	unsigned long gap = stack_guard_start_gap(vma);
1142 	unsigned long vm_start = vma->vm_start;
1143 
1144 	vm_start -= gap;
1145 	if (vm_start > vma->vm_start)
1146 		vm_start = 0;
1147 	return vm_start;
1148 }
1149 
1150 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
1151 {
1152 	unsigned long vm_end = vma->vm_end;
1153 
1154 	if (vma->vm_flags & VM_GROWSUP) {
1155 		vm_end += stack_guard_gap;
1156 		if (vm_end < vma->vm_end)
1157 			vm_end = -PAGE_SIZE;
1158 	}
1159 	return vm_end;
1160 }
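
/*
 * Example (illustrative): for a VM_GROWSDOWN VMA spanning
 * [0x7f0000000000, 0x7f0000021000) with a stack_guard_gap of 256 pages
 * (the kernel default), vm_start_gap() reports 0x7efffff00000, so gap and
 * overlap checks treat the guard region as part of the VMA. vm_end_gap()
 * only extends the end for VM_GROWSUP, which this shim defines as VM_NONE.
 */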
1161 
1162 static inline int is_hugepage_only_range(struct mm_struct *mm,
1163 					unsigned long addr, unsigned long len)
1164 {
1165 	return 0;
1166 }
1167 
1168 static inline bool vma_is_accessible(struct vm_area_struct *vma)
1169 {
1170 	return vma->vm_flags & VM_ACCESS_FLAGS;
1171 }
1172 
1173 static inline bool capable(int cap)
1174 {
1175 	return true;
1176 }
1177 
1178 static inline bool mlock_future_ok(const struct mm_struct *mm,
1179 		vm_flags_t vm_flags, unsigned long bytes)
1180 {
1181 	unsigned long locked_pages, limit_pages;
1182 
1183 	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1184 		return true;
1185 
1186 	locked_pages = bytes >> PAGE_SHIFT;
1187 	locked_pages += mm->locked_vm;
1188 
1189 	limit_pages = rlimit(RLIMIT_MEMLOCK);
1190 	limit_pages >>= PAGE_SHIFT;
1191 
1192 	return locked_pages <= limit_pages;
1193 }
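
/*
 * Note: the capable() stub above always returns true, so in this harness the
 * limit check is effectively bypassed via CAP_IPC_LOCK. For reference, with a
 * 64 KiB RLIMIT_MEMLOCK and 4 KiB pages the limit is 16 pages, so locking 8
 * more pages on top of 10 already-locked ones (18 > 16) would fail the check.
 */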
1194 
1195 static inline int __anon_vma_prepare(struct vm_area_struct *vma)
1196 {
1197 	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));
1198 
1199 	if (!anon_vma)
1200 		return -ENOMEM;
1201 
1202 	anon_vma->root = anon_vma;
1203 	vma->anon_vma = anon_vma;
1204 
1205 	return 0;
1206 }
1207 
1208 static inline int anon_vma_prepare(struct vm_area_struct *vma)
1209 {
1210 	if (likely(vma->anon_vma))
1211 		return 0;
1212 
1213 	return __anon_vma_prepare(vma);
1214 }
1215 
1216 static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
1217 					      struct list_head *uf)
1218 {
1219 }
1220 
1221 # define ACCESS_PRIVATE(p, member) ((p)->member)
1222 
1223 static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
1224 {
1225 	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
1226 }
1227 
1228 /*
1229  * Denies creating a writable executable mapping or gaining executable permissions.
1230  *
1231  * This denies the following:
1232  *
1233  *     a)      mmap(PROT_WRITE | PROT_EXEC)
1234  *
1235  *     b)      mmap(PROT_WRITE)
1236  *             mprotect(PROT_EXEC)
1237  *
1238  *     c)      mmap(PROT_WRITE)
1239  *             mprotect(PROT_READ)
1240  *             mprotect(PROT_EXEC)
1241  *
1242  * But allows the following:
1243  *
1244  *     d)      mmap(PROT_READ | PROT_EXEC)
1245  *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
1246  *
1247  * This is only applicable if the user has set the Memory-Deny-Write-Execute
1248  * (MDWE) protection mask for the current process.
1249  *
1250  * @old specifies the VMA flags the VMA originally possessed, and @new the ones
1251  * we propose to set.
1252  *
1253  * Return: false if proposed change is OK, true if not ok and should be denied.
1254  */
1255 static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
1256 {
1257 	/* If MDWE is disabled, we have nothing to deny. */
1258 	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
1259 		return false;
1260 
1261 	/* If the new VMA is not executable, we have nothing to deny. */
1262 	if (!(new & VM_EXEC))
1263 		return false;
1264 
1265 	/* Under MDWE we do not accept newly writably executable VMAs... */
1266 	if (new & VM_WRITE)
1267 		return true;
1268 
1269 	/* ...nor previously non-executable VMAs becoming executable. */
1270 	if (!(old & VM_EXEC))
1271 		return true;
1272 
1273 	return false;
1274 }
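
/*
 * Example outcomes (illustrative), with MDWE enabled for the process:
 *
 *	map_deny_write_exec(VM_READ, VM_READ | VM_WRITE | VM_EXEC)	-> true
 *	map_deny_write_exec(VM_READ, VM_READ | VM_EXEC)			-> true
 *	map_deny_write_exec(VM_READ | VM_EXEC, VM_READ | VM_EXEC)	-> false
 */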
1275 
1276 static inline int mapping_map_writable(struct address_space *mapping)
1277 {
1278 	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
1279 		0 : -EPERM;
1280 }
1281 
1282 static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
1283 {
1284 	return 0;
1285 }
1286 
1287 static inline void free_pgd_range(struct mmu_gather *tlb,
1288 			unsigned long addr, unsigned long end,
1289 			unsigned long floor, unsigned long ceiling)
1290 {
1291 }
1292 
1293 static inline int ksm_execve(struct mm_struct *mm)
1294 {
1295 	return 0;
1296 }
1297 
1298 static inline void ksm_exit(struct mm_struct *mm)
1299 {
1300 }
1301 
1302 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
1303 {
1304 	if (reset_refcnt)
1305 		refcount_set(&vma->vm_refcnt, 0);
1306 }
1307 
1308 static inline void vma_numab_state_init(struct vm_area_struct *vma)
1309 {
1310 }
1311 
1312 static inline void vma_numab_state_free(struct vm_area_struct *vma)
1313 {
1314 }
1315 
1316 static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
1317 				     struct vm_area_struct *new_vma)
1318 {
1319 }
1320 
1321 static inline void free_anon_vma_name(struct vm_area_struct *vma)
1322 {
1323 }
1324 
1325 /* Declared in vma.h. */
1326 static inline void set_vma_from_desc(struct vm_area_struct *vma,
1327 		struct vm_area_desc *desc);
1328 
1329 static inline int __compat_vma_mmap_prepare(const struct file_operations *f_op,
1330 		struct file *file, struct vm_area_struct *vma)
1331 {
1332 	struct vm_area_desc desc = {
1333 		.mm = vma->vm_mm,
1334 		.file = vma->vm_file,
1335 		.start = vma->vm_start,
1336 		.end = vma->vm_end,
1337 
1338 		.pgoff = vma->vm_pgoff,
1339 		.vm_file = vma->vm_file,
1340 		.vm_flags = vma->vm_flags,
1341 		.page_prot = vma->vm_page_prot,
1342 	};
1343 	int err;
1344 
1345 	err = f_op->mmap_prepare(&desc);
1346 	if (err)
1347 		return err;
1348 	set_vma_from_desc(vma, &desc);
1349 
1350 	return 0;
1351 }
1352 
1353 static inline int compat_vma_mmap_prepare(struct file *file,
1354 		struct vm_area_struct *vma)
1355 {
1356 	return __compat_vma_mmap_prepare(file->f_op, file, vma);
1357 }
1358 
1359 /* Did the driver provide valid mmap hook configuration? */
1360 static inline bool can_mmap_file(struct file *file)
1361 {
1362 	bool has_mmap = file->f_op->mmap;
1363 	bool has_mmap_prepare = file->f_op->mmap_prepare;
1364 
1365 	/* Hooks are mutually exclusive. */
1366 	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
1367 		return false;
1368 	if (!has_mmap && !has_mmap_prepare)
1369 		return false;
1370 
1371 	return true;
1372 }
1373 
1374 static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
1375 {
1376 	if (file->f_op->mmap_prepare)
1377 		return compat_vma_mmap_prepare(file, vma);
1378 
1379 	return file->f_op->mmap(file, vma);
1380 }
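
/*
 * Illustrative dispatch: a file whose file_operations only provide
 * .mmap_prepare (e.g. the hypothetical my_mmap_prepare above) is still
 * mappable via vfs_mmap(); the call is bridged through
 * compat_vma_mmap_prepare(), which builds a vm_area_desc from the VMA, runs
 * the hook and writes the mutable fields back with set_vma_from_desc().
 *
 *	static const struct file_operations my_fops = {
 *		.mmap_prepare = my_mmap_prepare,
 *	};
 *	...
 *	err = vfs_mmap(file, vma);
 */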
1381 
1382 static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
1383 {
1384 	return file->f_op->mmap_prepare(desc);
1385 }
1386 
1387 static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
1388 {
1389 }
1390 
1391 static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
1392 {
1393 	/* Changing an anonymous vma with this is illegal */
1394 	get_file(file);
1395 	swap(vma->vm_file, file);
1396 	fput(file);
1397 }
1398 
1399 static inline bool shmem_file(struct file *file)
1400 {
1401 	return false;
1402 }
1403 
1404 static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
1405 		const struct file *file, vm_flags_t vm_flags)
1406 {
1407 	return vm_flags;
1408 }
1409 
1410 #endif	/* __MM_VMA_INTERNAL_H */
1411