xref: /linux/tools/testing/vma/vma_internal.h (revision 4fc012daf9c074772421c904357abf586336b1ca)
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * vma_internal.h
4  *
5  * Header providing userland wrappers and shims for the functionality provided
6  * by mm/vma_internal.h.
7  *
8  * We make the header guard the same as mm/vma_internal.h, so if this shim
9  * header is included, it precludes the inclusion of the kernel one.
10  */
11 
12 #ifndef __MM_VMA_INTERNAL_H
13 #define __MM_VMA_INTERNAL_H
14 
15 #define __private
16 #define __bitwise
17 #define __randomize_layout
18 
19 #define CONFIG_MMU
20 #define CONFIG_PER_VMA_LOCK
21 
22 #include <stdlib.h>
23 
24 #include <linux/list.h>
25 #include <linux/maple_tree.h>
26 #include <linux/mm.h>
27 #include <linux/rbtree.h>
28 #include <linux/refcount.h>
29 
30 extern unsigned long stack_guard_gap;
31 #ifdef CONFIG_MMU
32 extern unsigned long mmap_min_addr;
33 extern unsigned long dac_mmap_min_addr;
34 #else
35 #define mmap_min_addr		0UL
36 #define dac_mmap_min_addr	0UL
37 #endif
38 
39 #define VM_WARN_ON(_expr) (WARN_ON(_expr))
40 #define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
41 #define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
42 #define VM_BUG_ON(_expr) (BUG_ON(_expr))
43 #define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
44 
45 #define MMF_HAS_MDWE	28
46 
47 #define VM_NONE		0x00000000
48 #define VM_READ		0x00000001
49 #define VM_WRITE	0x00000002
50 #define VM_EXEC		0x00000004
51 #define VM_SHARED	0x00000008
52 #define VM_MAYREAD	0x00000010
53 #define VM_MAYWRITE	0x00000020
54 #define VM_MAYEXEC	0x00000040
55 #define VM_GROWSDOWN	0x00000100
56 #define VM_PFNMAP	0x00000400
57 #define VM_LOCKED	0x00002000
58 #define VM_IO           0x00004000
59 #define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
60 #define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
61 #define VM_DONTEXPAND	0x00040000
62 #define VM_LOCKONFAULT	0x00080000
63 #define VM_ACCOUNT	0x00100000
64 #define VM_NORESERVE	0x00200000
65 #define VM_MIXEDMAP	0x10000000
66 #define VM_STACK	VM_GROWSDOWN
67 #define VM_SHADOW_STACK	VM_NONE
68 #define VM_SOFTDIRTY	0
69 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
70 #define VM_GROWSUP	VM_NONE
71 
72 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
73 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
74 
75 #ifdef CONFIG_STACK_GROWSUP
76 #define VM_STACK	VM_GROWSUP
77 #define VM_STACK_EARLY	VM_GROWSDOWN
78 #else
79 #define VM_STACK	VM_GROWSDOWN
80 #define VM_STACK_EARLY	0
81 #endif
82 
83 #define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
84 #define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
85 #define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
86 #define STACK_TOP		TASK_SIZE_LOW
87 #define STACK_TOP_MAX		TASK_SIZE_MAX
88 
89 /* This mask represents all the VMA flag bits used by mlock */
90 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
91 
92 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
93 
94 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
95 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
96 
97 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC
98 
99 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
100 
101 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
102 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
103 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
104 
105 #define RLIMIT_STACK		3	/* max stack size */
106 #define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */
107 
108 #define CAP_IPC_LOCK         14
109 
110 #ifdef CONFIG_64BIT
111 /* VM is sealed, in vm_flags */
112 #define VM_SEALED	_BITUL(63)
113 #endif
114 
115 #define FIRST_USER_ADDRESS	0UL
116 #define USER_PGTABLES_CEILING	0UL
117 
118 #define vma_policy(vma) NULL
119 
120 #define down_write_nest_lock(sem, nest_lock)
121 
122 #define pgprot_val(x)		((x).pgprot)
123 #define __pgprot(x)		((pgprot_t) { (x) } )
124 
125 #define for_each_vma(__vmi, __vma)					\
126 	while (((__vma) = vma_next(&(__vmi))) != NULL)
127 
128 /* The MM code likes to work with exclusive end addresses */
129 #define for_each_vma_range(__vmi, __vma, __end)				\
130 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
131 
132 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
133 
134 #define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
135 
136 #define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
137 #define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)
138 
139 #define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
140 
141 #define AS_MM_ALL_LOCKS 2
142 
143 /* We hardcode this for now. */
144 #define sysctl_max_map_count 0x1000000UL
145 
146 #define pgoff_t unsigned long
147 typedef unsigned long	pgprotval_t;
148 typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
149 typedef unsigned long vm_flags_t;
150 typedef __bitwise unsigned int vm_fault_t;
151 
152 /*
153  * The shared stubs do not implement this; it amounts to an fprintf(STDERR, ...)
154  * either way :)
155  */
156 #define pr_warn_once pr_err
157 
158 #define data_race(expr) expr
159 
160 #define ASSERT_EXCLUSIVE_WRITER(x)
161 
162 struct kref {
163 	refcount_t refcount;
164 };
165 
166 /*
167  * Define the task command name length as an enum so that it can be visible to
168  * BPF programs.
169  */
170 enum {
171 	TASK_COMM_LEN = 16,
172 };
173 
174 /*
175  * Flags for bug emulation.
176  *
177  * These occupy the top three bytes.
178  */
179 enum {
180 	READ_IMPLIES_EXEC =	0x0400000,
181 };
182 
183 struct task_struct {
184 	char comm[TASK_COMM_LEN];
185 	pid_t pid;
186 	struct mm_struct *mm;
187 
188 	/* Used for emulating ABI behavior of previous Linux versions: */
189 	unsigned int			personality;
190 };
191 
192 struct task_struct *get_current(void);
193 #define current get_current()
194 
195 struct anon_vma {
196 	struct anon_vma *root;
197 	struct rb_root_cached rb_root;
198 
199 	/* Test fields. */
200 	bool was_cloned;
201 	bool was_unlinked;
202 };
203 
204 struct anon_vma_chain {
205 	struct anon_vma *anon_vma;
206 	struct list_head same_vma;
207 };
208 
209 struct anon_vma_name {
210 	struct kref kref;
211 	/* The name needs to be at the end because it is dynamically sized. */
212 	char name[];
213 };
214 
215 struct vma_iterator {
216 	struct ma_state mas;
217 };
218 
219 #define VMA_ITERATOR(name, __mm, __addr)				\
220 	struct vma_iterator name = {					\
221 		.mas = {						\
222 			.tree = &(__mm)->mm_mt,				\
223 			.index = __addr,				\
224 			.node = NULL,					\
225 			.status = ma_start,				\
226 		},							\
227 	}
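/*
 * Example (illustrative sketch, not part of the kernel header): walking every
 * VMA in an mm using the VMA_ITERATOR() and for_each_vma() shims above.
 * Given a struct mm_struct *mm:
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	for_each_vma(vmi, vma)
 *		printf("vma [%lx, %lx)\n", vma->vm_start, vma->vm_end);
 */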
228 
229 struct address_space {
230 	struct rb_root_cached	i_mmap;
231 	unsigned long		flags;
232 	atomic_t		i_mmap_writable;
233 };
234 
235 struct vm_userfaultfd_ctx {};
236 struct mempolicy {};
237 struct mmu_gather {};
238 struct mutex {};
239 #define DEFINE_MUTEX(mutexname) \
240 	struct mutex mutexname = {}
241 
242 struct mm_struct {
243 	struct maple_tree mm_mt;
244 	int map_count;			/* number of VMAs */
245 	unsigned long total_vm;	   /* Total pages mapped */
246 	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
247 	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
248 	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
249 	unsigned long stack_vm;	   /* VM_STACK */
250 
251 	unsigned long def_flags;
252 
253 	unsigned long flags; /* Must use atomic bitops to access */
254 };
255 
256 struct vm_area_struct;
257 
258 /*
259  * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
260  * manipulate mutable fields, which will cause those fields to be updated in the
261  * resultant VMA.
262  *
263  * Helper functions are not required for manipulating any field.
264  */
265 struct vm_area_desc {
266 	/* Immutable state. */
267 	struct mm_struct *mm;
268 	unsigned long start;
269 	unsigned long end;
270 
271 	/* Mutable fields. Populated with initial state. */
272 	pgoff_t pgoff;
273 	struct file *file;
274 	vm_flags_t vm_flags;
275 	pgprot_t page_prot;
276 
277 	/* Write-only fields. */
278 	const struct vm_operations_struct *vm_ops;
279 	void *private_data;
280 };
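/*
 * Example (sketch, hypothetical driver code): an .mmap_prepare hook fills in
 * the mutable and write-only fields of the descriptor instead of touching a
 * VMA directly; the resultant VMA is then updated from these fields:
 *
 *	static int my_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_flags |= VM_DONTEXPAND;
 *		desc->vm_ops = &my_vm_ops;	(hypothetical ops table)
 *		desc->private_data = my_state;	(hypothetical driver state)
 *		return 0;
 *	}
 */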
281 
282 struct file_operations {
283 	int (*mmap)(struct file *, struct vm_area_struct *);
284 	int (*mmap_prepare)(struct vm_area_desc *);
285 };
286 
287 struct file {
288 	struct address_space	*f_mapping;
289 	const struct file_operations	*f_op;
290 };
291 
292 #define VMA_LOCK_OFFSET	0x40000000
293 
294 typedef struct { unsigned long v; } freeptr_t;
295 
296 struct vm_area_struct {
297 	/* The first cache line has the info for VMA tree walking. */
298 
299 	union {
300 		struct {
301 			/* VMA covers [vm_start; vm_end) addresses within mm */
302 			unsigned long vm_start;
303 			unsigned long vm_end;
304 		};
305 		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
306 	};
307 
308 	struct mm_struct *vm_mm;	/* The address space we belong to. */
309 	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
310 
311 	/*
312 	 * Flags, see mm.h.
313 	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
314 	 */
315 	union {
316 		const vm_flags_t vm_flags;
317 		vm_flags_t __private __vm_flags;
318 	};
319 
320 #ifdef CONFIG_PER_VMA_LOCK
321 	/*
322 	 * Can only be written (using WRITE_ONCE()) while holding both:
323 	 *  - mmap_lock (in write mode)
324 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
325 	 * Can be read reliably while holding one of:
326 	 *  - mmap_lock (in read or write mode)
327 	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
328 	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
329 	 * while holding nothing (except RCU to keep the VMA struct allocated).
330 	 *
331 	 * This sequence counter is explicitly allowed to overflow; sequence
332 	 * counter reuse can only lead to occasional unnecessary use of the
333 	 * slowpath.
334 	 */
335 	unsigned int vm_lock_seq;
336 #endif
337 
338 	/*
339 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
340 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
341 	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
342 	 * or brk vma (with NULL file) can only be in an anon_vma list.
343 	 */
344 	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
345 					  * page_table_lock */
346 	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
347 
348 	/* Function pointers to deal with this struct. */
349 	const struct vm_operations_struct *vm_ops;
350 
351 	/* Information about our backing store: */
352 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
353 					   units */
354 	struct file * vm_file;		/* File we map to (can be NULL). */
355 	void * vm_private_data;		/* was vm_pte (shared mem) */
356 
357 #ifdef CONFIG_SWAP
358 	atomic_long_t swap_readahead_info;
359 #endif
360 #ifndef CONFIG_MMU
361 	struct vm_region *vm_region;	/* NOMMU mapping region */
362 #endif
363 #ifdef CONFIG_NUMA
364 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
365 #endif
366 #ifdef CONFIG_NUMA_BALANCING
367 	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
368 #endif
369 #ifdef CONFIG_PER_VMA_LOCK
370 	/* Unstable RCU readers are allowed to read this. */
371 	refcount_t vm_refcnt;
372 #endif
373 	/*
374 	 * For areas with an address space and backing store,
375 	 * linkage into the address_space->i_mmap interval tree.
376 	 *
377 	 */
378 	struct {
379 		struct rb_node rb;
380 		unsigned long rb_subtree_last;
381 	} shared;
382 #ifdef CONFIG_ANON_VMA_NAME
383 	/*
384 	 * For private and shared anonymous mappings, a pointer to a null
385 	 * terminated string containing the name given to the vma, or NULL if
386 	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
387 	 */
388 	struct anon_vma_name *anon_name;
389 #endif
390 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
391 } __randomize_layout;
392 
393 struct vm_fault {};
394 
395 struct vm_operations_struct {
396 	void (*open)(struct vm_area_struct * area);
397 	/**
398 	 * @close: Called when the VMA is being removed from the MM.
399 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
400 	 */
401 	void (*close)(struct vm_area_struct * area);
402 	/* Called any time before splitting to check if it's allowed */
403 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
404 	int (*mremap)(struct vm_area_struct *area);
405 	/*
406 	 * Called by mprotect() to make driver-specific permission
407 	 * checks before mprotect() is finalised.   The VMA must not
408 	 * be modified.  Returns 0 if mprotect() can proceed.
409 	 */
410 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
411 			unsigned long end, unsigned long newflags);
412 	vm_fault_t (*fault)(struct vm_fault *vmf);
413 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
414 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
415 			pgoff_t start_pgoff, pgoff_t end_pgoff);
416 	unsigned long (*pagesize)(struct vm_area_struct * area);
417 
418 	/* notification that a previously read-only page is about to become
419 	 * writable; if an error is returned it will cause a SIGBUS */
420 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
421 
422 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
423 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
424 
425 	/* called by access_process_vm when get_user_pages() fails, typically
426 	 * for use by special VMAs. See also generic_access_phys() for a generic
427 	 * implementation useful for any iomem mapping.
428 	 */
429 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
430 		      void *buf, int len, int write);
431 
432 	/* Called by the /proc/PID/maps code to ask the vma whether it
433 	 * has a special name.  Returning non-NULL will also cause this
434 	 * vma to be dumped unconditionally. */
435 	const char *(*name)(struct vm_area_struct *vma);
436 
437 #ifdef CONFIG_NUMA
438 	/*
439 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
440 	 * to hold the policy upon return.  Caller should pass NULL @new to
441 	 * remove a policy and fall back to surrounding context--i.e. do not
442 	 * install a MPOL_DEFAULT policy, nor the task or system default
443 	 * mempolicy.
444 	 */
445 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
446 
447 	/*
448 	 * get_policy() op must add reference [mpol_get()] to any policy at
449 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
450 	 * in mm/mempolicy.c will do this automatically.
451 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
452 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
453 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
454 	 * must return NULL--i.e., do not "fallback" to task or system default
455 	 * policy.
456 	 */
457 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
458 					unsigned long addr, pgoff_t *ilx);
459 #endif
460 	/*
461 	 * Called by vm_normal_page() for special PTEs to find the
462 	 * page for @addr.  This is useful if the default behavior
463 	 * (using pte_page()) would not find the correct page.
464 	 */
465 	struct page *(*find_special_page)(struct vm_area_struct *vma,
466 					  unsigned long addr);
467 };
468 
469 struct vm_unmapped_area_info {
470 #define VM_UNMAPPED_AREA_TOPDOWN 1
471 	unsigned long flags;
472 	unsigned long length;
473 	unsigned long low_limit;
474 	unsigned long high_limit;
475 	unsigned long align_mask;
476 	unsigned long align_offset;
477 	unsigned long start_gap;
478 };
479 
480 struct pagetable_move_control {
481 	struct vm_area_struct *old; /* Source VMA. */
482 	struct vm_area_struct *new; /* Destination VMA. */
483 	unsigned long old_addr; /* Address from which the move begins. */
484 	unsigned long old_end; /* Exclusive address at which old range ends. */
485 	unsigned long new_addr; /* Address to move page tables to. */
486 	unsigned long len_in; /* Bytes to remap specified by user. */
487 
488 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
489 	bool for_stack; /* Is this an early temp stack being moved? */
490 };
491 
492 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
493 	struct pagetable_move_control name = {				\
494 		.old = old_,						\
495 		.new = new_,						\
496 		.old_addr = old_addr_,					\
497 		.old_end = (old_addr_) + (len_),			\
498 		.new_addr = new_addr_,					\
499 		.len_in = len_,						\
500 	}
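/*
 * Example (sketch): declaring a move of `len' bytes of page tables from
 * old_vma to new_vma, as a test might do before calling move_page_tables():
 *
 *	unsigned long moved;
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_vma->vm_start,
 *		       new_vma->vm_start, len);
 *
 *	moved = move_page_tables(&pmc);
 */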
501 
502 struct kmem_cache_args {
503 	/**
504 	 * @align: The required alignment for the objects.
505 	 *
506 	 * %0 means no specific alignment is requested.
507 	 */
508 	unsigned int align;
509 	/**
510 	 * @useroffset: Usercopy region offset.
511 	 *
512 	 * %0 is a valid offset, when @usersize is non-%0
513 	 */
514 	unsigned int useroffset;
515 	/**
516 	 * @usersize: Usercopy region size.
517 	 *
518 	 * %0 means no usercopy region is specified.
519 	 */
520 	unsigned int usersize;
521 	/**
522 	 * @freeptr_offset: Custom offset for the free pointer
523 	 * in &SLAB_TYPESAFE_BY_RCU caches
524 	 *
525 	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
526 	 * outside of the object. This might cause the object to grow in size.
527 	 * Cache creators that have a reason to avoid this can specify a custom
528 	 * free pointer offset in their struct where the free pointer will be
529 	 * placed.
530 	 *
531 	 * Note that placing the free pointer inside the object requires the
532 	 * caller to ensure that no fields are invalidated that are required to
533 	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
534 	 * details).
535 	 *
536 	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
537 	 * is specified, %use_freeptr_offset must be set %true.
538 	 *
539 	 * Note that @ctor currently isn't supported with custom free pointers
540 	 * as a @ctor requires an external free pointer.
541 	 */
542 	unsigned int freeptr_offset;
543 	/**
544 	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
545 	 */
546 	bool use_freeptr_offset;
547 	/**
548 	 * @ctor: A constructor for the objects.
549 	 *
550 	 * The constructor is invoked for each object in a newly allocated slab
551 	 * page. It is the cache user's responsibility to free the object in the
552 	 * same state as after calling the constructor, or deal appropriately
553 	 * with any differences between a freshly constructed and a reallocated
554 	 * object.
555 	 *
556 	 * %NULL means no constructor.
557 	 */
558 	void (*ctor)(void *);
559 };
560 
561 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
562 {
563 	mas_pause(&vmi->mas);
564 }
565 
566 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
567 {
568 	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
569 }
570 
571 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
572 {
573 	return __pgprot(vm_flags);
574 }
575 
576 static inline bool is_shared_maywrite(vm_flags_t vm_flags)
577 {
578 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
579 		(VM_SHARED | VM_MAYWRITE);
580 }
581 
582 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
583 {
584 	return is_shared_maywrite(vma->vm_flags);
585 }
586 
587 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
588 {
589 	/*
590 	 * Uses mas_find() to get the first VMA when the iterator starts.
591 	 * Calling mas_next() could skip the first entry.
592 	 */
593 	return mas_find(&vmi->mas, ULONG_MAX);
594 }
595 
596 /*
597  * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
598  * assertions should be made either under mmap_write_lock or when the object
599  * has been isolated under mmap_write_lock, ensuring no competing writers.
600  */
601 static inline void vma_assert_attached(struct vm_area_struct *vma)
602 {
603 	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
604 }
605 
606 static inline void vma_assert_detached(struct vm_area_struct *vma)
607 {
608 	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
609 }
610 
611 static inline void vma_assert_write_locked(struct vm_area_struct *);
612 static inline void vma_mark_attached(struct vm_area_struct *vma)
613 {
614 	vma_assert_write_locked(vma);
615 	vma_assert_detached(vma);
616 	refcount_set_release(&vma->vm_refcnt, 1);
617 }
618 
619 static inline void vma_mark_detached(struct vm_area_struct *vma)
620 {
621 	vma_assert_write_locked(vma);
622 	vma_assert_attached(vma);
623 	/* We are the only writer, so no need to use vma_refcount_put(). */
624 	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
625 		/*
626 		 * Reader must have temporarily raised vm_refcnt but it will
627 		 * drop it without using the vma since vma is write-locked.
628 		 */
629 	}
630 }
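/*
 * Example (sketch): the attach/detach lifecycle as the tests exercise it.
 * vma_assert_write_locked() is a stub in this harness, so the mmap write
 * lock is only implied; the vm_refcnt transitions are real:
 *
 *	vma_mark_attached(vma);		vm_refcnt goes 0 -> 1
 *	...
 *	vma_mark_detached(vma);		vm_refcnt goes 1 -> 0
 */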
631 
632 extern const struct vm_operations_struct vma_dummy_vm_ops;
633 
634 extern unsigned long rlimit(unsigned int limit);
635 
636 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
637 {
638 	memset(vma, 0, sizeof(*vma));
639 	vma->vm_mm = mm;
640 	vma->vm_ops = &vma_dummy_vm_ops;
641 	INIT_LIST_HEAD(&vma->anon_vma_chain);
642 	vma->vm_lock_seq = UINT_MAX;
643 }
644 
645 struct kmem_cache {
646 	const char *name;
647 	size_t object_size;
648 	struct kmem_cache_args *args;
649 };
650 
651 static inline struct kmem_cache *__kmem_cache_create(const char *name,
652 						     size_t object_size,
653 						     struct kmem_cache_args *args)
654 {
655 	struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));
656 
657 	ret->name = name;
658 	ret->object_size = object_size;
659 	ret->args = args;
660 
661 	return ret;
662 }
663 
664 #define kmem_cache_create(__name, __object_size, __args, ...)           \
665 	__kmem_cache_create((__name), (__object_size), (__args))
666 
667 static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
668 {
669 	(void)gfpflags;
670 
671 	return calloc(s->object_size, 1);
672 }
673 
674 static inline void kmem_cache_free(struct kmem_cache *s, void *x)
675 {
676 	free(x);
677 }
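/*
 * Example (sketch): the kmem_cache shims above let test code exercise the
 * kernel allocation API while being backed by plain malloc()/calloc()/free()
 * (the gfp flags are ignored by the shim):
 *
 *	struct kmem_cache *cache;
 *	struct vm_area_struct *vma;
 *
 *	cache = kmem_cache_create("vm_area_struct",
 *				  sizeof(struct vm_area_struct), NULL);
 *	vma = kmem_cache_alloc(cache, GFP_KERNEL);
 *	kmem_cache_free(cache, vma);
 */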
678 
679 /*
680  * These are defined in vma.h, but sadly vm_stat_account() is referenced by
681  * kernel/fork.c, so we have to make these broadly available there, and temporarily
682  * define them here to resolve the dependency cycle.
683  */
684 
685 #define is_exec_mapping(flags) \
686 	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)
687 
688 #define is_stack_mapping(flags) \
689 	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))
690 
691 #define is_data_mapping(flags) \
692 	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
693 
694 static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
695 				   long npages)
696 {
697 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
698 
699 	if (is_exec_mapping(flags))
700 		mm->exec_vm += npages;
701 	else if (is_stack_mapping(flags))
702 		mm->stack_vm += npages;
703 	else if (is_data_mapping(flags))
704 		mm->data_vm += npages;
705 }
706 
707 #undef is_exec_mapping
708 #undef is_stack_mapping
709 #undef is_data_mapping
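/*
 * Worked example (sketch): accounting a private, writable, 4 page mapping
 * bumps both the total and the data counters, since is_data_mapping() is
 * true for VM_WRITE without VM_SHARED or VM_STACK:
 *
 *	vm_stat_account(mm, VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 4);
 *
 * leaves mm->total_vm and mm->data_vm each four pages higher than before.
 */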
710 
711 /* Currently stubbed but we may later wish to un-stub. */
712 static inline void vm_acct_memory(long pages);
713 static inline void vm_unacct_memory(long pages)
714 {
715 	vm_acct_memory(-pages);
716 }
717 
718 static inline void mapping_allow_writable(struct address_space *mapping)
719 {
720 	atomic_inc(&mapping->i_mmap_writable);
721 }
722 
723 static inline void vma_set_range(struct vm_area_struct *vma,
724 				 unsigned long start, unsigned long end,
725 				 pgoff_t pgoff)
726 {
727 	vma->vm_start = start;
728 	vma->vm_end = end;
729 	vma->vm_pgoff = pgoff;
730 }
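/*
 * Example (sketch): how a test might construct a VMA covering
 * [0x1000, 0x3000) at page offset 1, given a struct mm_struct *mm:
 *
 *	struct vm_area_struct vma;
 *
 *	vma_init(&vma, mm);
 *	vma_set_range(&vma, 0x1000, 0x3000, 1);
 *
 * Note that vm_pgoff is expressed in PAGE_SIZE units, so with 4KiB pages a
 * pgoff of 1 corresponds to byte offset 0x1000 within the backing file.
 */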
731 
732 static inline
733 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
734 {
735 	return mas_find(&vmi->mas, max - 1);
736 }
737 
738 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
739 			unsigned long start, unsigned long end, gfp_t gfp)
740 {
741 	__mas_set_range(&vmi->mas, start, end - 1);
742 	mas_store_gfp(&vmi->mas, NULL, gfp);
743 	if (unlikely(mas_is_err(&vmi->mas)))
744 		return -ENOMEM;
745 
746 	return 0;
747 }
748 
749 static inline void mmap_assert_locked(struct mm_struct *);
750 static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
751 						unsigned long start_addr,
752 						unsigned long end_addr)
753 {
754 	unsigned long index = start_addr;
755 
756 	mmap_assert_locked(mm);
757 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
758 }
759 
760 static inline
761 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
762 {
763 	return mtree_load(&mm->mm_mt, addr);
764 }
765 
766 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
767 {
768 	return mas_prev(&vmi->mas, 0);
769 }
770 
771 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
772 {
773 	mas_set(&vmi->mas, addr);
774 }
775 
776 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
777 {
778 	return !vma->vm_ops;
779 }
780 
781 /* Defined in vma.h, so temporarily define here to avoid circular dependency. */
782 #define vma_iter_load(vmi) \
783 	mas_walk(&(vmi)->mas)
784 
785 static inline struct vm_area_struct *
786 find_vma_prev(struct mm_struct *mm, unsigned long addr,
787 			struct vm_area_struct **pprev)
788 {
789 	struct vm_area_struct *vma;
790 	VMA_ITERATOR(vmi, mm, addr);
791 
792 	vma = vma_iter_load(&vmi);
793 	*pprev = vma_prev(&vmi);
794 	if (!vma)
795 		vma = vma_next(&vmi);
796 	return vma;
797 }
798 
799 #undef vma_iter_load
800 
801 static inline void vma_iter_init(struct vma_iterator *vmi,
802 		struct mm_struct *mm, unsigned long addr)
803 {
804 	mas_init(&vmi->mas, &mm->mm_mt, addr);
805 }
806 
807 /* Stubbed functions. */
808 
809 static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
810 {
811 	return NULL;
812 }
813 
814 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
815 					struct vm_userfaultfd_ctx vm_ctx)
816 {
817 	return true;
818 }
819 
820 static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
821 				    struct anon_vma_name *anon_name2)
822 {
823 	return true;
824 }
825 
826 static inline void might_sleep(void)
827 {
828 }
829 
830 static inline unsigned long vma_pages(struct vm_area_struct *vma)
831 {
832 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
833 }
834 
835 static inline void fput(struct file *)
836 {
837 }
838 
839 static inline void mpol_put(struct mempolicy *)
840 {
841 }
842 
843 static inline void lru_add_drain(void)
844 {
845 }
846 
847 static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
848 {
849 }
850 
851 static inline void update_hiwater_rss(struct mm_struct *)
852 {
853 }
854 
855 static inline void update_hiwater_vm(struct mm_struct *)
856 {
857 }
858 
859 static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
860 		      struct vm_area_struct *vma, unsigned long start_addr,
861 		      unsigned long end_addr, unsigned long tree_end,
862 		      bool mm_wr_locked)
863 {
864 	(void)tlb;
865 	(void)mas;
866 	(void)vma;
867 	(void)start_addr;
868 	(void)end_addr;
869 	(void)tree_end;
870 	(void)mm_wr_locked;
871 }
872 
873 static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
874 		   struct vm_area_struct *vma, unsigned long floor,
875 		   unsigned long ceiling, bool mm_wr_locked)
876 {
877 	(void)tlb;
878 	(void)mas;
879 	(void)vma;
880 	(void)floor;
881 	(void)ceiling;
882 	(void)mm_wr_locked;
883 }
884 
885 static inline void mapping_unmap_writable(struct address_space *)
886 {
887 }
888 
889 static inline void flush_dcache_mmap_lock(struct address_space *)
890 {
891 }
892 
893 static inline void tlb_finish_mmu(struct mmu_gather *)
894 {
895 }
896 
897 static inline struct file *get_file(struct file *f)
898 {
899 	return f;
900 }
901 
902 static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
903 {
904 	return 0;
905 }
906 
907 static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
908 {
909 	/* For testing purposes, we indicate that an anon_vma has been cloned. */
910 	if (src->anon_vma != NULL) {
911 		dst->anon_vma = src->anon_vma;
912 		dst->anon_vma->was_cloned = true;
913 	}
914 
915 	return 0;
916 }
917 
918 static inline void vma_start_write(struct vm_area_struct *vma)
919 {
920 	/* Used to indicate to tests that a write operation has begun. */
921 	vma->vm_lock_seq++;
922 }
923 
924 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
925 					 unsigned long start,
926 					 unsigned long end,
927 					 struct vm_area_struct *next)
928 {
929 	(void)vma;
930 	(void)start;
931 	(void)end;
932 	(void)next;
933 }
934 
935 static inline void vma_iter_free(struct vma_iterator *vmi)
936 {
937 	mas_destroy(&vmi->mas);
938 }
939 
940 static inline
941 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
942 {
943 	return mas_next_range(&vmi->mas, ULONG_MAX);
944 }
945 
946 static inline void vm_acct_memory(long pages)
947 {
948 }
949 
950 static inline void vma_interval_tree_insert(struct vm_area_struct *,
951 					    struct rb_root_cached *)
952 {
953 }
954 
955 static inline void vma_interval_tree_remove(struct vm_area_struct *,
956 					    struct rb_root_cached *)
957 {
958 }
959 
960 static inline void flush_dcache_mmap_unlock(struct address_space *)
961 {
962 }
963 
964 static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
965 						 struct rb_root_cached *)
966 {
967 }
968 
969 static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
970 						 struct rb_root_cached *)
971 {
972 }
973 
974 static inline void uprobe_mmap(struct vm_area_struct *)
975 {
976 }
977 
978 static inline void uprobe_munmap(struct vm_area_struct *vma,
979 				 unsigned long start, unsigned long end)
980 {
981 	(void)vma;
982 	(void)start;
983 	(void)end;
984 }
985 
986 static inline void i_mmap_lock_write(struct address_space *)
987 {
988 }
989 
990 static inline void anon_vma_lock_write(struct anon_vma *)
991 {
992 }
993 
994 static inline void vma_assert_write_locked(struct vm_area_struct *)
995 {
996 }
997 
998 static inline void unlink_anon_vmas(struct vm_area_struct *vma)
999 {
1000 	/* For testing purposes, indicate that the anon_vma was unlinked. */
1001 	vma->anon_vma->was_unlinked = true;
1002 }
1003 
1004 static inline void anon_vma_unlock_write(struct anon_vma *)
1005 {
1006 }
1007 
1008 static inline void i_mmap_unlock_write(struct address_space *)
1009 {
1010 }
1011 
1012 static inline void anon_vma_merge(struct vm_area_struct *,
1013 				  struct vm_area_struct *)
1014 {
1015 }
1016 
1017 static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
1018 					 unsigned long start,
1019 					 unsigned long end,
1020 					 struct list_head *unmaps)
1021 {
1022 	(void)vma;
1023 	(void)start;
1024 	(void)end;
1025 	(void)unmaps;
1026 
1027 	return 0;
1028 }
1029 
1030 static inline void mmap_write_downgrade(struct mm_struct *)
1031 {
1032 }
1033 
1034 static inline void mmap_read_unlock(struct mm_struct *)
1035 {
1036 }
1037 
1038 static inline void mmap_write_unlock(struct mm_struct *)
1039 {
1040 }
1041 
1042 static inline int mmap_write_lock_killable(struct mm_struct *)
1043 {
1044 	return 0;
1045 }
1046 
1047 static inline bool can_modify_mm(struct mm_struct *mm,
1048 				 unsigned long start,
1049 				 unsigned long end)
1050 {
1051 	(void)mm;
1052 	(void)start;
1053 	(void)end;
1054 
1055 	return true;
1056 }
1057 
1058 static inline void arch_unmap(struct mm_struct *mm,
1059 				 unsigned long start,
1060 				 unsigned long end)
1061 {
1062 	(void)mm;
1063 	(void)start;
1064 	(void)end;
1065 }
1066 
1067 static inline void mmap_assert_locked(struct mm_struct *)
1068 {
1069 }
1070 
1071 static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
1072 {
1073 	return true;
1074 }
1075 
1076 static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
1077 			  unsigned long vm_flags)
1078 {
1079 	(void)vma;
1080 	(void)vm_flags;
1081 }
1082 
1083 static inline bool mapping_can_writeback(struct address_space *)
1084 {
1085 	return true;
1086 }
1087 
1088 static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
1089 {
1090 	return false;
1091 }
1092 
1093 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
1094 {
1095 	return false;
1096 }
1097 
1098 static inline bool userfaultfd_wp(struct vm_area_struct *)
1099 {
1100 	return false;
1101 }
1102 
1103 static inline void mmap_assert_write_locked(struct mm_struct *)
1104 {
1105 }
1106 
1107 static inline void mutex_lock(struct mutex *)
1108 {
1109 }
1110 
1111 static inline void mutex_unlock(struct mutex *)
1112 {
1113 }
1114 
1115 static inline bool mutex_is_locked(struct mutex *)
1116 {
1117 	return true;
1118 }
1119 
1120 static inline bool signal_pending(void *)
1121 {
1122 	return false;
1123 }
1124 
1125 static inline bool is_file_hugepages(struct file *)
1126 {
1127 	return false;
1128 }
1129 
1130 static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
1131 {
1132 	return 0;
1133 }
1134 
1135 static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
1136 {
1137 	return true;
1138 }
1139 
1140 static inline void vm_flags_init(struct vm_area_struct *vma,
1141 				 vm_flags_t flags)
1142 {
1143 	vma->__vm_flags = flags;
1144 }
1145 
1146 static inline void vm_flags_set(struct vm_area_struct *vma,
1147 				vm_flags_t flags)
1148 {
1149 	vma_start_write(vma);
1150 	vma->__vm_flags |= flags;
1151 }
1152 
1153 static inline void vm_flags_clear(struct vm_area_struct *vma,
1154 				  vm_flags_t flags)
1155 {
1156 	vma_start_write(vma);
1157 	vma->__vm_flags &= ~flags;
1158 }
1159 
1160 static inline int shmem_zero_setup(struct vm_area_struct *)
1161 {
1162 	return 0;
1163 }
1164 
1165 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1166 {
1167 	vma->vm_ops = NULL;
1168 }
1169 
1170 static inline void ksm_add_vma(struct vm_area_struct *)
1171 {
1172 }
1173 
1174 static inline void perf_event_mmap(struct vm_area_struct *)
1175 {
1176 }
1177 
1178 static inline bool vma_is_dax(struct vm_area_struct *)
1179 {
1180 	return false;
1181 }
1182 
1183 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
1184 {
1185 	return NULL;
1186 }
1187 
1188 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1189 
1190 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
1191 static inline void vma_set_page_prot(struct vm_area_struct *vma)
1192 {
1193 	unsigned long vm_flags = vma->vm_flags;
1194 	pgprot_t vm_page_prot;
1195 
1196 	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1197 	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));
1198 
1199 	if (vma_wants_writenotify(vma, vm_page_prot)) {
1200 		vm_flags &= ~VM_SHARED;
1201 		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
1202 		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
1203 	}
1204 	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
1205 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
1206 }
1207 
1208 static inline bool arch_validate_flags(unsigned long)
1209 {
1210 	return true;
1211 }
1212 
1213 static inline void vma_close(struct vm_area_struct *)
1214 {
1215 }
1216 
1217 static inline int mmap_file(struct file *, struct vm_area_struct *)
1218 {
1219 	return 0;
1220 }
1221 
1222 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
1223 {
1224 	if (vma->vm_flags & VM_GROWSDOWN)
1225 		return stack_guard_gap;
1226 
1227 	/* See reasoning around the VM_SHADOW_STACK definition */
1228 	if (vma->vm_flags & VM_SHADOW_STACK)
1229 		return PAGE_SIZE;
1230 
1231 	return 0;
1232 }
1233 
1234 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
1235 {
1236 	unsigned long gap = stack_guard_start_gap(vma);
1237 	unsigned long vm_start = vma->vm_start;
1238 
1239 	vm_start -= gap;
1240 	if (vm_start > vma->vm_start)
1241 		vm_start = 0;
1242 	return vm_start;
1243 }
1244 
1245 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
1246 {
1247 	unsigned long vm_end = vma->vm_end;
1248 
1249 	if (vma->vm_flags & VM_GROWSUP) {
1250 		vm_end += stack_guard_gap;
1251 		if (vm_end < vma->vm_end)
1252 			vm_end = -PAGE_SIZE;
1253 	}
1254 	return vm_end;
1255 }
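/*
 * Worked example (sketch): for a VM_GROWSDOWN stack VMA covering
 * [0x7f0000101000, 0x7f0000200000) and the kernel's default stack_guard_gap
 * of 256 pages (0x100000 bytes with 4KiB pages), vm_start_gap() returns
 * 0x7f0000001000, i.e. the guard gap is treated as part of the VMA when
 * searching for free address ranges.
 */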
1256 
1257 static inline int is_hugepage_only_range(struct mm_struct *mm,
1258 					unsigned long addr, unsigned long len)
1259 {
1260 	return 0;
1261 }
1262 
1263 static inline bool vma_is_accessible(struct vm_area_struct *vma)
1264 {
1265 	return vma->vm_flags & VM_ACCESS_FLAGS;
1266 }
1267 
1268 static inline bool capable(int cap)
1269 {
1270 	return true;
1271 }
1272 
1273 static inline bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
1274 			unsigned long bytes)
1275 {
1276 	unsigned long locked_pages, limit_pages;
1277 
1278 	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1279 		return true;
1280 
1281 	locked_pages = bytes >> PAGE_SHIFT;
1282 	locked_pages += mm->locked_vm;
1283 
1284 	limit_pages = rlimit(RLIMIT_MEMLOCK);
1285 	limit_pages >>= PAGE_SHIFT;
1286 
1287 	return locked_pages <= limit_pages;
1288 }
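/*
 * Worked example (sketch): with rlimit(RLIMIT_MEMLOCK) returning 8 MiB and
 * 4KiB pages, limit_pages is 2048; an mm with locked_vm == 1024 still passes
 * for a further 4 MiB (1024 page) request, but not for one page more than
 * that. Note that the capable() stub above always returns true, so in this
 * harness VM_LOCKED requests normally take the early-return path instead.
 */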
1289 
1290 static inline int __anon_vma_prepare(struct vm_area_struct *vma)
1291 {
1292 	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));
1293 
1294 	if (!anon_vma)
1295 		return -ENOMEM;
1296 
1297 	anon_vma->root = anon_vma;
1298 	vma->anon_vma = anon_vma;
1299 
1300 	return 0;
1301 }
1302 
1303 static inline int anon_vma_prepare(struct vm_area_struct *vma)
1304 {
1305 	if (likely(vma->anon_vma))
1306 		return 0;
1307 
1308 	return __anon_vma_prepare(vma);
1309 }
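/*
 * Example (sketch): tests needing an anonymous VMA with rmap state attached
 * can mirror what the kernel does on first write fault:
 *
 *	if (anon_vma_prepare(&vma))
 *		return -ENOMEM;
 *
 * vma.anon_vma then points at a calloc()'d anon_vma whose root is itself and
 * whose was_cloned/was_unlinked test fields start out false.
 */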
1310 
1311 static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
1312 					      struct list_head *uf)
1313 {
1314 }
1315 
1316 /*
1317  * Denies creating a writable executable mapping or gaining executable permissions.
1318  *
1319  * This denies the following:
1320  *
1321  *     a)      mmap(PROT_WRITE | PROT_EXEC)
1322  *
1323  *     b)      mmap(PROT_WRITE)
1324  *             mprotect(PROT_EXEC)
1325  *
1326  *     c)      mmap(PROT_WRITE)
1327  *             mprotect(PROT_READ)
1328  *             mprotect(PROT_EXEC)
1329  *
1330  * But allows the following:
1331  *
1332  *     d)      mmap(PROT_READ | PROT_EXEC)
1333  *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
1334  *
1335  * This is only applicable if the user has set the Memory-Deny-Write-Execute
1336  * (MDWE) protection mask for the current process.
1337  *
1338  * @old specifies the VMA flags the VMA originally possessed, and @new the ones
1339  * we propose to set.
1340  *
1341  * Return: false if proposed change is OK, true if not ok and should be denied.
1342  */
1343 static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
1344 {
1345 	/* If MDWE is disabled, we have nothing to deny. */
1346 	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
1347 		return false;
1348 
1349 	/* If the new VMA is not executable, we have nothing to deny. */
1350 	if (!(new & VM_EXEC))
1351 		return false;
1352 
1353 	/* Under MDWE we do not accept newly writably executable VMAs... */
1354 	if (new & VM_WRITE)
1355 		return true;
1356 
1357 	/* ...nor previously non-executable VMAs becoming executable. */
1358 	if (!(old & VM_EXEC))
1359 		return true;
1360 
1361 	return false;
1362 }
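/*
 * Example (sketch): with MDWE enabled for current->mm,
 *
 *	map_deny_write_exec(VM_NONE, VM_READ | VM_WRITE | VM_EXEC)	denies (case a)
 *	map_deny_write_exec(VM_READ | VM_EXEC, VM_READ | VM_EXEC)	allows (case d)
 *	map_deny_write_exec(VM_READ, VM_READ | VM_EXEC)			denies (cases b/c)
 */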
1363 
1364 static inline int mapping_map_writable(struct address_space *mapping)
1365 {
1366 	int c = atomic_read(&mapping->i_mmap_writable);
1367 
1368 	/* Derived from the raw_atomic_inc_unless_negative() implementation. */
1369 	do {
1370 		if (c < 0)
1371 			return -EPERM;
1372 	} while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c+1));
1373 
1374 	return 0;
1375 }
1376 
1377 static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
1378 {
1379 	(void)pmc;
1380 
1381 	return 0;
1382 }
1383 
1384 static inline void free_pgd_range(struct mmu_gather *tlb,
1385 			unsigned long addr, unsigned long end,
1386 			unsigned long floor, unsigned long ceiling)
1387 {
1388 	(void)tlb;
1389 	(void)addr;
1390 	(void)end;
1391 	(void)floor;
1392 	(void)ceiling;
1393 }
1394 
1395 static inline int ksm_execve(struct mm_struct *mm)
1396 {
1397 	(void)mm;
1398 
1399 	return 0;
1400 }
1401 
1402 static inline void ksm_exit(struct mm_struct *mm)
1403 {
1404 	(void)mm;
1405 }
1406 
1407 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
1408 {
1409 	(void)vma;
1410 	(void)reset_refcnt;
1411 }
1412 
1413 static inline void vma_numab_state_init(struct vm_area_struct *vma)
1414 {
1415 	(void)vma;
1416 }
1417 
1418 static inline void vma_numab_state_free(struct vm_area_struct *vma)
1419 {
1420 	(void)vma;
1421 }
1422 
1423 static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
1424 				     struct vm_area_struct *new_vma)
1425 {
1426 	(void)orig_vma;
1427 	(void)new_vma;
1428 }
1429 
1430 static inline void free_anon_vma_name(struct vm_area_struct *vma)
1431 {
1432 	(void)vma;
1433 }
1434 
1435 /* Did the driver provide valid mmap hook configuration? */
1436 static inline bool file_has_valid_mmap_hooks(struct file *file)
1437 {
1438 	bool has_mmap = file->f_op->mmap;
1439 	bool has_mmap_prepare = file->f_op->mmap_prepare;
1440 
1441 	/* Hooks are mutually exclusive. */
1442 	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
1443 		return false;
1444 	if (WARN_ON_ONCE(!has_mmap && !has_mmap_prepare))
1445 		return false;
1446 
1447 	return true;
1448 }
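/*
 * Example (sketch, hypothetical callbacks): a file_operations providing
 * exactly one of the two hooks is valid; providing both, or neither, trips
 * the WARN_ON_ONCE() checks above:
 *
 *	const struct file_operations good = { .mmap_prepare = my_mmap_prepare };
 *	const struct file_operations bad  = { .mmap = my_mmap,
 *					      .mmap_prepare = my_mmap_prepare };
 */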
1449 
1450 static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
1451 {
1452 	if (WARN_ON_ONCE(file->f_op->mmap_prepare))
1453 		return -EINVAL;
1454 
1455 	return file->f_op->mmap(file, vma);
1456 }
1457 
1458 static inline int __call_mmap_prepare(struct file *file,
1459 		struct vm_area_desc *desc)
1460 {
1461 	return file->f_op->mmap_prepare(desc);
1462 }
1463 
1464 static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
1465 {
1466 	(void)vma;
1467 }
1468 
1469 #endif	/* __MM_VMA_INTERNAL_H */
1470