/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>

extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr		0UL
#define dac_mmap_min_addr	0UL
#endif

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define MMF_HAS_MDWE	28

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_GROWSUP	VM_NONE

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#define VM_STACK_EARLY	VM_GROWSDOWN
#else
#define VM_STACK	VM_GROWSDOWN
#define VM_STACK_EARLY	0
#endif

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define RLIMIT_STACK		3	/* max stack size */
#define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK		14

#ifdef CONFIG_64BIT
#define VM_SEALED_BIT	42
#define VM_SEALED	BIT(VM_SEALED_BIT)
#else
#define VM_SEALED	VM_NONE
#endif

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) })

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47) - PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; it amounts to an fprintf(stderr, ...)
 * either way :)
 */
#define pr_warn_once pr_err

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as an enum so that it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
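
/*
 * Example (a sketch, not part of the shim): walking every VMA in an mm with
 * the iterator defined above. The mm pointer is assumed to refer to an
 * already-populated mm_struct.
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	for_each_vma(vmi, vma)
 *		printf("[0x%lx, 0x%lx)\n", vma->vm_start, vma->vm_end);
 *
 * for_each_vma_range() works the same way but stops at the exclusive end
 * address passed as its third argument.
 */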

struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;	   /* Total pages mapped */
	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;	   /* VM_STACK */

	unsigned long def_flags;

	unsigned long flags; /* Must use atomic bitops to access */
};

struct vm_area_struct;

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *file;
	vm_flags_t vm_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;
};
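
/*
 * Example (a sketch, not part of the shim): a driver's mmap_prepare() hook
 * receives this descriptor rather than a full VMA, and tweaks only the
 * mutable/write-only fields. The hypothetical my_vm_ops is an assumption for
 * illustration.
 *
 *	static int my_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_flags |= VM_DONTEXPAND;
 *		desc->vm_ops = &my_vm_ops;
 *		return 0;
 *	}
 *
 * compat_vma_mmap_prepare(), defined later in this header, copies the mutated
 * fields back into the VMA on behalf of callers hooked via f_op->mmap_prepare.
 */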

struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};

struct file {
	struct address_space	*f_mapping;
	const struct file_operations	*f_op;
};

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.   The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS. */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old; /* Source VMA. */
	struct vm_area_struct *new; /* Destination VMA. */
	unsigned long old_addr; /* Address from which the move begins. */
	unsigned long old_end; /* Exclusive address at which old range ends. */
	unsigned long new_addr; /* Address to move page tables to. */
	unsigned long len_in; /* Bytes to remap specified by user. */

	bool need_rmap_locks; /* Do rmap locks need to be taken? */
	bool for_stack; /* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
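
/*
 * Example (a sketch, not part of the shim): declaring move control state for
 * relocating the page tables of old_vma to new_vma. The VMA pointers and the
 * length are assumptions for illustration.
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_vma->vm_start,
 *		       new_vma->vm_start, 10 * PAGE_SIZE);
 *
 *	moved = move_page_tables(&pmc);
 *
 * In this userland shim move_page_tables() is stubbed and simply returns 0.
 */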

struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset when @usersize is non-%0.
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free the object in the
	 * same state as after calling the constructor, or deal appropriately
	 * with any differences between a freshly constructed and a reallocated
	 * object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

extern const struct vm_operations_struct vma_dummy_vm_ops;

extern unsigned long rlimit(unsigned int limit);

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

struct kmem_cache {
	const char *name;
	size_t object_size;
	struct kmem_cache_args *args;
};

static inline struct kmem_cache *__kmem_cache_create(const char *name,
						     size_t object_size,
						     struct kmem_cache_args *args)
{
	struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));

	ret->name = name;
	ret->object_size = object_size;
	ret->args = args;

	return ret;
}

#define kmem_cache_create(__name, __object_size, __args, ...)           \
	__kmem_cache_create((__name), (__object_size), (__args))

static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	(void)gfpflags;

	return calloc(s->object_size, 1);
}

static inline void kmem_cache_free(struct kmem_cache *s, void *x)
{
	free(x);
}

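/*
 * Example (a sketch, not part of the shim): the test harness can exercise the
 * kmem_cache shims exactly as kernel code would, with allocation backed by
 * calloc()/free() under the hood.
 *
 *	struct kmem_cache *vma_cachep =
 *		kmem_cache_create("vm_area_struct",
 *				  sizeof(struct vm_area_struct), NULL);
 *	struct vm_area_struct *vma = kmem_cache_alloc(vma_cachep, GFP_KERNEL);
 *
 *	kmem_cache_free(vma_cachep, vma);
 */
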
/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	(((flags) & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((((flags) & VM_STACK) == VM_STACK)) || ((flags) & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	(((flags) & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end,
		      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}
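
/*
 * Example (a sketch, not part of the shim): the was_cloned/was_unlinked test
 * fields in struct anon_vma above let a test observe what the real
 * anon_vma_clone()/unlink_anon_vmas() would have done. ASSERT_TRUE is an
 * assumption standing in for the harness's assertion helper.
 *
 *	vma_b->anon_vma = NULL;
 *	anon_vma_clone(vma_b, vma_a);
 *	ASSERT_TRUE(vma_a->anon_vma->was_cloned);
 */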

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)next;
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  vm_flags_t vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

static inline bool is_file_hugepages(struct file *)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}

static inline int shmem_zero_setup(struct vm_area_struct *)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *)
{
}

static inline void perf_event_mmap(struct vm_area_struct *)
{
}

static inline bool vma_is_dax(struct vm_area_struct *)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(vm_flags_t)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *)
{
}

static inline int mmap_file(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}
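
/*
 * Worked example (an illustration, not part of the shim): for a VM_GROWSDOWN
 * VMA spanning [0x200000, 0x300000) with a stack_guard_gap of 256 pages
 * (1 MiB with 4 KiB pages), vm_start_gap() returns 0x100000, i.e. the start
 * address minus the guard gap. The clamp to 0 only triggers if the
 * subtraction would wrap below address zero. vm_end_gap() mirrors this upward
 * for VM_GROWSUP stacks (defined as VM_NONE in this shim, so it is a no-op
 * here).
 */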

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool capable(int cap)
{
	return true;
}

static inline bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
			unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));

	if (!anon_vma)
		return -ENOMEM;

	anon_vma->root = anon_vma;
	vma->anon_vma = anon_vma;

	return 0;
}

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

/*
 * Denies creating a writable executable mapping or gaining executable permissions.
 *
 * This denies the following:
 *
 *     a)      mmap(PROT_WRITE | PROT_EXEC)
 *
 *     b)      mmap(PROT_WRITE)
 *             mprotect(PROT_EXEC)
 *
 *     c)      mmap(PROT_WRITE)
 *             mprotect(PROT_READ)
 *             mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *     d)      mmap(PROT_READ | PROT_EXEC)
 *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the ones
 * we propose to set.
 *
 * Return: false if the proposed change is OK, true if it is not and should
 * be denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}

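/*
 * Worked example (an illustration, not part of the shim): with MDWE enabled,
 * map_deny_write_exec(VM_READ, VM_READ | VM_EXEC) returns true (a
 * non-executable VMA may not become executable), while
 * map_deny_write_exec(VM_READ | VM_EXEC, VM_READ | VM_EXEC) returns false.
 */
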
static inline int mapping_map_writable(struct address_space *mapping)
{
	int c;

	/* Derived from the raw_atomic_inc_unless_negative() implementation. */
	do {
		c = atomic_read(&mapping->i_mmap_writable);
		if (c < 0)
			return -EPERM;
	} while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c + 1));

	return 0;
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	(void)pmc;

	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	(void)tlb;
	(void)addr;
	(void)end;
	(void)floor;
	(void)ceiling;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	(void)mm;

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	(void)mm;
}

static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
	(void)vma;
	(void)reset_refcnt;
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	(void)orig_vma;
	(void)new_vma;
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	(void)vma;
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc);

static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc);

static inline int compat_vma_mmap_prepare(struct file *file,
		struct vm_area_struct *vma)
{
	struct vm_area_desc desc;
	int err;

	err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
	if (err)
		return err;
	set_vma_from_desc(vma, &desc);

	return 0;
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap_prepare(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

static inline bool shmem_file(struct file *)
{
	return false;
}

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *, const struct file *,
			 vm_flags_t vm_flags)
{
	return vm_flags;
}

#endif	/* __MM_VMA_INTERNAL_H */