/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>

extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr		0UL
#define dac_mmap_min_addr	0UL
#endif

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define MMF_HAS_MDWE	28

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_MAYBE_GUARD	0x00000800
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_GROWSUP	VM_NONE

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#define VM_STACK_EARLY	VM_GROWSDOWN
#else
#define VM_STACK	VM_GROWSDOWN
#define VM_STACK_EARLY	0
#endif

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define RLIMIT_STACK	3	/* max stack size */
#define RLIMIT_MEMLOCK	8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK	14

#ifdef CONFIG_64BIT
#define VM_SEALED_BIT	42
#define VM_SEALED	BIT(VM_SEALED_BIT)
#else
#define VM_SEALED	VM_NONE
#endif

/*
 * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
 * possesses them but the other does not, should nonetheless be applied to the
 * merged VMA:
 *
 * VM_MAYBE_GUARD - If a VMA may have guard regions in place, its mapped page
 *                  tables may contain metadata not described by the VMA, so
 *                  any merged VMA may also contain this metadata; we must
 *                  therefore make this flag sticky.
 */
#define VM_STICKY VM_MAYBE_GUARD

/*
 * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
 * of these flags and the other not does not preclude a merge.
 *
 * VM_SOFTDIRTY - Should not prevent VMA merging if the flags match apart from
 *                the dirty bit -- the caller should mark the merged VMA as
 *                dirty. If the dirty bit were not excluded from the
 *                comparison, we would increase pressure on the memory system
 *                by forcing the kernel to generate new VMAs where an old one
 *                could have been extended instead.
 *
 * VM_STICKY    - When merging VMAs, VMA flags must match, unless they are
 *                'sticky'. If any sticky flags exist in either VMA, we simply
 *                set all of them on the merged VMA.
 */
#define VM_IGNORE_MERGE (VM_SOFTDIRTY | VM_STICKY)
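
/*
 * Illustrative sketch only (not part of the kernel API): one way merge logic
 * can consume the masks above. Flags covered by VM_IGNORE_MERGE are masked
 * out of the compatibility comparison, and sticky flags present on either
 * VMA end up set on the merged result. The helper names are hypothetical;
 * the real implementation lives in mm/vma.c. Plain unsigned long is used
 * because vm_flags_t is not yet declared at this point in the header.
 */
static inline bool example_merge_flags_compatible(unsigned long a, unsigned long b)
{
	return (a & ~VM_IGNORE_MERGE) == (b & ~VM_IGNORE_MERGE);
}

static inline unsigned long example_merged_flags(unsigned long a, unsigned long b)
{
	return a | (b & VM_STICKY);
}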

/*
 * Flags which should result in page tables being copied on fork. These are
 * flags which indicate that the VMA maps page tables which cannot be
 * reconstituted upon page fault, so necessitate page table copying upon fork:
 *
 * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
 *                           reasonably reconstructed on page fault.
 *
 * VM_UFFD_WP              - Encodes metadata about an installed uffd
 *                           write protect handler, which cannot be
 *                           reconstructed on page fault.
 *
 *                           We always copy page tables when dst_vma has
 *                           uffd-wp enabled, even if it is file-backed
 *                           (e.g. shmem), because when uffd-wp is enabled the
 *                           page tables contain uffd-wp protection
 *                           information that cannot be retrieved from the
 *                           page cache; skipping the copy would lose it.
 *
 * VM_MAYBE_GUARD          - Could contain page guard region markers which
 *                           by design are a property of the page tables
 *                           only and thus cannot be reconstructed on page
 *                           fault.
 */
#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; it amounts to an fprintf(STDERR,...)
 * either way :)
 */
#define pr_warn_once pr_err

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}

struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define NUM_MM_FLAG_BITS (64)
typedef struct {
	__private DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
} mm_flags_t;

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */

	unsigned long def_flags;

	mm_flags_t flags;		/* Must use mm_flags_* helpers to access */
};

struct vm_area_struct;

/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
};

/*
 * Describes an action that an mmap_prepare hook can request be taken to
 * complete the mapping of a VMA. Specified in vm_area_desc.
 */
struct mmap_action {
	union {
		/* Remap range. */
		struct {
			unsigned long start;
			unsigned long start_pfn;
			unsigned long size;
			pgprot_t pgprot;
		} remap;
	};
	enum mmap_action_type type;

	/*
	 * If specified, this hook is invoked after the selected action has
	 * been successfully completed. Note that the VMA write lock is still
	 * held.
	 *
	 * The absolute minimum ought to be done here.
	 *
	 * Returns 0 on success, or an error code.
	 */
	int (*success_hook)(const struct vm_area_struct *vma);

	/*
	 * If specified, this hook is invoked when an error occurs while
	 * attempting the selected action.
	 *
	 * The hook can return an error code in order to filter the error, but
	 * it is not valid to clear the error here.
	 */
	int (*error_hook)(int err);

	/*
	 * This should be set in rare instances where the operation requires
	 * that rmap not be able to access the VMA until it is completely set
	 * up.
	 */
	bool hide_from_rmap_until_complete :1;
};

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	const struct mm_struct *const mm;
	struct file *const file;	/* May vary from vm_file in stacked callers. */
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *vm_file;
	vm_flags_t vm_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;

	/* Take further action? */
	struct mmap_action action;
};

struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};
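
/*
 * Illustrative sketch only: what a driver's .mmap_prepare hook might look
 * like. It adjusts the descriptor and asks the core to perform a PFN remap
 * once the VMA has been set up. The helper name and PFN value are
 * hypothetical and not part of the kernel API.
 */
static inline int example_mmap_prepare(struct vm_area_desc *desc)
{
	desc->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND;

	desc->action.type = MMAP_REMAP_PFN;
	desc->action.remap.start = desc->start;
	desc->action.remap.start_pfn = 0x1000; /* hypothetical PFN */
	desc->action.remap.size = desc->end - desc->start;
	desc->action.remap.pgprot = desc->page_prot;

	return 0;
}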

struct file {
	struct address_space	*f_mapping;
	const struct file_operations	*f_op;
};

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not
	 * exist or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the
	 * originally inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old; /* Source VMA. */
	struct vm_area_struct *new; /* Destination VMA. */
	unsigned long old_addr; /* Address from which the move begins. */
	unsigned long old_end; /* Exclusive address at which old range ends. */
	unsigned long new_addr; /* Address to move page tables to. */
	unsigned long len_in; /* Bytes to remap specified by user. */

	bool need_rmap_locks; /* Do rmap locks need to be taken? */
	bool for_stack; /* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}
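
/*
 * Illustrative sketch only: how the iterator machinery above is typically
 * used. VMA_ITERATOR() declares an iterator over mm->mm_mt starting at the
 * given address, and for_each_vma() walks every VMA from that point on.
 * The helper name is hypothetical and not part of the kernel API.
 */
static inline int example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int count = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		count++;

	return count;
}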

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

extern const struct vm_operations_struct vma_dummy_vm_ops;

extern unsigned long rlimit(unsigned int limit);

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
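
/*
 * Illustrative sketch only: accounting a hypothetical 16-page read/write
 * private mapping. Such flags classify as a data mapping, so both total_vm
 * and data_vm grow by 16 pages. The helper name is hypothetical.
 */
static inline void example_account_data_mapping(struct mm_struct *mm)
{
	vm_stat_account(mm, VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 16);
}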

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
				 struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}
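
/*
 * Illustrative sketch only: how a test might construct a detached VMA using
 * the helpers above. The helper name is hypothetical; the real tests use
 * their own allocation wrappers.
 */
static inline void example_init_test_vma(struct vm_area_struct *vma,
					 struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	vma_init(vma, mm);
	vma_set_range(vma, start, end, start >> PAGE_SHIFT);
}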

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *file)
{
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline __must_check
int vma_start_write_killable(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
	return 0;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					  unsigned long start,
					  unsigned long end,
					  struct list_head *unmaps)
{
	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
}

static inline void mmap_assert_locked(struct mm_struct *mm)
{
}

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
}

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

static inline bool signal_pending(void *p)
{
	return false;
}

static inline bool is_file_hugepages(struct file *file)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
				 unsigned long npages)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}
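
/*
 * Illustrative sketch only: vm_flags_set()/vm_flags_clear() implicitly
 * write-lock the VMA (in this shim, by bumping vm_lock_seq via
 * vma_start_write()), whereas vm_flags_init() is for fresh, not-yet-visible
 * VMAs. The helper name is hypothetical.
 */
static inline void example_make_readonly(struct vm_area_struct *vma)
{
	vm_flags_clear(vma, VM_WRITE);
}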

static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *vma)
{
}

static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool capable(int cap)
{
	return true;
}

static inline bool mlock_future_ok(const struct mm_struct *mm,
				   vm_flags_t vm_flags, unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));

	if (!anon_vma)
		return -ENOMEM;

	anon_vma->root = anon_vma;
	vma->anon_vma = anon_vma;

	return 0;
}

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

# define ACCESS_PRIVATE(p, member) ((p)->member)

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Denies creating a writable executable mapping or gaining executable permissions.
 *
 * This denies the following:
 *
 *	a)	mmap(PROT_WRITE | PROT_EXEC)
 *
 *	b)	mmap(PROT_WRITE)
 *		mprotect(PROT_EXEC)
 *
 *	c)	mmap(PROT_WRITE)
 *		mprotect(PROT_READ)
 *		mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *	d)	mmap(PROT_READ | PROT_EXEC)
 *		mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the ones
 * we propose to set.
 *
 * Return: false if proposed change is OK, true if not ok and should be denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
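
/*
 * Illustrative sketch only: case (b) above expressed in VMA flag terms. With
 * MDWE enabled for the current process, adding VM_EXEC to a VMA that is
 * still writable is denied (returns true). The helper name is hypothetical.
 */
static inline bool example_mdwe_denies_wx(void)
{
	return map_deny_write_exec(VM_READ | VM_WRITE,
				   VM_READ | VM_WRITE | VM_EXEC);
}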

static inline int mapping_map_writable(struct address_space *mapping)
{
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
	if (reset_refcnt)
		refcount_set(&vma->vm_refcnt, 0);
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
				     struct vm_area_desc *desc);

static inline void mmap_action_prepare(struct mmap_action *action,
				       struct vm_area_desc *desc)
{
}

static inline int mmap_action_complete(struct mmap_action *action,
				       struct vm_area_struct *vma)
{
	return 0;
}

static inline int __compat_vma_mmap(const struct file_operations *f_op,
				    struct file *file, struct vm_area_struct *vma)
{
	struct vm_area_desc desc = {
		.mm = vma->vm_mm,
		.file = file,
		.start = vma->vm_start,
		.end = vma->vm_end,

		.pgoff = vma->vm_pgoff,
		.vm_file = vma->vm_file,
		.vm_flags = vma->vm_flags,
		.page_prot = vma->vm_page_prot,

		.action.type = MMAP_NOTHING, /* Default */
	};
	int err;

	err = f_op->mmap_prepare(&desc);
	if (err)
		return err;

	mmap_action_prepare(&desc.action, &desc);
	set_vma_from_desc(vma, &desc);
	return mmap_action_complete(&desc.action, vma);
}

static inline int compat_vma_mmap(struct file *file,
				  struct vm_area_struct *vma)
{
	return __compat_vma_mmap(file->f_op, file, vma);
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

static inline bool shmem_file(struct file *file)
{
	return false;
}

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
				       const struct file *file, vm_flags_t vm_flags)
{
	return vm_flags;
}

static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
{
}

static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
	return 0;
}

static inline int do_munmap(struct mm_struct *, unsigned long, size_t,
			    struct list_head *uf)
{
	return 0;
}

#endif	/* __MM_VMA_INTERNAL_H */