/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>

extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr		0UL
#define dac_mmap_min_addr	0UL
#endif

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define MMF_HAS_MDWE	28

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_MAYBE_GUARD	0x00000800
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_GROWSUP	VM_NONE

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#define VM_STACK_EARLY	VM_GROWSDOWN
#else
#define VM_STACK	VM_GROWSDOWN
#define VM_STACK_EARLY	0
#endif

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define RLIMIT_STACK		3	/* max stack size */
#define RLIMIT_MEMLOCK		8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK 14

#ifdef CONFIG_64BIT
#define VM_SEALED_BIT	42
#define VM_SEALED	BIT(VM_SEALED_BIT)
#else
#define VM_SEALED	VM_NONE
#endif

/*
 * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
 * possesses them but the other does not, should nonetheless be applied to the
 * merged VMA:
 *
 * VM_SOFTDIRTY - if a VMA is marked soft-dirty, that is, has not had its
 *                references cleared via /proc/$pid/clear_refs, any merged VMA
 *                should be considered soft-dirty also, as the flag operates at
 *                a VMA granularity.
 */
#define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)

/*
 * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
 * of these flags and the other not does not preclude a merge.
 *
 * VM_STICKY - When merging VMAs, VMA flags must match, unless they are
 *             'sticky'. If any sticky flags exist in either VMA, we simply
 *             set all of them on the merged VMA.
 */
#define VM_IGNORE_MERGE VM_STICKY
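/*
 * Illustrative example (not part of the original header): two otherwise
 * identical VMAs that differ only in a sticky flag such as VM_MAYBE_GUARD
 * may still merge, since the difference is masked by VM_IGNORE_MERGE; the
 * merged VMA then carries the sticky flag:
 *
 *	vm_flags_t a = VM_READ | VM_MAYBE_GUARD;
 *	vm_flags_t b = VM_READ;
 *
 *	((a ^ b) & ~VM_IGNORE_MERGE) == 0, so the flags are merge-compatible.
 */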
/*
 * Flags which should result in page tables being copied on fork. These are
 * flags which indicate that the VMA maps page tables which cannot be
 * reconstituted upon page fault, so necessitate page table copying upon fork.
 *
 * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
 *                           reasonably reconstructed on page fault.
 *
 * VM_UFFD_WP - Encodes metadata about an installed uffd write protect
 *              handler, which cannot be reconstructed on page fault.
 *
 *              We always copy pgtables when dst_vma has uffd-wp enabled,
 *              even if it's file-backed (e.g. shmem). When uffd-wp is
 *              enabled, the pgtable contains uffd-wp protection information
 *              that cannot be retrieved from the page cache, so skipping
 *              the copy would lose that information.
 *
 * VM_MAYBE_GUARD - Could contain page guard region markers which by design
 *                  are a property of the page tables only and thus cannot
 *                  be reconstructed on page fault.
 */
#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1UL << 47) - PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this, it amounts to an fprintf(STDERR,...)
 * either way :)
 */
#define pr_warn_once pr_err

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

#define pgtable_supports_soft_dirty() 1

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
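/*
 * Illustrative example (not part of the original header): typical iteration
 * over every VMA in an mm using the iterator and for_each_vma() defined
 * above; inspect_vma() is a hypothetical callback.
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	for_each_vma(vmi, vma)
 *		inspect_vma(vma);
 */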
struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define NUM_MM_FLAG_BITS (64)
typedef struct {
	__private DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
} mm_flags_t;

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */

	unsigned long def_flags;

	mm_flags_t flags;		/* Must use mm_flags_* helpers to access */
};

struct vm_area_struct;

/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
};

/*
 * Describes an action an .mmap_prepare hook can request be taken to complete
 * the mapping of a VMA. Specified in vm_area_desc.
 */
struct mmap_action {
	union {
		/* Remap range. */
		struct {
			unsigned long start;
			unsigned long start_pfn;
			unsigned long size;
			pgprot_t pgprot;
		} remap;
	};
	enum mmap_action_type type;

	/*
	 * If specified, this hook is invoked after the selected action has
	 * been successfully completed. Note that the VMA write lock is still
	 * held.
	 *
	 * The absolute minimum ought to be done here.
	 *
	 * Returns 0 on success, or an error code.
	 */
	int (*success_hook)(const struct vm_area_struct *vma);

	/*
	 * If specified, this hook is invoked when an error occurred while
	 * attempting the selected action.
	 *
	 * The hook can return an error code in order to filter the error, but
	 * it is not valid to clear the error here.
	 */
	int (*error_hook)(int err);

	/*
	 * This should be set in rare instances where the operation requires
	 * that the rmap not be able to access the VMA until it is completely
	 * set up.
	 */
	bool hide_from_rmap_until_complete :1;
};

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	const struct mm_struct *const mm;
	struct file *const file;	/* May vary from vm_file in stacked callers. */
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *vm_file;
	vm_flags_t vm_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;

	/* Take further action? */
	struct mmap_action action;
};
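/*
 * Illustrative sketch (not part of the original header): an .mmap_prepare
 * hook might request a deferred PFN remap by filling in the action above;
 * my_mmap_prepare() and MY_START_PFN are hypothetical.
 *
 *	static int my_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->action.type = MMAP_REMAP_PFN;
 *		desc->action.remap.start = desc->start;
 *		desc->action.remap.start_pfn = MY_START_PFN;
 *		desc->action.remap.size = desc->end - desc->start;
 *		desc->action.remap.pgprot = desc->page_prot;
 *		return 0;
 *	}
 */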
struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};

struct file {
	struct address_space	*f_mapping;
	const struct file_operations	*f_op;
};

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);

	/*
	 * Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS.
	 */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/*
	 * Called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a
	 * generic implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/*
	 * Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally.
	 */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not
	 * exist or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the
	 * originally inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old;	/* Source VMA. */
	struct vm_area_struct *new;	/* Destination VMA. */
	unsigned long old_addr;	/* Address from which the move begins. */
	unsigned long old_end;	/* Exclusive address at which old range ends. */
	unsigned long new_addr;	/* Address to move page tables to. */
	unsigned long len_in;	/* Bytes to remap specified by user. */

	bool need_rmap_locks;	/* Do rmap locks need to be taken? */
	bool for_stack;		/* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
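/*
 * Illustrative example (not part of the original header): describing a page
 * table move for move_page_tables() (stubbed below); old_vma and new_vma are
 * hypothetical, already-configured VMAs covering len bytes.
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_vma->vm_start,
 *		       new_vma->vm_start, len);
 *
 *	moved_bytes = move_page_tables(&pmc);
 */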
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

extern const struct vm_operations_struct vma_dummy_vm_ops;

extern unsigned long rlimit(unsigned int limit);

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
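/*
 * Illustrative example (not part of the original header): how the
 * classification above buckets a few typical mappings; total_vm grows in
 * every case.
 *
 *	vm_stat_account(mm, VM_READ | VM_EXEC, 4);	// exec_vm += 4
 *	vm_stat_account(mm, VM_READ | VM_WRITE, 4);	// data_vm += 4
 *	vm_stat_account(mm, VM_STACK_FLAGS, 4);		// stack_vm += 4
 */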
/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
				     unsigned long start, unsigned long end,
				     gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
							    unsigned long start_addr,
							    unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
				 struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}
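/*
 * Illustrative example (not part of the original header): the lookup
 * helpers above differ in how they match a range.
 *
 *	vma = vma_lookup(mm, addr);			// VMA containing addr exactly
 *	vma = find_vma_intersection(mm, addr, end);	// first VMA overlapping [addr, end)
 *	vma = find_vma_prev(mm, addr, &prev);		// VMA at addr (or next), plus predecessor
 */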
/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *file)
{
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
			      struct vm_area_struct *vma, unsigned long start_addr,
			      unsigned long end_addr, unsigned long tree_end)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
				 struct vm_area_struct *vma, unsigned long floor,
				 unsigned long ceiling, bool mm_wr_locked)
{
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *src,
				 struct vm_area_struct *dst)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst,
				 struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}
static inline __must_check
int vma_start_write_killable(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
	return 0;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}
static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
}

static inline void mmap_assert_locked(struct mm_struct *mm)
{
}

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
}

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

static inline bool signal_pending(void *p)
{
	return false;
}

static inline bool is_file_hugepages(struct file *file)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
				 unsigned long npages)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}
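/*
 * Illustrative example (not part of the original header): flags on a fresh
 * VMA are set with vm_flags_init(); later modifications go through
 * vm_flags_set()/vm_flags_clear(), which write-lock the VMA first.
 *
 *	vm_flags_init(vma, VM_READ | VM_MAYREAD);
 *	vm_flags_set(vma, VM_LOCKED);
 *	vm_flags_clear(vma, VM_LOCKED);
 */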
static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *vma)
{
}

static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool capable(int cap)
{
	return true;
}

static inline bool mlock_future_ok(const struct mm_struct *mm,
				   vm_flags_t vm_flags, unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}
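/*
 * Illustrative arithmetic (not part of the original header): with 4 KiB
 * pages, an 8 MiB RLIMIT_MEMLOCK yields 8 MiB >> 12 = 2048 permitted
 * pages, so a VM_LOCKED request passes only while mm->locked_vm plus the
 * newly requested pages stays at or below 2048.
 */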
static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));

	if (!anon_vma)
		return -ENOMEM;

	anon_vma->root = anon_vma;
	vma->anon_vma = anon_vma;

	return 0;
}

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

#define ACCESS_PRIVATE(p, member) ((p)->member)

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Denies creating a writable executable mapping or gaining executable
 * permissions.
 *
 * This denies the following:
 *
 *	a)	mmap(PROT_WRITE | PROT_EXEC)
 *
 *	b)	mmap(PROT_WRITE)
 *		mprotect(PROT_EXEC)
 *
 *	c)	mmap(PROT_WRITE)
 *		mprotect(PROT_READ)
 *		mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *	d)	mmap(PROT_READ | PROT_EXEC)
 *		mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the ones
 * we propose to set.
 *
 * Return: false if proposed change is OK, true if not ok and should be denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
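/*
 * Illustrative example (not part of the original header): with MDWE enabled
 * for the process, the checks above behave as follows.
 *
 *	map_deny_write_exec(VM_READ, VM_READ | VM_WRITE | VM_EXEC);	// true: W|X denied
 *	map_deny_write_exec(VM_READ, VM_READ | VM_EXEC);		// true: gaining exec denied
 *	map_deny_write_exec(VM_READ | VM_EXEC, VM_READ | VM_EXEC);	// false: already executable
 */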
static inline int mapping_map_writable(struct address_space *mapping)
{
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
	if (reset_refcnt)
		refcount_set(&vma->vm_refcnt, 0);
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
				     struct vm_area_desc *desc);

static inline void mmap_action_prepare(struct mmap_action *action,
				       struct vm_area_desc *desc)
{
}

static inline int mmap_action_complete(struct mmap_action *action,
				       struct vm_area_struct *vma)
{
	return 0;
}

static inline int __compat_vma_mmap(const struct file_operations *f_op,
				    struct file *file, struct vm_area_struct *vma)
{
	struct vm_area_desc desc = {
		.mm = vma->vm_mm,
		.file = file,
		.start = vma->vm_start,
		.end = vma->vm_end,

		.pgoff = vma->vm_pgoff,
		.vm_file = vma->vm_file,
		.vm_flags = vma->vm_flags,
		.page_prot = vma->vm_page_prot,

		.action.type = MMAP_NOTHING, /* Default */
	};
	int err;

	err = f_op->mmap_prepare(&desc);
	if (err)
		return err;

	mmap_action_prepare(&desc.action, &desc);
	set_vma_from_desc(vma, &desc);
	return mmap_action_complete(&desc.action, vma);
}

static inline int compat_vma_mmap(struct file *file,
				  struct vm_area_struct *vma)
{
	return __compat_vma_mmap(file->f_op, file, vma);
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

static inline bool shmem_file(struct file *file)
{
	return false;
}

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
				       const struct file *file,
				       vm_flags_t vm_flags)
{
	return vm_flags;
}

static inline void remap_pfn_range_prepare(struct vm_area_desc *desc,
					   unsigned long pfn)
{
}

static inline int remap_pfn_range_complete(struct vm_area_struct *vma,
					   unsigned long addr, unsigned long pfn,
					   unsigned long size, pgprot_t pgprot)
{
	return 0;
}

static inline int do_munmap(struct mm_struct *, unsigned long, size_t,
			    struct list_head *uf)
{
	return 0;
}

static inline void vm_flags_reset(struct vm_area_struct *vma, vm_flags_t flags)
{
	vm_flags_t *dst = (vm_flags_t *)(&vma->vm_flags);

	*dst = flags;
}

#endif	/* __MM_VMA_INTERNAL_H */