/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>

extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr		0UL
#define dac_mmap_min_addr	0UL
#endif

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define MMF_HAS_MDWE	28

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_GROWSUP	VM_NONE

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#define VM_STACK_EARLY	VM_GROWSDOWN
#else
#define VM_STACK	VM_GROWSDOWN
#define VM_STACK_EARLY	0
#endif

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define RLIMIT_STACK	3	/* max stack size */
#define RLIMIT_MEMLOCK	8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK	14

#ifdef CONFIG_64BIT
#define VM_SEALED_BIT	42
#define VM_SEALED	BIT(VM_SEALED_BIT)
#else
#define VM_SEALED	VM_NONE
#endif

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1UL << 47) - PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; it amounts to an fprintf(STDERR,...)
 * either way :)
 */
#define pr_warn_once pr_err

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as an enum, so it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
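
/*
 * Illustrative sketch (not part of the shim API; mm and count are
 * hypothetical test variables): VMA_ITERATOR pairs with the for_each_vma()
 * macro defined above to walk every VMA in an mm:
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *	unsigned long count = 0;
 *
 *	for_each_vma(vmi, vma)
 *		count++;
 */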
struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define NUM_MM_FLAG_BITS (64)
typedef struct {
	__private DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
} mm_flags_t;

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */

	unsigned long def_flags;

	mm_flags_t flags;		/* Must use mm_flags_* helpers to access */
};

struct vm_area_struct;

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields, which will cause those fields to be updated in
 * the resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *file;
	vm_flags_t vm_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;
};

struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};

struct file {
	struct address_space		*f_mapping;
	const struct file_operations	*f_op;
};
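
/*
 * Illustrative sketch (hypothetical driver; my_mmap_prepare and my_vm_ops
 * are not real names): an f_op->mmap_prepare hook receives a
 * struct vm_area_desc, reads the immutable fields describing the proposed
 * range, and touches only the mutable and write-only fields:
 *
 *	static int my_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_flags |= VM_DONTEXPAND;
 *		desc->vm_ops = &my_vm_ops;
 *		return 0;
 *	}
 */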
#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr;	/* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
427 */ 428 int (*mprotect)(struct vm_area_struct *vma, unsigned long start, 429 unsigned long end, unsigned long newflags); 430 vm_fault_t (*fault)(struct vm_fault *vmf); 431 vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order); 432 vm_fault_t (*map_pages)(struct vm_fault *vmf, 433 pgoff_t start_pgoff, pgoff_t end_pgoff); 434 unsigned long (*pagesize)(struct vm_area_struct * area); 435 436 /* notification that a previously read-only page is about to become 437 * writable, if an error is returned it will cause a SIGBUS */ 438 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); 439 440 /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ 441 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); 442 443 /* called by access_process_vm when get_user_pages() fails, typically 444 * for use by special VMAs. See also generic_access_phys() for a generic 445 * implementation useful for any iomem mapping. 446 */ 447 int (*access)(struct vm_area_struct *vma, unsigned long addr, 448 void *buf, int len, int write); 449 450 /* Called by the /proc/PID/maps code to ask the vma whether it 451 * has a special name. Returning non-NULL will also cause this 452 * vma to be dumped unconditionally. */ 453 const char *(*name)(struct vm_area_struct *vma); 454 455 #ifdef CONFIG_NUMA 456 /* 457 * set_policy() op must add a reference to any non-NULL @new mempolicy 458 * to hold the policy upon return. Caller should pass NULL @new to 459 * remove a policy and fall back to surrounding context--i.e. do not 460 * install a MPOL_DEFAULT policy, nor the task or system default 461 * mempolicy. 462 */ 463 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 464 465 /* 466 * get_policy() op must add reference [mpol_get()] to any policy at 467 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure 468 * in mm/mempolicy.c will do this automatically. 469 * get_policy() must NOT add a ref if the policy at (vma,addr) is not 470 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. 471 * If no [shared/vma] mempolicy exists at the addr, get_policy() op 472 * must return NULL--i.e., do not "fallback" to task or system default 473 * policy. 474 */ 475 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 476 unsigned long addr, pgoff_t *ilx); 477 #endif 478 #ifdef CONFIG_FIND_NORMAL_PAGE 479 /* 480 * Called by vm_normal_page() for special PTEs in @vma at @addr. This 481 * allows for returning a "normal" page from vm_normal_page() even 482 * though the PTE indicates that the "struct page" either does not exist 483 * or should not be touched: "special". 484 * 485 * Do not add new users: this really only works when a "normal" page 486 * was mapped, but then the PTE got changed to something weird (+ 487 * marked special) that would not make pte_pfn() identify the originally 488 * inserted page. 489 */ 490 struct page *(*find_normal_page)(struct vm_area_struct *vma, 491 unsigned long addr); 492 #endif /* CONFIG_FIND_NORMAL_PAGE */ 493 }; 494 495 struct vm_unmapped_area_info { 496 #define VM_UNMAPPED_AREA_TOPDOWN 1 497 unsigned long flags; 498 unsigned long length; 499 unsigned long low_limit; 500 unsigned long high_limit; 501 unsigned long align_mask; 502 unsigned long align_offset; 503 unsigned long start_gap; 504 }; 505 506 struct pagetable_move_control { 507 struct vm_area_struct *old; /* Source VMA. */ 508 struct vm_area_struct *new; /* Destination VMA. */ 509 unsigned long old_addr; /* Address from which the move begins. 
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free the object in
	 * the same state as after calling the constructor, or deal
	 * appropriately with any differences between a freshly constructed
	 * and a reallocated object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};
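
/*
 * Illustrative sketch (hypothetical cache, kmem_cache_create() is defined
 * below): placing the free pointer inside the object via @freeptr_offset
 * requires @use_freeptr_offset to be set, per the documentation above. For
 * a struct with a dedicated freeptr_t member, that might look like:
 *
 *	struct kmem_cache_args args = {
 *		.freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr),
 *		.use_freeptr_offset = true,
 *	};
 *	cache = kmem_cache_create("vm_area_struct",
 *				  sizeof(struct vm_area_struct), &args);
 */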
618 */ 619 return mas_find(&vmi->mas, ULONG_MAX); 620 } 621 622 /* 623 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these 624 * assertions should be made either under mmap_write_lock or when the object 625 * has been isolated under mmap_write_lock, ensuring no competing writers. 626 */ 627 static inline void vma_assert_attached(struct vm_area_struct *vma) 628 { 629 WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); 630 } 631 632 static inline void vma_assert_detached(struct vm_area_struct *vma) 633 { 634 WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); 635 } 636 637 static inline void vma_assert_write_locked(struct vm_area_struct *); 638 static inline void vma_mark_attached(struct vm_area_struct *vma) 639 { 640 vma_assert_write_locked(vma); 641 vma_assert_detached(vma); 642 refcount_set_release(&vma->vm_refcnt, 1); 643 } 644 645 static inline void vma_mark_detached(struct vm_area_struct *vma) 646 { 647 vma_assert_write_locked(vma); 648 vma_assert_attached(vma); 649 /* We are the only writer, so no need to use vma_refcount_put(). */ 650 if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { 651 /* 652 * Reader must have temporarily raised vm_refcnt but it will 653 * drop it without using the vma since vma is write-locked. 654 */ 655 } 656 } 657 658 extern const struct vm_operations_struct vma_dummy_vm_ops; 659 660 extern unsigned long rlimit(unsigned int limit); 661 662 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) 663 { 664 memset(vma, 0, sizeof(*vma)); 665 vma->vm_mm = mm; 666 vma->vm_ops = &vma_dummy_vm_ops; 667 INIT_LIST_HEAD(&vma->anon_vma_chain); 668 vma->vm_lock_seq = UINT_MAX; 669 } 670 671 struct kmem_cache { 672 const char *name; 673 size_t object_size; 674 struct kmem_cache_args *args; 675 }; 676 677 static inline struct kmem_cache *__kmem_cache_create(const char *name, 678 size_t object_size, 679 struct kmem_cache_args *args) 680 { 681 struct kmem_cache *ret = malloc(sizeof(struct kmem_cache)); 682 683 ret->name = name; 684 ret->object_size = object_size; 685 ret->args = args; 686 687 return ret; 688 } 689 690 #define kmem_cache_create(__name, __object_size, __args, ...) \ 691 __kmem_cache_create((__name), (__object_size), (__args)) 692 693 static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 694 { 695 (void)gfpflags; 696 697 return calloc(s->object_size, 1); 698 } 699 700 static inline void kmem_cache_free(struct kmem_cache *s, void *x) 701 { 702 free(x); 703 } 704 705 /* 706 * These are defined in vma.h, but sadly vm_stat_account() is referenced by 707 * kernel/fork.c, so we have to these broadly available there, and temporarily 708 * define them here to resolve the dependency cycle. 
709 */ 710 711 #define is_exec_mapping(flags) \ 712 ((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC) 713 714 #define is_stack_mapping(flags) \ 715 (((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK)) 716 717 #define is_data_mapping(flags) \ 718 ((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE) 719 720 static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, 721 long npages) 722 { 723 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); 724 725 if (is_exec_mapping(flags)) 726 mm->exec_vm += npages; 727 else if (is_stack_mapping(flags)) 728 mm->stack_vm += npages; 729 else if (is_data_mapping(flags)) 730 mm->data_vm += npages; 731 } 732 733 #undef is_exec_mapping 734 #undef is_stack_mapping 735 #undef is_data_mapping 736 737 /* Currently stubbed but we may later wish to un-stub. */ 738 static inline void vm_acct_memory(long pages); 739 static inline void vm_unacct_memory(long pages) 740 { 741 vm_acct_memory(-pages); 742 } 743 744 static inline void mapping_allow_writable(struct address_space *mapping) 745 { 746 atomic_inc(&mapping->i_mmap_writable); 747 } 748 749 static inline void vma_set_range(struct vm_area_struct *vma, 750 unsigned long start, unsigned long end, 751 pgoff_t pgoff) 752 { 753 vma->vm_start = start; 754 vma->vm_end = end; 755 vma->vm_pgoff = pgoff; 756 } 757 758 static inline 759 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) 760 { 761 return mas_find(&vmi->mas, max - 1); 762 } 763 764 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi, 765 unsigned long start, unsigned long end, gfp_t gfp) 766 { 767 __mas_set_range(&vmi->mas, start, end - 1); 768 mas_store_gfp(&vmi->mas, NULL, gfp); 769 if (unlikely(mas_is_err(&vmi->mas))) 770 return -ENOMEM; 771 772 return 0; 773 } 774 775 static inline void mmap_assert_locked(struct mm_struct *); 776 static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, 777 unsigned long start_addr, 778 unsigned long end_addr) 779 { 780 unsigned long index = start_addr; 781 782 mmap_assert_locked(mm); 783 return mt_find(&mm->mm_mt, &index, end_addr - 1); 784 } 785 786 static inline 787 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) 788 { 789 return mtree_load(&mm->mm_mt, addr); 790 } 791 792 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi) 793 { 794 return mas_prev(&vmi->mas, 0); 795 } 796 797 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr) 798 { 799 mas_set(&vmi->mas, addr); 800 } 801 802 static inline bool vma_is_anonymous(struct vm_area_struct *vma) 803 { 804 return !vma->vm_ops; 805 } 806 807 /* Defined in vma.h, so temporarily define here to avoid circular dependency. */ 808 #define vma_iter_load(vmi) \ 809 mas_walk(&(vmi)->mas) 810 811 static inline struct vm_area_struct * 812 find_vma_prev(struct mm_struct *mm, unsigned long addr, 813 struct vm_area_struct **pprev) 814 { 815 struct vm_area_struct *vma; 816 VMA_ITERATOR(vmi, mm, addr); 817 818 vma = vma_iter_load(&vmi); 819 *pprev = vma_prev(&vmi); 820 if (!vma) 821 vma = vma_next(&vmi); 822 return vma; 823 } 824 825 #undef vma_iter_load 826 827 static inline void vma_iter_init(struct vma_iterator *vmi, 828 struct mm_struct *mm, unsigned long addr) 829 { 830 mas_init(&vmi->mas, &mm->mm_mt, addr); 831 } 832 833 /* Stubbed functions. 
/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
				     unsigned long start, unsigned long end,
				     gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
							   unsigned long start_addr,
							   unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load
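
/*
 * Illustrative sketch (mm and addr are hypothetical variables):
 * find_vma_prev() returns the VMA containing @addr, or the next one if no
 * VMA contains it, and stores the predecessor, so both neighbours of an
 * address are obtained in a single lookup:
 *
 *	struct vm_area_struct *prev;
 *	struct vm_area_struct *vma = find_vma_prev(mm, addr, &prev);
 */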
static inline void vma_iter_init(struct vma_iterator *vmi,
				 struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
			      struct vm_area_struct *vma, unsigned long start_addr,
			      unsigned long end_addr, unsigned long tree_end,
			      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
				 struct vm_area_struct *vma, unsigned long floor,
				 unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes, we indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)next;
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}
static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

static inline bool is_file_hugepages(struct file *)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}
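
/*
 * Illustrative sketch: unlike vm_flags_init(), the modify helpers above
 * write-lock the VMA first, which tests can observe through vm_lock_seq:
 *
 *	unsigned int seq = vma->vm_lock_seq;
 *
 *	vm_flags_set(vma, VM_LOCKED);
 *	// vma->vm_lock_seq is now seq + 1 in this userland shim.
 */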
static inline int shmem_zero_setup(struct vm_area_struct *)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *)
{
}

static inline void perf_event_mmap(struct vm_area_struct *)
{
}

static inline bool vma_is_dax(struct vm_area_struct *)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(vm_flags_t)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *)
{
}

static inline int mmap_file(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool capable(int cap)
{
	return true;
}

static inline bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
				   unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}
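
/*
 * Note: capable() is stubbed to return true above, so this shim always takes
 * the early return. Where capable(CAP_IPC_LOCK) can fail, the arithmetic is
 * (hypothetical numbers): with 4 KiB pages, an RLIMIT_MEMLOCK of 8 MiB gives
 * limit_pages = 2048; locking 4 MiB (1024 pages) into an mm with
 * mm->locked_vm == 512 gives 1536 <= 2048, so the request is allowed.
 */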
static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));

	if (!anon_vma)
		return -ENOMEM;

	anon_vma->root = anon_vma;
	vma->anon_vma = anon_vma;

	return 0;
}

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

#define ACCESS_PRIVATE(p, member) ((p)->member)

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Denies creating a writable executable mapping or gaining executable permissions.
 *
 * This denies the following:
 *
 *	a)	mmap(PROT_WRITE | PROT_EXEC)
 *
 *	b)	mmap(PROT_WRITE)
 *		mprotect(PROT_EXEC)
 *
 *	c)	mmap(PROT_WRITE)
 *		mprotect(PROT_READ)
 *		mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *	d)	mmap(PROT_READ | PROT_EXEC)
 *		mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the ones
 * we propose to set.
 *
 * Return: false if proposed change is OK, true if not ok and should be denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
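
/*
 * Worked examples mirroring the cases documented above (MDWE enabled; flag
 * combinations are illustrative):
 *
 *	map_deny_write_exec(VM_NONE, VM_WRITE | VM_EXEC);	// true:  case a)
 *	map_deny_write_exec(VM_WRITE, VM_EXEC);			// true:  case b)
 *	map_deny_write_exec(VM_READ | VM_EXEC, VM_READ | VM_EXEC); // false: case d)
 */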
static inline int mapping_map_writable(struct address_space *mapping)
{
	int c = atomic_read(&mapping->i_mmap_writable);

	/* Derived from the raw_atomic_inc_unless_negative() implementation. */
	do {
		if (c < 0)
			return -EPERM;
	} while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c+1));

	return 0;
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	(void)pmc;

	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
	(void)tlb;
	(void)addr;
	(void)end;
	(void)floor;
	(void)ceiling;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	(void)mm;

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	(void)mm;
}

static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
	(void)vma;
	(void)reset_refcnt;
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	(void)orig_vma;
	(void)new_vma;
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	(void)vma;
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
				     struct vm_area_desc *desc);

static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
					       struct vm_area_desc *desc);

static inline int compat_vma_mmap_prepare(struct file *file,
					  struct vm_area_struct *vma)
{
	struct vm_area_desc desc;
	int err;

	err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
	if (err)
		return err;
	set_vma_from_desc(vma, &desc);

	return 0;
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap_prepare(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	(void)vma;
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

static inline bool shmem_file(struct file *)
{
	return false;
}

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *, const struct file *,
				       vm_flags_t vm_flags)
{
	return vm_flags;
}

#endif	/* __MM_VMA_INTERNAL_H */