1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 3 #pragma once 4 5 /* Forward declarations to avoid header cycle. */ 6 struct vm_area_struct; 7 static inline void vma_start_write(struct vm_area_struct *vma); 8 9 extern const struct vm_operations_struct vma_dummy_vm_ops; 10 extern unsigned long stack_guard_gap; 11 extern const struct vm_operations_struct vma_dummy_vm_ops; 12 extern unsigned long rlimit(unsigned int limit); 13 struct task_struct *get_current(void); 14 15 #define MMF_HAS_MDWE 28 16 #define current get_current() 17 18 /* 19 * Define the task command name length as enum, then it can be visible to 20 * BPF programs. 21 */ 22 enum { 23 TASK_COMM_LEN = 16, 24 }; 25 26 /* PARTIALLY implemented types. */ 27 struct mm_struct { 28 struct maple_tree mm_mt; 29 int map_count; /* number of VMAs */ 30 unsigned long total_vm; /* Total pages mapped */ 31 unsigned long locked_vm; /* Pages that have PG_mlocked set */ 32 unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ 33 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ 34 unsigned long stack_vm; /* VM_STACK */ 35 36 union { 37 vm_flags_t def_flags; 38 vma_flags_t def_vma_flags; 39 }; 40 41 mm_flags_t flags; /* Must use mm_flags_* helpers to access */ 42 }; 43 struct address_space { 44 struct rb_root_cached i_mmap; 45 unsigned long flags; 46 atomic_t i_mmap_writable; 47 }; 48 struct file_operations { 49 int (*mmap)(struct file *, struct vm_area_struct *); 50 int (*mmap_prepare)(struct vm_area_desc *); 51 }; 52 struct file { 53 struct address_space *f_mapping; 54 const struct file_operations *f_op; 55 }; 56 struct anon_vma_chain { 57 struct anon_vma *anon_vma; 58 struct list_head same_vma; 59 }; 60 struct task_struct { 61 char comm[TASK_COMM_LEN]; 62 pid_t pid; 63 struct mm_struct *mm; 64 65 /* Used for emulating ABI behavior of previous Linux versions: */ 66 unsigned int personality; 67 }; 68 69 struct kref { 70 refcount_t refcount; 71 }; 72 73 struct anon_vma_name { 74 struct kref kref; 75 /* The name needs to be at the end because it is dynamically sized. */ 76 char name[]; 77 }; 78 79 /* 80 * Contains declarations that are DUPLICATED from kernel source in order to 81 * faciliate userland VMA testing. 82 * 83 * These must be kept in sync with kernel source. 84 */ 85 86 #define VMA_LOCK_OFFSET 0x40000000 87 88 typedef struct { unsigned long v; } freeptr_t; 89 90 #define VM_NONE 0x00000000 91 92 typedef int __bitwise vma_flag_t; 93 94 #define ACCESS_PRIVATE(p, member) ((p)->member) 95 96 #define DECLARE_VMA_BIT(name, bitnum) \ 97 VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum) 98 #define DECLARE_VMA_BIT_ALIAS(name, aliased) \ 99 VMA_ ## name ## _BIT = VMA_ ## aliased ## _BIT 100 enum { 101 DECLARE_VMA_BIT(READ, 0), 102 DECLARE_VMA_BIT(WRITE, 1), 103 DECLARE_VMA_BIT(EXEC, 2), 104 DECLARE_VMA_BIT(SHARED, 3), 105 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */ 106 DECLARE_VMA_BIT(MAYREAD, 4), /* limits for mprotect() etc. */ 107 DECLARE_VMA_BIT(MAYWRITE, 5), 108 DECLARE_VMA_BIT(MAYEXEC, 6), 109 DECLARE_VMA_BIT(MAYSHARE, 7), 110 DECLARE_VMA_BIT(GROWSDOWN, 8), /* general info on the segment */ 111 #ifdef CONFIG_MMU 112 DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */ 113 #else 114 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */ 115 DECLARE_VMA_BIT(MAYOVERLAY, 9), 116 #endif /* CONFIG_MMU */ 117 /* Page-ranges managed without "struct page", just pure PFN */ 118 DECLARE_VMA_BIT(PFNMAP, 10), 119 DECLARE_VMA_BIT(MAYBE_GUARD, 11), 120 DECLARE_VMA_BIT(UFFD_WP, 12), /* wrprotect pages tracking */ 121 DECLARE_VMA_BIT(LOCKED, 13), 122 DECLARE_VMA_BIT(IO, 14), /* Memory mapped I/O or similar */ 123 DECLARE_VMA_BIT(SEQ_READ, 15), /* App will access data sequentially */ 124 DECLARE_VMA_BIT(RAND_READ, 16), /* App will not benefit from clustered reads */ 125 DECLARE_VMA_BIT(DONTCOPY, 17), /* Do not copy this vma on fork */ 126 DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */ 127 DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */ 128 DECLARE_VMA_BIT(ACCOUNT, 20), /* Is a VM accounted object */ 129 DECLARE_VMA_BIT(NORESERVE, 21), /* should the VM suppress accounting */ 130 DECLARE_VMA_BIT(HUGETLB, 22), /* Huge TLB Page VM */ 131 DECLARE_VMA_BIT(SYNC, 23), /* Synchronous page faults */ 132 DECLARE_VMA_BIT(ARCH_1, 24), /* Architecture-specific flag */ 133 DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */ 134 DECLARE_VMA_BIT(DONTDUMP, 26), /* Do not include in the core dump */ 135 DECLARE_VMA_BIT(SOFTDIRTY, 27), /* NOT soft dirty clean area */ 136 DECLARE_VMA_BIT(MIXEDMAP, 28), /* Can contain struct page and pure PFN pages */ 137 DECLARE_VMA_BIT(HUGEPAGE, 29), /* MADV_HUGEPAGE marked this vma */ 138 DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */ 139 DECLARE_VMA_BIT(MERGEABLE, 31), /* KSM may merge identical pages */ 140 /* These bits are reused, we define specific uses below. */ 141 DECLARE_VMA_BIT(HIGH_ARCH_0, 32), 142 DECLARE_VMA_BIT(HIGH_ARCH_1, 33), 143 DECLARE_VMA_BIT(HIGH_ARCH_2, 34), 144 DECLARE_VMA_BIT(HIGH_ARCH_3, 35), 145 DECLARE_VMA_BIT(HIGH_ARCH_4, 36), 146 DECLARE_VMA_BIT(HIGH_ARCH_5, 37), 147 DECLARE_VMA_BIT(HIGH_ARCH_6, 38), 148 /* 149 * This flag is used to connect VFIO to arch specific KVM code. It 150 * indicates that the memory under this VMA is safe for use with any 151 * non-cachable memory type inside KVM. Some VFIO devices, on some 152 * platforms, are thought to be unsafe and can cause machine crashes 153 * if KVM does not lock down the memory type. 154 */ 155 DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39), 156 #ifdef CONFIG_PPC32 157 DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1), 158 #else 159 DECLARE_VMA_BIT(DROPPABLE, 40), 160 #endif 161 DECLARE_VMA_BIT(UFFD_MINOR, 41), 162 DECLARE_VMA_BIT(SEALED, 42), 163 /* Flags that reuse flags above. */ 164 DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0), 165 DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1), 166 DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2), 167 DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3), 168 DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4), 169 #if defined(CONFIG_X86_USER_SHADOW_STACK) 170 /* 171 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of 172 * support core mm. 173 * 174 * These VMAs will get a single end guard page. This helps userspace 175 * protect itself from attacks. A single page is enough for current 176 * shadow stack archs (x86). See the comments near alloc_shstk() in 177 * arch/x86/kernel/shstk.c for more details on the guard size. 178 */ 179 DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5), 180 #elif defined(CONFIG_ARM64_GCS) 181 /* 182 * arm64's Guarded Control Stack implements similar functionality and 183 * has similar constraints to shadow stacks. 184 */ 185 DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6), 186 #endif 187 DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1), /* Strong Access Ordering (powerpc) */ 188 DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1), /* parisc */ 189 DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1), /* sparc64 */ 190 DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1), /* arm64 */ 191 DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1), /* sparc64, arm64 */ 192 DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1), /* !CONFIG_MMU */ 193 DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4), /* arm64 */ 194 DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */ 195 #ifdef CONFIG_STACK_GROWSUP 196 DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP), 197 DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN), 198 #else 199 DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN), 200 #endif 201 }; 202 203 #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT) 204 #define VM_READ INIT_VM_FLAG(READ) 205 #define VM_WRITE INIT_VM_FLAG(WRITE) 206 #define VM_EXEC INIT_VM_FLAG(EXEC) 207 #define VM_SHARED INIT_VM_FLAG(SHARED) 208 #define VM_MAYREAD INIT_VM_FLAG(MAYREAD) 209 #define VM_MAYWRITE INIT_VM_FLAG(MAYWRITE) 210 #define VM_MAYEXEC INIT_VM_FLAG(MAYEXEC) 211 #define VM_MAYSHARE INIT_VM_FLAG(MAYSHARE) 212 #define VM_GROWSDOWN INIT_VM_FLAG(GROWSDOWN) 213 #ifdef CONFIG_MMU 214 #define VM_UFFD_MISSING INIT_VM_FLAG(UFFD_MISSING) 215 #else 216 #define VM_UFFD_MISSING VM_NONE 217 #define VM_MAYOVERLAY INIT_VM_FLAG(MAYOVERLAY) 218 #endif 219 #define VM_PFNMAP INIT_VM_FLAG(PFNMAP) 220 #define VM_MAYBE_GUARD INIT_VM_FLAG(MAYBE_GUARD) 221 #define VM_UFFD_WP INIT_VM_FLAG(UFFD_WP) 222 #define VM_LOCKED INIT_VM_FLAG(LOCKED) 223 #define VM_IO INIT_VM_FLAG(IO) 224 #define VM_SEQ_READ INIT_VM_FLAG(SEQ_READ) 225 #define VM_RAND_READ INIT_VM_FLAG(RAND_READ) 226 #define VM_DONTCOPY INIT_VM_FLAG(DONTCOPY) 227 #define VM_DONTEXPAND INIT_VM_FLAG(DONTEXPAND) 228 #define VM_LOCKONFAULT INIT_VM_FLAG(LOCKONFAULT) 229 #define VM_ACCOUNT INIT_VM_FLAG(ACCOUNT) 230 #define VM_NORESERVE INIT_VM_FLAG(NORESERVE) 231 #define VM_HUGETLB INIT_VM_FLAG(HUGETLB) 232 #define VM_SYNC INIT_VM_FLAG(SYNC) 233 #define VM_ARCH_1 INIT_VM_FLAG(ARCH_1) 234 #define VM_WIPEONFORK INIT_VM_FLAG(WIPEONFORK) 235 #define VM_DONTDUMP INIT_VM_FLAG(DONTDUMP) 236 #ifdef CONFIG_MEM_SOFT_DIRTY 237 #define VM_SOFTDIRTY INIT_VM_FLAG(SOFTDIRTY) 238 #else 239 #define VM_SOFTDIRTY VM_NONE 240 #endif 241 #define VM_MIXEDMAP INIT_VM_FLAG(MIXEDMAP) 242 #define VM_HUGEPAGE INIT_VM_FLAG(HUGEPAGE) 243 #define VM_NOHUGEPAGE INIT_VM_FLAG(NOHUGEPAGE) 244 #define VM_MERGEABLE INIT_VM_FLAG(MERGEABLE) 245 #define VM_STACK INIT_VM_FLAG(STACK) 246 #ifdef CONFIG_STACK_GROWS_UP 247 #define VM_STACK_EARLY INIT_VM_FLAG(STACK_EARLY) 248 #else 249 #define VM_STACK_EARLY VM_NONE 250 #endif 251 #ifdef CONFIG_ARCH_HAS_PKEYS 252 #define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT) 253 /* Despite the naming, these are FLAGS not bits. */ 254 #define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0) 255 #define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1) 256 #define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2) 257 #if CONFIG_ARCH_PKEY_BITS > 3 258 #define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3) 259 #else 260 #define VM_PKEY_BIT3 VM_NONE 261 #endif /* CONFIG_ARCH_PKEY_BITS > 3 */ 262 #if CONFIG_ARCH_PKEY_BITS > 4 263 #define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4) 264 #else 265 #define VM_PKEY_BIT4 VM_NONE 266 #endif /* CONFIG_ARCH_PKEY_BITS > 4 */ 267 #endif /* CONFIG_ARCH_HAS_PKEYS */ 268 #if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) 269 #define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK) 270 #else 271 #define VM_SHADOW_STACK VM_NONE 272 #endif 273 #if defined(CONFIG_PPC64) 274 #define VM_SAO INIT_VM_FLAG(SAO) 275 #elif defined(CONFIG_PARISC) 276 #define VM_GROWSUP INIT_VM_FLAG(GROWSUP) 277 #elif defined(CONFIG_SPARC64) 278 #define VM_SPARC_ADI INIT_VM_FLAG(SPARC_ADI) 279 #define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR) 280 #elif defined(CONFIG_ARM64) 281 #define VM_ARM64_BTI INIT_VM_FLAG(ARM64_BTI) 282 #define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR) 283 #elif !defined(CONFIG_MMU) 284 #define VM_MAPPED_COPY INIT_VM_FLAG(MAPPED_COPY) 285 #endif 286 #ifndef VM_GROWSUP 287 #define VM_GROWSUP VM_NONE 288 #endif 289 #ifdef CONFIG_ARM64_MTE 290 #define VM_MTE INIT_VM_FLAG(MTE) 291 #define VM_MTE_ALLOWED INIT_VM_FLAG(MTE_ALLOWED) 292 #else 293 #define VM_MTE VM_NONE 294 #define VM_MTE_ALLOWED VM_NONE 295 #endif 296 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 297 #define VM_UFFD_MINOR INIT_VM_FLAG(UFFD_MINOR) 298 #else 299 #define VM_UFFD_MINOR VM_NONE 300 #endif 301 #ifdef CONFIG_64BIT 302 #define VM_ALLOW_ANY_UNCACHED INIT_VM_FLAG(ALLOW_ANY_UNCACHED) 303 #define VM_SEALED INIT_VM_FLAG(SEALED) 304 #else 305 #define VM_ALLOW_ANY_UNCACHED VM_NONE 306 #define VM_SEALED VM_NONE 307 #endif 308 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32) 309 #define VM_DROPPABLE INIT_VM_FLAG(DROPPABLE) 310 #else 311 #define VM_DROPPABLE VM_NONE 312 #endif 313 314 /* Bits set in the VMA until the stack is in its final location */ 315 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY) 316 317 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) 318 319 /* Common data flag combinations */ 320 #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ 321 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 322 #define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ 323 VM_MAYWRITE | VM_MAYEXEC) 324 #define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ 325 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 326 327 #ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ 328 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC 329 #endif 330 331 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 332 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 333 #endif 334 335 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK) 336 337 #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) 338 339 /* VMA basic access permission flags */ 340 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) 341 342 /* 343 * Special vmas that are non-mergable, non-mlock()able. 344 */ 345 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) 346 347 #define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE) 348 #define TASK_SIZE_LOW DEFAULT_MAP_WINDOW 349 #define TASK_SIZE_MAX DEFAULT_MAP_WINDOW 350 #define STACK_TOP TASK_SIZE_LOW 351 #define STACK_TOP_MAX TASK_SIZE_MAX 352 353 /* This mask represents all the VMA flag bits used by mlock */ 354 #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) 355 356 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) 357 358 #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ 359 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 360 361 #define RLIMIT_STACK 3 /* max stack size */ 362 #define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ 363 364 #define CAP_IPC_LOCK 14 365 366 #define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD) 367 368 #define VM_IGNORE_MERGE VM_STICKY 369 370 #define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD) 371 372 #define pgprot_val(x) ((x).pgprot) 373 #define __pgprot(x) ((pgprot_t) { (x) } ) 374 375 #define for_each_vma(__vmi, __vma) \ 376 while (((__vma) = vma_next(&(__vmi))) != NULL) 377 378 /* The MM code likes to work with exclusive end addresses */ 379 #define for_each_vma_range(__vmi, __vma, __end) \ 380 while (((__vma) = vma_find(&(__vmi), (__end))) != NULL) 381 382 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) 383 384 #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT)) 385 386 #define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr) 387 #define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr) 388 389 #define AS_MM_ALL_LOCKS 2 390 391 #define swap(a, b) \ 392 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) 393 394 /* 395 * Flags for bug emulation. 396 * 397 * These occupy the top three bytes. 398 */ 399 enum { 400 READ_IMPLIES_EXEC = 0x0400000, 401 }; 402 403 struct vma_iterator { 404 struct ma_state mas; 405 }; 406 407 #define VMA_ITERATOR(name, __mm, __addr) \ 408 struct vma_iterator name = { \ 409 .mas = { \ 410 .tree = &(__mm)->mm_mt, \ 411 .index = __addr, \ 412 .node = NULL, \ 413 .status = ma_start, \ 414 }, \ 415 } 416 417 #define DEFINE_MUTEX(mutexname) \ 418 struct mutex mutexname = {} 419 420 #define DECLARE_BITMAP(name, bits) \ 421 unsigned long name[BITS_TO_LONGS(bits)] 422 423 #define EMPTY_VMA_FLAGS ((vma_flags_t){ }) 424 425 #define MAPCOUNT_ELF_CORE_MARGIN (5) 426 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) 427 428 static __always_inline bool vma_flags_empty(const vma_flags_t *flags) 429 { 430 const unsigned long *bitmap = flags->__vma_flags; 431 432 return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS); 433 } 434 435 /* What action should be taken after an .mmap_prepare call is complete? */ 436 enum mmap_action_type { 437 MMAP_NOTHING, /* Mapping is complete, no further action. */ 438 MMAP_REMAP_PFN, /* Remap PFN range. */ 439 MMAP_IO_REMAP_PFN, /* I/O remap PFN range. */ 440 }; 441 442 /* 443 * Describes an action an mmap_prepare hook can instruct to be taken to complete 444 * the mapping of a VMA. Specified in vm_area_desc. 445 */ 446 struct mmap_action { 447 union { 448 /* Remap range. */ 449 struct { 450 unsigned long start; 451 unsigned long start_pfn; 452 unsigned long size; 453 pgprot_t pgprot; 454 } remap; 455 }; 456 enum mmap_action_type type; 457 458 /* 459 * If specified, this hook is invoked after the selected action has been 460 * successfully completed. Note that the VMA write lock still held. 461 * 462 * The absolute minimum ought to be done here. 463 * 464 * Returns 0 on success, or an error code. 465 */ 466 int (*success_hook)(const struct vm_area_struct *vma); 467 468 /* 469 * If specified, this hook is invoked when an error occurred when 470 * attempting the selection action. 471 * 472 * The hook can return an error code in order to filter the error, but 473 * it is not valid to clear the error here. 474 */ 475 int (*error_hook)(int err); 476 477 /* 478 * This should be set in rare instances where the operation required 479 * that the rmap should not be able to access the VMA until 480 * completely set up. 481 */ 482 bool hide_from_rmap_until_complete :1; 483 }; 484 485 /* Operations which modify VMAs. */ 486 enum vma_operation { 487 VMA_OP_SPLIT, 488 VMA_OP_MERGE_UNFAULTED, 489 VMA_OP_REMAP, 490 VMA_OP_FORK, 491 }; 492 493 /* 494 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to 495 * manipulate mutable fields which will cause those fields to be updated in the 496 * resultant VMA. 497 * 498 * Helper functions are not required for manipulating any field. 499 */ 500 struct vm_area_desc { 501 /* Immutable state. */ 502 const struct mm_struct *const mm; 503 struct file *const file; /* May vary from vm_file in stacked callers. */ 504 unsigned long start; 505 unsigned long end; 506 507 /* Mutable fields. Populated with initial state. */ 508 pgoff_t pgoff; 509 struct file *vm_file; 510 vma_flags_t vma_flags; 511 pgprot_t page_prot; 512 513 /* Write-only fields. */ 514 const struct vm_operations_struct *vm_ops; 515 void *private_data; 516 517 /* Take further action? */ 518 struct mmap_action action; 519 }; 520 521 struct vm_area_struct { 522 /* The first cache line has the info for VMA tree walking. */ 523 524 union { 525 struct { 526 /* VMA covers [vm_start; vm_end) addresses within mm */ 527 unsigned long vm_start; 528 unsigned long vm_end; 529 }; 530 freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */ 531 }; 532 533 struct mm_struct *vm_mm; /* The address space we belong to. */ 534 pgprot_t vm_page_prot; /* Access permissions of this VMA. */ 535 536 /* 537 * Flags, see mm.h. 538 * To modify use vm_flags_{init|reset|set|clear|mod} functions. 539 */ 540 union { 541 const vm_flags_t vm_flags; 542 vma_flags_t flags; 543 }; 544 545 #ifdef CONFIG_PER_VMA_LOCK 546 /* 547 * Can only be written (using WRITE_ONCE()) while holding both: 548 * - mmap_lock (in write mode) 549 * - vm_refcnt bit at VMA_LOCK_OFFSET is set 550 * Can be read reliably while holding one of: 551 * - mmap_lock (in read or write mode) 552 * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1 553 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout 554 * while holding nothing (except RCU to keep the VMA struct allocated). 555 * 556 * This sequence counter is explicitly allowed to overflow; sequence 557 * counter reuse can only lead to occasional unnecessary use of the 558 * slowpath. 559 */ 560 unsigned int vm_lock_seq; 561 #endif 562 563 /* 564 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma 565 * list, after a COW of one of the file pages. A MAP_SHARED vma 566 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack 567 * or brk vma (with NULL file) can only be in an anon_vma list. 568 */ 569 struct list_head anon_vma_chain; /* Serialized by mmap_lock & 570 * page_table_lock */ 571 struct anon_vma *anon_vma; /* Serialized by page_table_lock */ 572 573 /* Function pointers to deal with this struct. */ 574 const struct vm_operations_struct *vm_ops; 575 576 /* Information about our backing store: */ 577 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE 578 units */ 579 struct file * vm_file; /* File we map to (can be NULL). */ 580 void * vm_private_data; /* was vm_pte (shared mem) */ 581 582 #ifdef CONFIG_SWAP 583 atomic_long_t swap_readahead_info; 584 #endif 585 #ifndef CONFIG_MMU 586 struct vm_region *vm_region; /* NOMMU mapping region */ 587 #endif 588 #ifdef CONFIG_NUMA 589 struct mempolicy *vm_policy; /* NUMA policy for the VMA */ 590 #endif 591 #ifdef CONFIG_NUMA_BALANCING 592 struct vma_numab_state *numab_state; /* NUMA Balancing state */ 593 #endif 594 #ifdef CONFIG_PER_VMA_LOCK 595 /* Unstable RCU readers are allowed to read this. */ 596 refcount_t vm_refcnt; 597 #endif 598 /* 599 * For areas with an address space and backing store, 600 * linkage into the address_space->i_mmap interval tree. 601 * 602 */ 603 struct { 604 struct rb_node rb; 605 unsigned long rb_subtree_last; 606 } shared; 607 #ifdef CONFIG_ANON_VMA_NAME 608 /* 609 * For private and shared anonymous mappings, a pointer to a null 610 * terminated string containing the name given to the vma, or NULL if 611 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. 612 */ 613 struct anon_vma_name *anon_name; 614 #endif 615 struct vm_userfaultfd_ctx vm_userfaultfd_ctx; 616 } __randomize_layout; 617 618 struct vm_operations_struct { 619 void (*open)(struct vm_area_struct * area); 620 /** 621 * @close: Called when the VMA is being removed from the MM. 622 * Context: User context. May sleep. Caller holds mmap_lock. 623 */ 624 void (*close)(struct vm_area_struct * area); 625 /* Called any time before splitting to check if it's allowed */ 626 int (*may_split)(struct vm_area_struct *area, unsigned long addr); 627 int (*mremap)(struct vm_area_struct *area); 628 /* 629 * Called by mprotect() to make driver-specific permission 630 * checks before mprotect() is finalised. The VMA must not 631 * be modified. Returns 0 if mprotect() can proceed. 632 */ 633 int (*mprotect)(struct vm_area_struct *vma, unsigned long start, 634 unsigned long end, unsigned long newflags); 635 vm_fault_t (*fault)(struct vm_fault *vmf); 636 vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order); 637 vm_fault_t (*map_pages)(struct vm_fault *vmf, 638 pgoff_t start_pgoff, pgoff_t end_pgoff); 639 unsigned long (*pagesize)(struct vm_area_struct * area); 640 641 /* notification that a previously read-only page is about to become 642 * writable, if an error is returned it will cause a SIGBUS */ 643 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); 644 645 /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ 646 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); 647 648 /* called by access_process_vm when get_user_pages() fails, typically 649 * for use by special VMAs. See also generic_access_phys() for a generic 650 * implementation useful for any iomem mapping. 651 */ 652 int (*access)(struct vm_area_struct *vma, unsigned long addr, 653 void *buf, int len, int write); 654 655 /* Called by the /proc/PID/maps code to ask the vma whether it 656 * has a special name. Returning non-NULL will also cause this 657 * vma to be dumped unconditionally. */ 658 const char *(*name)(struct vm_area_struct *vma); 659 660 #ifdef CONFIG_NUMA 661 /* 662 * set_policy() op must add a reference to any non-NULL @new mempolicy 663 * to hold the policy upon return. Caller should pass NULL @new to 664 * remove a policy and fall back to surrounding context--i.e. do not 665 * install a MPOL_DEFAULT policy, nor the task or system default 666 * mempolicy. 667 */ 668 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 669 670 /* 671 * get_policy() op must add reference [mpol_get()] to any policy at 672 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure 673 * in mm/mempolicy.c will do this automatically. 674 * get_policy() must NOT add a ref if the policy at (vma,addr) is not 675 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. 676 * If no [shared/vma] mempolicy exists at the addr, get_policy() op 677 * must return NULL--i.e., do not "fallback" to task or system default 678 * policy. 679 */ 680 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 681 unsigned long addr, pgoff_t *ilx); 682 #endif 683 #ifdef CONFIG_FIND_NORMAL_PAGE 684 /* 685 * Called by vm_normal_page() for special PTEs in @vma at @addr. This 686 * allows for returning a "normal" page from vm_normal_page() even 687 * though the PTE indicates that the "struct page" either does not exist 688 * or should not be touched: "special". 689 * 690 * Do not add new users: this really only works when a "normal" page 691 * was mapped, but then the PTE got changed to something weird (+ 692 * marked special) that would not make pte_pfn() identify the originally 693 * inserted page. 694 */ 695 struct page *(*find_normal_page)(struct vm_area_struct *vma, 696 unsigned long addr); 697 #endif /* CONFIG_FIND_NORMAL_PAGE */ 698 }; 699 700 struct vm_unmapped_area_info { 701 #define VM_UNMAPPED_AREA_TOPDOWN 1 702 unsigned long flags; 703 unsigned long length; 704 unsigned long low_limit; 705 unsigned long high_limit; 706 unsigned long align_mask; 707 unsigned long align_offset; 708 unsigned long start_gap; 709 }; 710 711 struct pagetable_move_control { 712 struct vm_area_struct *old; /* Source VMA. */ 713 struct vm_area_struct *new; /* Destination VMA. */ 714 unsigned long old_addr; /* Address from which the move begins. */ 715 unsigned long old_end; /* Exclusive address at which old range ends. */ 716 unsigned long new_addr; /* Address to move page tables to. */ 717 unsigned long len_in; /* Bytes to remap specified by user. */ 718 719 bool need_rmap_locks; /* Do rmap locks need to be taken? */ 720 bool for_stack; /* Is this an early temp stack being moved? */ 721 }; 722 723 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \ 724 struct pagetable_move_control name = { \ 725 .old = old_, \ 726 .new = new_, \ 727 .old_addr = old_addr_, \ 728 .old_end = (old_addr_) + (len_), \ 729 .new_addr = new_addr_, \ 730 .len_in = len_, \ 731 } 732 733 static inline void vma_iter_invalidate(struct vma_iterator *vmi) 734 { 735 mas_pause(&vmi->mas); 736 } 737 738 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) 739 { 740 return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot)); 741 } 742 743 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags) 744 { 745 return __pgprot(vm_flags); 746 } 747 748 static inline bool mm_flags_test(int flag, const struct mm_struct *mm) 749 { 750 return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags)); 751 } 752 753 /* 754 * Copy value to the first system word of VMA flags, non-atomically. 755 * 756 * IMPORTANT: This does not overwrite bytes past the first system word. The 757 * caller must account for this. 758 */ 759 static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value) 760 { 761 *ACCESS_PRIVATE(flags, __vma_flags) = value; 762 } 763 764 /* 765 * Copy value to the first system word of VMA flags ONCE, non-atomically. 766 * 767 * IMPORTANT: This does not overwrite bytes past the first system word. The 768 * caller must account for this. 769 */ 770 static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value) 771 { 772 unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags); 773 774 WRITE_ONCE(*bitmap, value); 775 } 776 777 /* Update the first system word of VMA flags setting bits, non-atomically. */ 778 static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value) 779 { 780 unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags); 781 782 *bitmap |= value; 783 } 784 785 /* Update the first system word of VMA flags clearing bits, non-atomically. */ 786 static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value) 787 { 788 unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags); 789 790 *bitmap &= ~value; 791 } 792 793 static __always_inline void vma_flags_clear_all(vma_flags_t *flags) 794 { 795 bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS); 796 } 797 798 static __always_inline void vma_flags_set_flag(vma_flags_t *flags, 799 vma_flag_t bit) 800 { 801 unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags); 802 803 __set_bit((__force int)bit, bitmap); 804 } 805 806 /* Use when VMA is not part of the VMA tree and needs no locking */ 807 static inline void vm_flags_init(struct vm_area_struct *vma, 808 vm_flags_t flags) 809 { 810 vma_flags_clear_all(&vma->flags); 811 vma_flags_overwrite_word(&vma->flags, flags); 812 } 813 814 /* 815 * Use when VMA is part of the VMA tree and modifications need coordination 816 * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and 817 * it should be locked explicitly beforehand. 818 */ 819 static inline void vm_flags_reset(struct vm_area_struct *vma, 820 vm_flags_t flags) 821 { 822 vma_assert_write_locked(vma); 823 vm_flags_init(vma, flags); 824 } 825 826 static inline void vm_flags_reset_once(struct vm_area_struct *vma, 827 vm_flags_t flags) 828 { 829 vma_assert_write_locked(vma); 830 /* 831 * The user should only be interested in avoiding reordering of 832 * assignment to the first word. 833 */ 834 vma_flags_clear_all(&vma->flags); 835 vma_flags_overwrite_word_once(&vma->flags, flags); 836 } 837 838 static inline void vm_flags_set(struct vm_area_struct *vma, 839 vm_flags_t flags) 840 { 841 vma_start_write(vma); 842 vma_flags_set_word(&vma->flags, flags); 843 } 844 845 static inline void vm_flags_clear(struct vm_area_struct *vma, 846 vm_flags_t flags) 847 { 848 vma_start_write(vma); 849 vma_flags_clear_word(&vma->flags, flags); 850 } 851 852 static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits); 853 854 #define mk_vma_flags(...) __mk_vma_flags(COUNT_ARGS(__VA_ARGS__), \ 855 (const vma_flag_t []){__VA_ARGS__}) 856 857 static __always_inline bool vma_flags_test(const vma_flags_t *flags, 858 vma_flag_t bit) 859 { 860 const unsigned long *bitmap = flags->__vma_flags; 861 862 return test_bit((__force int)bit, bitmap); 863 } 864 865 static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags, 866 vma_flags_t to_and) 867 { 868 vma_flags_t dst; 869 unsigned long *bitmap_dst = dst.__vma_flags; 870 const unsigned long *bitmap = flags->__vma_flags; 871 const unsigned long *bitmap_to_and = to_and.__vma_flags; 872 873 bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS); 874 return dst; 875 } 876 877 #define vma_flags_and(flags, ...) \ 878 vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__)) 879 880 static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags, 881 vma_flags_t to_test) 882 { 883 const unsigned long *bitmap = flags->__vma_flags; 884 const unsigned long *bitmap_to_test = to_test.__vma_flags; 885 886 return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS); 887 } 888 889 #define vma_flags_test_any(flags, ...) \ 890 vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__)) 891 892 static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags, 893 vma_flags_t to_test) 894 { 895 const unsigned long *bitmap = flags->__vma_flags; 896 const unsigned long *bitmap_to_test = to_test.__vma_flags; 897 898 return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS); 899 } 900 901 #define vma_flags_test_all(flags, ...) \ 902 vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__)) 903 904 static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set) 905 { 906 unsigned long *bitmap = flags->__vma_flags; 907 const unsigned long *bitmap_to_set = to_set.__vma_flags; 908 909 bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS); 910 } 911 912 #define vma_flags_set(flags, ...) \ 913 vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__)) 914 915 static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear) 916 { 917 unsigned long *bitmap = flags->__vma_flags; 918 const unsigned long *bitmap_to_clear = to_clear.__vma_flags; 919 920 bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS); 921 } 922 923 #define vma_flags_clear(flags, ...) \ 924 vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__)) 925 926 static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags, 927 const vma_flags_t *flags_other) 928 { 929 vma_flags_t dst; 930 const unsigned long *bitmap_other = flags_other->__vma_flags; 931 const unsigned long *bitmap = flags->__vma_flags; 932 unsigned long *bitmap_dst = dst.__vma_flags; 933 934 bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS); 935 return dst; 936 } 937 938 static inline bool vma_test_all_mask(const struct vm_area_struct *vma, 939 vma_flags_t flags) 940 { 941 return vma_flags_test_all_mask(&vma->flags, flags); 942 } 943 944 #define vma_test_all(vma, ...) \ 945 vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__)) 946 947 static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags) 948 { 949 return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == 950 (VM_SHARED | VM_MAYWRITE); 951 } 952 953 static inline void vma_set_flags_mask(struct vm_area_struct *vma, 954 vma_flags_t flags) 955 { 956 vma_flags_set_mask(&vma->flags, flags); 957 } 958 959 #define vma_set_flags(vma, ...) \ 960 vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__)) 961 962 static __always_inline bool vma_desc_test(const struct vm_area_desc *desc, 963 vma_flag_t bit) 964 { 965 return vma_flags_test(&desc->vma_flags, bit); 966 } 967 968 static inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc, 969 vma_flags_t flags) 970 { 971 return vma_flags_test_any_mask(&desc->vma_flags, flags); 972 } 973 974 #define vma_desc_test_any(desc, ...) \ 975 vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__)) 976 977 static inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc, 978 vma_flags_t flags) 979 { 980 return vma_flags_test_all_mask(&desc->vma_flags, flags); 981 } 982 983 #define vma_desc_test_all(desc, ...) \ 984 vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__)) 985 986 static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc, 987 vma_flags_t flags) 988 { 989 vma_flags_set_mask(&desc->vma_flags, flags); 990 } 991 992 #define vma_desc_set_flags(desc, ...) \ 993 vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__)) 994 995 static inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc, 996 vma_flags_t flags) 997 { 998 vma_flags_clear_mask(&desc->vma_flags, flags); 999 } 1000 1001 #define vma_desc_clear_flags(desc, ...) \ 1002 vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__)) 1003 1004 static inline bool is_shared_maywrite(const vma_flags_t *flags) 1005 { 1006 return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT); 1007 } 1008 1009 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma) 1010 { 1011 return is_shared_maywrite(&vma->flags); 1012 } 1013 1014 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) 1015 { 1016 /* 1017 * Uses mas_find() to get the first VMA when the iterator starts. 1018 * Calling mas_next() could skip the first entry. 1019 */ 1020 return mas_find(&vmi->mas, ULONG_MAX); 1021 } 1022 1023 /* 1024 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these 1025 * assertions should be made either under mmap_write_lock or when the object 1026 * has been isolated under mmap_write_lock, ensuring no competing writers. 1027 */ 1028 static inline void vma_assert_attached(struct vm_area_struct *vma) 1029 { 1030 WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); 1031 } 1032 1033 static inline void vma_assert_detached(struct vm_area_struct *vma) 1034 { 1035 WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); 1036 } 1037 1038 static inline void vma_assert_write_locked(struct vm_area_struct *); 1039 static inline void vma_mark_attached(struct vm_area_struct *vma) 1040 { 1041 vma_assert_write_locked(vma); 1042 vma_assert_detached(vma); 1043 refcount_set_release(&vma->vm_refcnt, 1); 1044 } 1045 1046 static inline void vma_mark_detached(struct vm_area_struct *vma) 1047 { 1048 vma_assert_write_locked(vma); 1049 vma_assert_attached(vma); 1050 /* We are the only writer, so no need to use vma_refcount_put(). */ 1051 if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { 1052 /* 1053 * Reader must have temporarily raised vm_refcnt but it will 1054 * drop it without using the vma since vma is write-locked. 1055 */ 1056 } 1057 } 1058 1059 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) 1060 { 1061 memset(vma, 0, sizeof(*vma)); 1062 vma->vm_mm = mm; 1063 vma->vm_ops = &vma_dummy_vm_ops; 1064 INIT_LIST_HEAD(&vma->anon_vma_chain); 1065 vma->vm_lock_seq = UINT_MAX; 1066 } 1067 1068 /* 1069 * These are defined in vma.h, but sadly vm_stat_account() is referenced by 1070 * kernel/fork.c, so we have to these broadly available there, and temporarily 1071 * define them here to resolve the dependency cycle. 1072 */ 1073 #define is_exec_mapping(flags) \ 1074 ((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC) 1075 1076 #define is_stack_mapping(flags) \ 1077 (((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK)) 1078 1079 #define is_data_mapping(flags) \ 1080 ((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE) 1081 1082 static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, 1083 long npages) 1084 { 1085 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); 1086 1087 if (is_exec_mapping(flags)) 1088 mm->exec_vm += npages; 1089 else if (is_stack_mapping(flags)) 1090 mm->stack_vm += npages; 1091 else if (is_data_mapping(flags)) 1092 mm->data_vm += npages; 1093 } 1094 1095 #undef is_exec_mapping 1096 #undef is_stack_mapping 1097 #undef is_data_mapping 1098 1099 static inline void vm_unacct_memory(long pages) 1100 { 1101 vm_acct_memory(-pages); 1102 } 1103 1104 static inline void mapping_allow_writable(struct address_space *mapping) 1105 { 1106 atomic_inc(&mapping->i_mmap_writable); 1107 } 1108 1109 static inline 1110 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) 1111 { 1112 return mas_find(&vmi->mas, max - 1); 1113 } 1114 1115 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi, 1116 unsigned long start, unsigned long end, gfp_t gfp) 1117 { 1118 __mas_set_range(&vmi->mas, start, end - 1); 1119 mas_store_gfp(&vmi->mas, NULL, gfp); 1120 if (unlikely(mas_is_err(&vmi->mas))) 1121 return -ENOMEM; 1122 1123 return 0; 1124 } 1125 1126 static inline void vma_set_anonymous(struct vm_area_struct *vma) 1127 { 1128 vma->vm_ops = NULL; 1129 } 1130 1131 /* Declared in vma.h. */ 1132 static inline void set_vma_from_desc(struct vm_area_struct *vma, 1133 struct vm_area_desc *desc); 1134 1135 static inline int __compat_vma_mmap(const struct file_operations *f_op, 1136 struct file *file, struct vm_area_struct *vma) 1137 { 1138 struct vm_area_desc desc = { 1139 .mm = vma->vm_mm, 1140 .file = file, 1141 .start = vma->vm_start, 1142 .end = vma->vm_end, 1143 1144 .pgoff = vma->vm_pgoff, 1145 .vm_file = vma->vm_file, 1146 .vma_flags = vma->flags, 1147 .page_prot = vma->vm_page_prot, 1148 1149 .action.type = MMAP_NOTHING, /* Default */ 1150 }; 1151 int err; 1152 1153 err = f_op->mmap_prepare(&desc); 1154 if (err) 1155 return err; 1156 1157 mmap_action_prepare(&desc.action, &desc); 1158 set_vma_from_desc(vma, &desc); 1159 return mmap_action_complete(&desc.action, vma); 1160 } 1161 1162 static inline int compat_vma_mmap(struct file *file, 1163 struct vm_area_struct *vma) 1164 { 1165 return __compat_vma_mmap(file->f_op, file, vma); 1166 } 1167 1168 1169 static inline void vma_iter_init(struct vma_iterator *vmi, 1170 struct mm_struct *mm, unsigned long addr) 1171 { 1172 mas_init(&vmi->mas, &mm->mm_mt, addr); 1173 } 1174 1175 static inline unsigned long vma_pages(struct vm_area_struct *vma) 1176 { 1177 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 1178 } 1179 1180 static inline void mmap_assert_locked(struct mm_struct *); 1181 static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, 1182 unsigned long start_addr, 1183 unsigned long end_addr) 1184 { 1185 unsigned long index = start_addr; 1186 1187 mmap_assert_locked(mm); 1188 return mt_find(&mm->mm_mt, &index, end_addr - 1); 1189 } 1190 1191 static inline 1192 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) 1193 { 1194 return mtree_load(&mm->mm_mt, addr); 1195 } 1196 1197 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi) 1198 { 1199 return mas_prev(&vmi->mas, 0); 1200 } 1201 1202 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr) 1203 { 1204 mas_set(&vmi->mas, addr); 1205 } 1206 1207 static inline bool vma_is_anonymous(struct vm_area_struct *vma) 1208 { 1209 return !vma->vm_ops; 1210 } 1211 1212 /* Defined in vma.h, so temporarily define here to avoid circular dependency. */ 1213 #define vma_iter_load(vmi) \ 1214 mas_walk(&(vmi)->mas) 1215 1216 static inline struct vm_area_struct * 1217 find_vma_prev(struct mm_struct *mm, unsigned long addr, 1218 struct vm_area_struct **pprev) 1219 { 1220 struct vm_area_struct *vma; 1221 VMA_ITERATOR(vmi, mm, addr); 1222 1223 vma = vma_iter_load(&vmi); 1224 *pprev = vma_prev(&vmi); 1225 if (!vma) 1226 vma = vma_next(&vmi); 1227 return vma; 1228 } 1229 1230 #undef vma_iter_load 1231 1232 static inline void vma_iter_free(struct vma_iterator *vmi) 1233 { 1234 mas_destroy(&vmi->mas); 1235 } 1236 1237 static inline 1238 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi) 1239 { 1240 return mas_next_range(&vmi->mas, ULONG_MAX); 1241 } 1242 1243 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); 1244 1245 /* Update vma->vm_page_prot to reflect vma->vm_flags. */ 1246 static inline void vma_set_page_prot(struct vm_area_struct *vma) 1247 { 1248 vm_flags_t vm_flags = vma->vm_flags; 1249 pgprot_t vm_page_prot; 1250 1251 /* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */ 1252 vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags)); 1253 1254 if (vma_wants_writenotify(vma, vm_page_prot)) { 1255 vm_flags &= ~VM_SHARED; 1256 /* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */ 1257 vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags)); 1258 } 1259 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ 1260 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); 1261 } 1262 1263 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma) 1264 { 1265 if (vma->vm_flags & VM_GROWSDOWN) 1266 return stack_guard_gap; 1267 1268 /* See reasoning around the VM_SHADOW_STACK definition */ 1269 if (vma->vm_flags & VM_SHADOW_STACK) 1270 return PAGE_SIZE; 1271 1272 return 0; 1273 } 1274 1275 static inline unsigned long vm_start_gap(struct vm_area_struct *vma) 1276 { 1277 unsigned long gap = stack_guard_start_gap(vma); 1278 unsigned long vm_start = vma->vm_start; 1279 1280 vm_start -= gap; 1281 if (vm_start > vma->vm_start) 1282 vm_start = 0; 1283 return vm_start; 1284 } 1285 1286 static inline unsigned long vm_end_gap(struct vm_area_struct *vma) 1287 { 1288 unsigned long vm_end = vma->vm_end; 1289 1290 if (vma->vm_flags & VM_GROWSUP) { 1291 vm_end += stack_guard_gap; 1292 if (vm_end < vma->vm_end) 1293 vm_end = -PAGE_SIZE; 1294 } 1295 return vm_end; 1296 } 1297 1298 static inline bool vma_is_accessible(struct vm_area_struct *vma) 1299 { 1300 return vma->vm_flags & VM_ACCESS_FLAGS; 1301 } 1302 1303 static inline bool mlock_future_ok(const struct mm_struct *mm, 1304 vm_flags_t vm_flags, unsigned long bytes) 1305 { 1306 unsigned long locked_pages, limit_pages; 1307 1308 if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) 1309 return true; 1310 1311 locked_pages = bytes >> PAGE_SHIFT; 1312 locked_pages += mm->locked_vm; 1313 1314 limit_pages = rlimit(RLIMIT_MEMLOCK); 1315 limit_pages >>= PAGE_SHIFT; 1316 1317 return locked_pages <= limit_pages; 1318 } 1319 1320 static inline bool map_deny_write_exec(unsigned long old, unsigned long new) 1321 { 1322 /* If MDWE is disabled, we have nothing to deny. */ 1323 if (mm_flags_test(MMF_HAS_MDWE, current->mm)) 1324 return false; 1325 1326 /* If the new VMA is not executable, we have nothing to deny. */ 1327 if (!(new & VM_EXEC)) 1328 return false; 1329 1330 /* Under MDWE we do not accept newly writably executable VMAs... */ 1331 if (new & VM_WRITE) 1332 return true; 1333 1334 /* ...nor previously non-executable VMAs becoming executable. */ 1335 if (!(old & VM_EXEC)) 1336 return true; 1337 1338 return false; 1339 } 1340 1341 static inline int mapping_map_writable(struct address_space *mapping) 1342 { 1343 return atomic_inc_unless_negative(&mapping->i_mmap_writable) ? 1344 0 : -EPERM; 1345 } 1346 1347 /* Did the driver provide valid mmap hook configuration? */ 1348 static inline bool can_mmap_file(struct file *file) 1349 { 1350 bool has_mmap = file->f_op->mmap; 1351 bool has_mmap_prepare = file->f_op->mmap_prepare; 1352 1353 /* Hooks are mutually exclusive. */ 1354 if (WARN_ON_ONCE(has_mmap && has_mmap_prepare)) 1355 return false; 1356 if (!has_mmap && !has_mmap_prepare) 1357 return false; 1358 1359 return true; 1360 } 1361 1362 static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma) 1363 { 1364 if (file->f_op->mmap_prepare) 1365 return compat_vma_mmap(file, vma); 1366 1367 return file->f_op->mmap(file, vma); 1368 } 1369 1370 static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc) 1371 { 1372 return file->f_op->mmap_prepare(desc); 1373 } 1374 1375 static inline void vma_set_file(struct vm_area_struct *vma, struct file *file) 1376 { 1377 /* Changing an anonymous vma with this is illegal */ 1378 get_file(file); 1379 swap(vma->vm_file, file); 1380 fput(file); 1381 } 1382 1383 extern int sysctl_max_map_count; 1384 static inline int get_sysctl_max_map_count(void) 1385 { 1386 return READ_ONCE(sysctl_max_map_count); 1387 } 1388