/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/* Forward declarations to avoid header cycle. */
struct vm_area_struct;
static inline void vma_start_write(struct vm_area_struct *vma);

extern const struct vm_operations_struct vma_dummy_vm_ops;
extern unsigned long stack_guard_gap;
extern unsigned long rlimit(unsigned int limit);
struct task_struct *get_current(void);

#define MMF_HAS_MDWE	28
#define current get_current()

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/* PARTIALLY implemented types. */
struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */

	union {
		vm_flags_t def_flags;
		vma_flags_t def_vma_flags;
	};

	mm_flags_t flags;		/* Must use mm_flags_* helpers to access */
};

struct address_space {
	struct rb_root_cached i_mmap;
	unsigned long flags;
	atomic_t i_mmap_writable;
};

struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};

struct file {
	struct address_space *f_mapping;
	const struct file_operations *f_op;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;
};

struct kref {
	refcount_t refcount;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

/*
 * Contains declarations that are DUPLICATED from kernel source in order to
 * facilitate userland VMA testing.
 *
 * These must be kept in sync with kernel source.
 */

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

#define VM_NONE	0x00000000

typedef int __bitwise vma_flag_t;

#define ACCESS_PRIVATE(p, member) ((p)->member)

#define DECLARE_VMA_BIT(name, bitnum) \
	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
#define DECLARE_VMA_BIT_ALIAS(name, aliased) \
	VMA_ ## name ## _BIT = VMA_ ## aliased ## _BIT
enum {
	DECLARE_VMA_BIT(READ, 0),
	DECLARE_VMA_BIT(WRITE, 1),
	DECLARE_VMA_BIT(EXEC, 2),
	DECLARE_VMA_BIT(SHARED, 3),
	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
	DECLARE_VMA_BIT(MAYWRITE, 5),
	DECLARE_VMA_BIT(MAYEXEC, 6),
	DECLARE_VMA_BIT(MAYSHARE, 7),
	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
#ifdef CONFIG_MMU
	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
#else
	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
	DECLARE_VMA_BIT(MAYOVERLAY, 9),
#endif /* CONFIG_MMU */
	/* Page-ranges managed without "struct page", just pure PFN */
	DECLARE_VMA_BIT(PFNMAP, 10),
	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
	DECLARE_VMA_BIT(LOCKED, 13),
	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
	/* These bits are reused, we define specific uses below. */
	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
	/*
	 * This flag is used to connect VFIO to arch specific KVM code. It
	 * indicates that the memory under this VMA is safe for use with any
	 * non-cachable memory type inside KVM. Some VFIO devices, on some
	 * platforms, are thought to be unsafe and can cause machine crashes
	 * if KVM does not lock down the memory type.
	 */
	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
#ifdef CONFIG_PPC32
	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
#else
	DECLARE_VMA_BIT(DROPPABLE, 40),
#endif
	DECLARE_VMA_BIT(UFFD_MINOR, 41),
	DECLARE_VMA_BIT(SEALED, 42),
	/* Flags that reuse flags above. */
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
#if defined(CONFIG_X86_USER_SHADOW_STACK)
	/*
	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
	 * support in core mm.
	 *
	 * These VMAs will get a single end guard page. This helps userspace
	 * protect itself from attacks. A single page is enough for current
	 * shadow stack archs (x86). See the comments near alloc_shstk() in
	 * arch/x86/kernel/shstk.c for more details on the guard size.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
#elif defined(CONFIG_ARM64_GCS)
	/*
	 * arm64's Guarded Control Stack implements similar functionality and
	 * has similar constraints to shadow stacks.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
#endif
	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),	/* Strong Access Ordering (powerpc) */
	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),	/* parisc */
	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
#ifdef CONFIG_STACK_GROWSUP
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
#else
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
#endif
};

#define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
#define VM_READ		INIT_VM_FLAG(READ)
#define VM_WRITE	INIT_VM_FLAG(WRITE)
#define VM_EXEC		INIT_VM_FLAG(EXEC)
#define VM_SHARED	INIT_VM_FLAG(SHARED)
#define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
#define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
#define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
#define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
#define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
#else
#define VM_UFFD_MISSING	VM_NONE
#define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
#endif
#define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
#define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
#define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
#define VM_LOCKED	INIT_VM_FLAG(LOCKED)
#define VM_IO		INIT_VM_FLAG(IO)
#define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
#define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
#define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
#define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
#define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
#define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
#define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
#define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
#define VM_SYNC		INIT_VM_FLAG(SYNC)
#define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
#define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
#define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
#ifdef CONFIG_MEM_SOFT_DIRTY
#define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
#else
#define VM_SOFTDIRTY	VM_NONE
#endif
#define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
#define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
#define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
#define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
#define VM_STACK	INIT_VM_FLAG(STACK)
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
#else
#define VM_STACK_EARLY	VM_NONE
#endif
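
/*
 * Illustrative sketch (not part of the kernel header): the mprotect()
 * bit-layout invariant noted in the enum above can be spot-checked at build
 * time, assuming a C11 static_assert is available in the test environment:
 *
 *	static_assert(VM_MAYREAD >> 4 == VM_READ);
 *	static_assert(VM_MAYWRITE >> 4 == VM_WRITE);
 *	static_assert(VM_MAYEXEC >> 4 == VM_EXEC);
 */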
#ifdef CONFIG_ARCH_HAS_PKEYS
#define VM_PKEY_SHIFT	((__force int)VMA_HIGH_ARCH_0_BIT)
/* Despite the naming, these are FLAGS not bits. */
#define VM_PKEY_BIT0	INIT_VM_FLAG(PKEY_BIT0)
#define VM_PKEY_BIT1	INIT_VM_FLAG(PKEY_BIT1)
#define VM_PKEY_BIT2	INIT_VM_FLAG(PKEY_BIT2)
#if CONFIG_ARCH_PKEY_BITS > 3
#define VM_PKEY_BIT3	INIT_VM_FLAG(PKEY_BIT3)
#else
#define VM_PKEY_BIT3	VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 3 */
#if CONFIG_ARCH_PKEY_BITS > 4
#define VM_PKEY_BIT4	INIT_VM_FLAG(PKEY_BIT4)
#else
#define VM_PKEY_BIT4	VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
#else
#define VM_SHADOW_STACK	VM_NONE
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO		INIT_VM_FLAG(SAO)
#elif defined(CONFIG_PARISC)
#define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
#elif defined(CONFIG_SPARC64)
#define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif defined(CONFIG_ARM64)
#define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif !defined(CONFIG_MMU)
#define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
#endif
#ifndef VM_GROWSUP
#define VM_GROWSUP	VM_NONE
#endif
#ifdef CONFIG_ARM64_MTE
#define VM_MTE		INIT_VM_FLAG(MTE)
#define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
#else
#define VM_MTE		VM_NONE
#define VM_MTE_ALLOWED	VM_NONE
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
#define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
#else
#define VM_UFFD_MINOR	VM_NONE
#endif
#ifdef CONFIG_64BIT
#define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
#define VM_SEALED	INIT_VM_FLAG(SEALED)
#else
#define VM_ALLOW_ANY_UNCACHED	VM_NONE
#define VM_SEALED	VM_NONE
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
#define VM_DROPPABLE	INIT_VM_FLAG(DROPPABLE)
#else
#define VM_DROPPABLE	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS
#endif

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define RLIMIT_STACK	3	/* max stack size */
#define RLIMIT_MEMLOCK	8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK	14

#ifdef CONFIG_MEM_SOFT_DIRTY
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
#else
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
#endif

#define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS

#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define AS_MM_ALL_LOCKS 2

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC = 0x0400000,
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define EMPTY_VMA_FLAGS ((vma_flags_t){ })

#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
}

/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
};

/*
 * Describes an action an mmap_prepare hook can instruct to be taken to
 * complete the mapping of a VMA. Specified in vm_area_desc.
 */
struct mmap_action {
	union {
		/* Remap range. */
		struct {
			unsigned long start;
			unsigned long start_pfn;
			unsigned long size;
			pgprot_t pgprot;
		} remap;
	};
	enum mmap_action_type type;

	/*
	 * If specified, this hook is invoked after the selected action has
	 * been successfully completed. Note that the VMA write lock is still
	 * held.
	 *
	 * The absolute minimum ought to be done here.
	 *
	 * Returns 0 on success, or an error code.
	 */
	int (*success_hook)(const struct vm_area_struct *vma);

	/*
	 * If specified, this hook is invoked when an error occurred while
	 * attempting the selected action.
	 *
	 * The hook can return an error code in order to filter the error, but
	 * it is not valid to clear the error here.
	 */
	int (*error_hook)(int err);

	/*
	 * This should be set in rare instances where the operation requires
	 * that the rmap not be able to access the VMA until it is completely
	 * set up.
	 */
	bool hide_from_rmap_until_complete :1;
};

/* Operations which modify VMAs. */
enum vma_operation {
	VMA_OP_SPLIT,
	VMA_OP_MERGE_UNFAULTED,
	VMA_OP_REMAP,
	VMA_OP_FORK,
};

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	const struct mm_struct *const mm;
	struct file *const file; /* May vary from vm_file in stacked callers. */
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *vm_file;
	vma_flags_t vma_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;

	/* Take further action? */
	struct mmap_action action;
};
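
/*
 * Usage sketch (hypothetical driver, illustrative only): an .mmap_prepare
 * hook fills in the descriptor and requests that a PFN remap be performed
 * once the VMA is established. my_dev_mmap_prepare() and my_dev_base_pfn
 * are assumptions for illustration, not kernel API:
 *
 *	static int my_dev_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->action.type = MMAP_REMAP_PFN;
 *		desc->action.remap.start = desc->start;
 *		desc->action.remap.start_pfn = my_dev_base_pfn + desc->pgoff;
 *		desc->action.remap.size = desc->end - desc->start;
 *		desc->action.remap.pgprot = desc->page_prot;
 *		return 0;
 *	}
 */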
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vma_flags_t flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not exist
	 * or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the originally
	 * inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old;	/* Source VMA. */
	struct vm_area_struct *new;	/* Destination VMA. */
	unsigned long old_addr;		/* Address from which the move begins. */
	unsigned long old_end;		/* Exclusive address at which old range ends. */
	unsigned long new_addr;		/* Address to move page tables to. */
	unsigned long len_in;		/* Bytes to remap specified by user. */

	bool need_rmap_locks;		/* Do rmap locks need to be taken? */
	bool for_stack;			/* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
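
/*
 * Usage sketch (illustrative only, hypothetical variables): describing a
 * page-table move of len bytes from old_addr in old_vma to new_addr in
 * new_vma, as an mremap()-style caller would:
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 * pmc.old_end is derived as old_addr + len, and pmc.need_rmap_locks /
 * pmc.for_stack are zero-initialised to false.
 */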
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Copy value to the first system word of VMA flags, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
{
	*ACCESS_PRIVATE(flags, __vma_flags) = value;
}

/*
 * Copy value to the first system word of VMA flags ONCE, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
{
	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);

	WRITE_ONCE(*bitmap, value);
}

/* Update the first system word of VMA flags setting bits, non-atomically. */
static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
{
	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);

	*bitmap |= value;
}

/* Update the first system word of VMA flags clearing bits, non-atomically. */
static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
{
	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);

	*bitmap &= ~value;
}

static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
{
	bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
}

static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
					       vma_flag_t bit)
{
	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);

	__set_bit((__force int)bit, bitmap);
}

/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word(&vma->flags, flags);
}

/*
 * Use when VMA is part of the VMA tree and modifications need coordination.
 * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
 * it should be locked explicitly beforehand.
 */
static inline void vm_flags_reset(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	vm_flags_init(vma, flags);
}

static inline void vm_flags_reset_once(struct vm_area_struct *vma,
				       vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	/*
	 * The user should only be interested in avoiding reordering of
	 * assignment to the first word.
	 */
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word_once(&vma->flags, flags);
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_set_word(&vma->flags, flags);
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_clear_word(&vma->flags, flags);
}

static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits);

#define mk_vma_flags(...) __mk_vma_flags(COUNT_ARGS(__VA_ARGS__), \
					 (const vma_flag_t []){__VA_ARGS__})

static __always_inline bool vma_flags_test(const vma_flags_t *flags,
					   vma_flag_t bit)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return test_bit((__force int)bit, bitmap);
}

static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
						      vma_flags_t to_and)
{
	vma_flags_t dst;
	unsigned long *bitmap_dst = dst.__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_and = to_and.__vma_flags;

	bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
	return dst;
}

#define vma_flags_and(flags, ...) \
	vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
						    vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_any(flags, ...) \
	vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
						    vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_all(flags, ...) \
	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_set = to_set.__vma_flags;

	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
}

#define vma_flags_set(flags, ...) \
	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;

	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
}

#define vma_flags_clear(flags, ...) \
	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
						       const vma_flags_t *flags_other)
{
	vma_flags_t dst;
	const unsigned long *bitmap_other = flags_other->__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	unsigned long *bitmap_dst = dst.__vma_flags;

	bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
	return dst;
}

static inline bool vma_test_all_mask(const struct vm_area_struct *vma,
				     vma_flags_t flags)
{
	return vma_flags_test_all_mask(&vma->flags, flags);
}

#define vma_test_all(vma, ...) \
	vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))

static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
	       (VM_SHARED | VM_MAYWRITE);
}

static inline void vma_set_flags_mask(struct vm_area_struct *vma,
				      vma_flags_t flags)
{
	vma_flags_set_mask(&vma->flags, flags);
}

#define vma_set_flags(vma, ...) \
	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
					  vma_flag_t bit)
{
	return vma_flags_test(&desc->vma_flags, bit);
}

static inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
					  vma_flags_t flags)
{
	return vma_flags_test_any_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_any(desc, ...) \
	vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))

static inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
					  vma_flags_t flags)
{
	return vma_flags_test_all_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_all(desc, ...) \
	vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))

static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
					   vma_flags_t flags)
{
	vma_flags_set_mask(&desc->vma_flags, flags);
}

#define vma_desc_set_flags(desc, ...) \
	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
					     vma_flags_t flags)
{
	vma_flags_clear_mask(&desc->vma_flags, flags);
}

#define vma_desc_clear_flags(desc, ...) \
	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static inline bool is_shared_maywrite(const vma_flags_t *flags)
{
	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(&vma->flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */
#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
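
/*
 * Illustrative example (not kernel code): how vm_stat_account() classifies
 * mappings. Executable, non-writable mappings count toward exec_vm, stack
 * mappings toward stack_vm, and plain writable private data toward data_vm;
 * total_vm is incremented in every case:
 *
 *	vm_stat_account(mm, VM_READ | VM_EXEC, 4);	(exec_vm += 4)
 *	vm_stat_account(mm, VM_READ | VM_WRITE, 4);	(data_vm += 4)
 */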
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
				     unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
				     struct vm_area_desc *desc);

static inline int __compat_vma_mmap(const struct file_operations *f_op,
				    struct file *file, struct vm_area_struct *vma)
{
	struct vm_area_desc desc = {
		.mm = vma->vm_mm,
		.file = file,
		.start = vma->vm_start,
		.end = vma->vm_end,

		.pgoff = vma->vm_pgoff,
		.vm_file = vma->vm_file,
		.vma_flags = vma->flags,
		.page_prot = vma->vm_page_prot,

		.action.type = MMAP_NOTHING, /* Default */
	};
	int err;

	err = f_op->mmap_prepare(&desc);
	if (err)
		return err;

	mmap_action_prepare(&desc.action, &desc);
	set_vma_from_desc(vma, &desc);
	return mmap_action_complete(&desc.action, vma);
}

static inline int compat_vma_mmap(struct file *file,
				  struct vm_area_struct *vma)
{
	return __compat_vma_mmap(file->f_op, file, vma);
}

static inline void vma_iter_init(struct vma_iterator *vmi,
				 struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
							    unsigned long start_addr,
							    unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load
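
/*
 * Usage sketch (illustrative only): the lookup helpers above differ in their
 * range semantics. vma_lookup() returns the VMA covering a single address
 * (or NULL), while find_vma_intersection() returns the first VMA overlapping
 * [start_addr, end_addr) and expects mmap_lock to be held:
 *
 *	struct vm_area_struct *vma = vma_lookup(mm, addr);
 *	struct vm_area_struct *first = find_vma_intersection(mm, start, end);
 */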
static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool mlock_future_ok(const struct mm_struct *mm,
				   vm_flags_t vm_flags, unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
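
/*
 * Illustrative summary (not kernel code) of map_deny_write_exec() once
 * MMF_HAS_MDWE is set, where old/new are the prior and requested VMA flags:
 *
 *	!(new & VM_EXEC)                      -> allowed
 *	(new & VM_WRITE) && (new & VM_EXEC)   -> denied
 *	!(old & VM_EXEC) && (new & VM_EXEC)   -> denied
 *	(old & VM_EXEC) && !(new & VM_WRITE)  -> allowed
 */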
static inline int mapping_map_writable(struct address_space *mapping)
{
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

extern int sysctl_max_map_count;
static inline int get_sysctl_max_map_count(void)
{
	return READ_ONCE(sysctl_max_map_count);
}
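
/*
 * Usage sketch (illustrative only): how a caller might combine the mmap
 * helpers above; the error value here is an assumption for illustration:
 *
 *	if (!can_mmap_file(file))
 *		return -EINVAL;
 *	err = vfs_mmap(file, vma);	(dispatches to ->mmap_prepare via
 *					 compat_vma_mmap(), or to ->mmap)
 */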