/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/* Forward declarations to avoid header cycle. */
struct vm_area_struct;
static inline void vma_start_write(struct vm_area_struct *vma);

extern const struct vm_operations_struct vma_dummy_vm_ops;
extern unsigned long stack_guard_gap;
extern unsigned long rlimit(unsigned int limit);
struct task_struct *get_current(void);

#define MMF_HAS_MDWE	28
#define current get_current()

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/* PARTIALLY implemented types. */
struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */

	union {
		vm_flags_t def_flags;
		vma_flags_t def_vma_flags;
	};

	mm_flags_t flags;		/* Must use mm_flags_* helpers to access */
};

struct address_space {
	struct rb_root_cached i_mmap;
	unsigned long flags;
	atomic_t i_mmap_writable;
};

struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};

struct file {
	struct address_space *f_mapping;
	const struct file_operations *f_op;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;
};

struct kref {
	refcount_t refcount;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

/*
 * Contains declarations that are DUPLICATED from kernel source in order to
 * facilitate userland VMA testing.
 *
 * These must be kept in sync with kernel source.
 */

#define VMA_LOCK_OFFSET	0x40000000

typedef struct { unsigned long v; } freeptr_t;

#define VM_NONE		0x00000000

typedef int __bitwise vma_flag_t;

#define ACCESS_PRIVATE(p, member) ((p)->member)

#define DECLARE_VMA_BIT(name, bitnum) \
	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
#define DECLARE_VMA_BIT_ALIAS(name, aliased) \
	VMA_ ## name ## _BIT = VMA_ ## aliased ## _BIT
enum {
	DECLARE_VMA_BIT(READ, 0),
	DECLARE_VMA_BIT(WRITE, 1),
	DECLARE_VMA_BIT(EXEC, 2),
	DECLARE_VMA_BIT(SHARED, 3),
	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
	DECLARE_VMA_BIT(MAYWRITE, 5),
	DECLARE_VMA_BIT(MAYEXEC, 6),
	DECLARE_VMA_BIT(MAYSHARE, 7),
	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
#ifdef CONFIG_MMU
	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
#else
	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
	DECLARE_VMA_BIT(MAYOVERLAY, 9),
#endif /* CONFIG_MMU */
	/* Page-ranges managed without "struct page", just pure PFN */
	DECLARE_VMA_BIT(PFNMAP, 10),
	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
	DECLARE_VMA_BIT(LOCKED, 13),
	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
	/* These bits are reused, we define specific uses below. */
	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
	/*
	 * This flag is used to connect VFIO to arch specific KVM code. It
	 * indicates that the memory under this VMA is safe for use with any
	 * non-cacheable memory type inside KVM. Some VFIO devices, on some
	 * platforms, are thought to be unsafe and can cause machine crashes
	 * if KVM does not lock down the memory type.
	 */
	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
#ifdef CONFIG_PPC32
	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
#else
	DECLARE_VMA_BIT(DROPPABLE, 40),
#endif
	DECLARE_VMA_BIT(UFFD_MINOR, 41),
	DECLARE_VMA_BIT(SEALED, 42),
	/* Flags that reuse flags above. */
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
#if defined(CONFIG_X86_USER_SHADOW_STACK)
	/*
	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
	 * support in core mm.
	 *
	 * These VMAs will get a single end guard page. This helps userspace
	 * protect itself from attacks. A single page is enough for current
	 * shadow stack archs (x86). See the comments near alloc_shstk() in
	 * arch/x86/kernel/shstk.c for more details on the guard size.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
#elif defined(CONFIG_ARM64_GCS)
	/*
	 * arm64's Guarded Control Stack implements similar functionality and
	 * has similar constraints to shadow stacks.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
#endif
	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),		/* Strong Access Ordering (powerpc) */
	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),		/* parisc */
	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
#ifdef CONFIG_STACK_GROWSUP
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
#else
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
#endif
};

#define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
#define VM_READ		INIT_VM_FLAG(READ)
#define VM_WRITE	INIT_VM_FLAG(WRITE)
#define VM_EXEC		INIT_VM_FLAG(EXEC)
#define VM_SHARED	INIT_VM_FLAG(SHARED)
#define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
#define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
#define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
#define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
#define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
#else
#define VM_UFFD_MISSING	VM_NONE
#define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
#endif
#define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
#define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
#define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
#define VM_LOCKED	INIT_VM_FLAG(LOCKED)
#define VM_IO		INIT_VM_FLAG(IO)
#define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
#define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
#define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
#define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
#define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
#define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
#define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
#define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
#define VM_SYNC		INIT_VM_FLAG(SYNC)
#define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
#define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
#define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
#ifdef CONFIG_MEM_SOFT_DIRTY
#define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
#else
#define VM_SOFTDIRTY	VM_NONE
#endif
#define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
#define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
#define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
#define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
#define VM_STACK	INIT_VM_FLAG(STACK)
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
#else
#define VM_STACK_EARLY	VM_NONE
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
#define VM_PKEY_SHIFT	((__force int)VMA_HIGH_ARCH_0_BIT)
/* Despite the naming, these are FLAGS not bits. */
#define VM_PKEY_BIT0	INIT_VM_FLAG(PKEY_BIT0)
#define VM_PKEY_BIT1	INIT_VM_FLAG(PKEY_BIT1)
#define VM_PKEY_BIT2	INIT_VM_FLAG(PKEY_BIT2)
#if CONFIG_ARCH_PKEY_BITS > 3
#define VM_PKEY_BIT3	INIT_VM_FLAG(PKEY_BIT3)
#else
#define VM_PKEY_BIT3	VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 3 */
#if CONFIG_ARCH_PKEY_BITS > 4
#define VM_PKEY_BIT4	INIT_VM_FLAG(PKEY_BIT4)
#else
#define VM_PKEY_BIT4	VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK	VM_NONE
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO		INIT_VM_FLAG(SAO)
#elif defined(CONFIG_PARISC)
#define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
#elif defined(CONFIG_SPARC64)
#define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif defined(CONFIG_ARM64)
#define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif !defined(CONFIG_MMU)
#define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
#endif
#ifndef VM_GROWSUP
#define VM_GROWSUP	VM_NONE
#endif
#ifdef CONFIG_ARM64_MTE
#define VM_MTE		INIT_VM_FLAG(MTE)
#define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
#else
#define VM_MTE		VM_NONE
#define VM_MTE_ALLOWED	VM_NONE
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
#define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
#else
#define VM_UFFD_MINOR	VM_NONE
#endif
#ifdef CONFIG_64BIT
#define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
#define VM_SEALED	INIT_VM_FLAG(SEALED)
#else
#define VM_ALLOW_ANY_UNCACHED	VM_NONE
#define VM_SEALED	VM_NONE
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
#define VM_DROPPABLE	INIT_VM_FLAG(DROPPABLE)
#else
#define VM_DROPPABLE	VM_NONE
#endif
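
/*
 * Editor's note, not duplicated from kernel source: as an illustration of
 * the machinery above, DECLARE_VMA_BIT(READ, 0) expands to
 *
 *	VMA_READ_BIT = ((__force vma_flag_t)0)
 *
 * so INIT_VM_FLAG(READ) evaluates to BIT(0) == 0x00000001, preserving the
 * legacy VM_READ value. Aliased bits share a value, hence e.g.
 * VMA_STACK_BIT == VMA_GROWSDOWN_BIT when CONFIG_STACK_GROWSUP is unset.
 */
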
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
		       VMA_EXEC_BIT : VMA_READ_BIT)

/* Common data flag combinations */
#define VMA_DATA_FLAGS_TSK_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
		VMA_MAYEXEC_BIT)
#define VMA_DATA_FLAGS_NON_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
#define VMA_DATA_FLAGS_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
		VMA_MAYEXEC_BIT)

#ifndef VMA_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VMA_DATA_DEFAULT_FLAGS	VMA_DATA_FLAGS_EXEC
#endif

#ifndef VMA_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VMA_STACK_DEFAULT_FLAGS	VMA_DATA_DEFAULT_FLAGS
#endif

#define VMA_STACK_FLAGS	append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \
		VMA_STACK_BIT, VMA_ACCOUNT_BIT)
/* Temporary until VMA flags conversion complete. */
#define VM_STACK_FLAGS	vma_flags_to_legacy(VMA_STACK_FLAGS)

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
		VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)

#define VMA_REMAP_FLAGS	mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \
		VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define VMA_LOCKED_MASK	mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)

#define RLIMIT_STACK	3	/* max stack size */
#define RLIMIT_MEMLOCK	8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK	14

#ifdef CONFIG_MEM_SOFT_DIRTY
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
#else
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
#endif

#define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS

#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define AS_MM_ALL_LOCKS 2

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC =	0x0400000,
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
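
/*
 * Editor's sketch, not duplicated from kernel source: typical use of the
 * iterator defined above (vma_next(), used by for_each_vma(), is defined
 * later in this header):
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	for_each_vma(vmi, vma)
 *		printf("%lx-%lx\n", vma->vm_start, vma->vm_end);
 */
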
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define EMPTY_VMA_FLAGS ((vma_flags_t){ })

#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
}

/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
};

/*
 * Describes an action that an mmap_prepare hook can request be taken to
 * complete the mapping of a VMA. Specified in vm_area_desc.
 */
struct mmap_action {
	union {
		/* Remap range. */
		struct {
			unsigned long start;
			unsigned long start_pfn;
			unsigned long size;
			pgprot_t pgprot;
		} remap;
	};
	enum mmap_action_type type;

	/*
	 * If specified, this hook is invoked after the selected action has been
	 * successfully completed. Note that the VMA write lock is still held.
	 *
	 * The absolute minimum ought to be done here.
	 *
	 * Returns 0 on success, or an error code.
	 */
	int (*success_hook)(const struct vm_area_struct *vma);

	/*
	 * If specified, this hook is invoked when an error occurred while
	 * attempting the selected action.
	 *
	 * The hook can return an error code in order to filter the error, but
	 * it is not valid to clear the error here.
	 */
	int (*error_hook)(int err);

	/*
	 * This should be set in rare instances where the operation requires
	 * that the rmap should not be able to access the VMA until it is
	 * completely set up.
	 */
	bool hide_from_rmap_until_complete :1;
};

/* Operations which modify VMAs. */
enum vma_operation {
	VMA_OP_SPLIT,
	VMA_OP_MERGE_UNFAULTED,
	VMA_OP_REMAP,
	VMA_OP_FORK,
};

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	const struct mm_struct *const mm;
	struct file *const file;	/* May vary from vm_file in stacked callers. */
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *vm_file;
	vma_flags_t vma_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;

	/* Take further action? */
	struct mmap_action action;
};
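
/*
 * Editor's sketch, not duplicated from kernel source: a hypothetical
 * driver .mmap_prepare hook. It only touches the mutable/write-only
 * descriptor fields and requests PFN remapping via the action rather than
 * remapping directly (foo_vm_ops and foo_base_pfn() are invented names):
 *
 *	static int foo_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_ops = &foo_vm_ops;
 *		desc->action.type = MMAP_REMAP_PFN;
 *		desc->action.remap.start = desc->start;
 *		desc->action.remap.start_pfn = foo_base_pfn(desc->file);
 *		desc->action.remap.size = desc->end - desc->start;
 *		desc->action.remap.pgprot = desc->page_prot;
 *		return 0;
 *	}
 */
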
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr;	/* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vma_flags_t flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not exist
	 * or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the originally
	 * inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};
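
/*
 * Editor's sketch, not duplicated from kernel source: callers of
 * vm_unmapped_area() (not declared in this header) typically fill this in
 * along the lines of arch_get_unmapped_area_topdown():
 *
 *	struct vm_unmapped_area_info info = {
 *		.flags = VM_UNMAPPED_AREA_TOPDOWN,
 *		.length = len,
 *		.low_limit = PAGE_SIZE,
 *		.high_limit = mm->mmap_base,
 *	};
 *
 * Zeroed fields (align_mask, align_offset, start_gap) mean no alignment
 * constraint and no extra leading gap.
 */
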
struct pagetable_move_control {
	struct vm_area_struct *old;	/* Source VMA. */
	struct vm_area_struct *new;	/* Destination VMA. */
	unsigned long old_addr;		/* Address from which the move begins. */
	unsigned long old_end;		/* Exclusive address at which old range ends. */
	unsigned long new_addr;		/* Address to move page tables to. */
	unsigned long len_in;		/* Bytes to remap specified by user. */

	bool need_rmap_locks;		/* Do rmap locks need to be taken? */
	bool for_stack;			/* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
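
/*
 * Editor's sketch, not duplicated from kernel source: mremap()-style
 * callers build the control structure via the convenience macro, e.g.
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 * which derives old_end as old_addr + len and leaves need_rmap_locks and
 * for_stack false.
 */
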
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Copy value to the first system word of VMA flags, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static __always_inline void vma_flags_overwrite_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	bitmap[0] = value;
}

/*
 * Copy value to the first system word of VMA flags ONCE, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static __always_inline void vma_flags_overwrite_word_once(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	WRITE_ONCE(*bitmap, value);
}

/* Update the first system word of VMA flags setting bits, non-atomically. */
static __always_inline void vma_flags_set_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	*bitmap |= value;
}

/* Update the first system word of VMA flags clearing bits, non-atomically. */
static __always_inline void vma_flags_clear_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	*bitmap &= ~value;
}

static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
{
	bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
}

/*
 * Helper function which converts a vma_flags_t value to a legacy vm_flags_t
 * value. This is only valid if the input flags value can be expressed in a
 * system word.
 *
 * Will be removed once the conversion to VMA flags is complete.
 */
static __always_inline vm_flags_t vma_flags_to_legacy(vma_flags_t flags)
{
	return (vm_flags_t)flags.__vma_flags[0];
}

/*
 * Helper function which converts a legacy vm_flags_t value to a vma_flags_t
 * value.
 *
 * Will be removed once the conversion to VMA flags is complete.
 */
static __always_inline vma_flags_t legacy_to_vma_flags(vm_flags_t flags)
{
	vma_flags_t ret = EMPTY_VMA_FLAGS;

	vma_flags_overwrite_word(&ret, flags);
	return ret;
}

static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
		vma_flag_t bit)
{
	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);

	__set_bit((__force int)bit, bitmap);
}

/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word(&vma->flags, flags);
}

/*
 * Use when VMA is part of the VMA tree and modifications need coordination.
 * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
 * it should be locked explicitly beforehand.
 */
static inline void vm_flags_reset(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	vm_flags_init(vma, flags);
}

static inline void vm_flags_reset_once(struct vm_area_struct *vma,
				       vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	/*
	 * The user should only be interested in avoiding reordering of
	 * assignment to the first word.
	 */
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word_once(&vma->flags, flags);
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_set_word(&vma->flags, flags);
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_clear_word(&vma->flags, flags);
}

static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
		size_t count, const vma_flag_t *bits)
{
	int i;

	for (i = 0; i < count; i++)
		vma_flags_set_flag(&flags, bits[i]);
	return flags;
}

#define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS, \
		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})

#define append_vma_flags(flags, ...) __mk_vma_flags(flags, \
		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
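
/*
 * Editor's sketch, not duplicated from kernel source: building and
 * extending flag masks with the helpers above.
 *
 *	vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
 *
 *	flags = append_vma_flags(flags, VMA_MAYREAD_BIT);
 *
 * At this point flags has the READ, WRITE and MAYREAD bits set, so
 * vma_flags_to_legacy(flags) == (VM_READ | VM_WRITE | VM_MAYREAD).
 */
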
static __always_inline int vma_flags_count(const vma_flags_t *flags)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
}

static __always_inline bool vma_flags_test(const vma_flags_t *flags,
		vma_flag_t bit)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return test_bit((__force int)bit, bitmap);
}

static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
		vma_flags_t to_and)
{
	vma_flags_t dst;
	unsigned long *bitmap_dst = dst.__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_and = to_and.__vma_flags;

	bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
	return dst;
}

#define vma_flags_and(flags, ...) \
	vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_any(flags, ...) \
	vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_all(flags, ...) \
	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
		vma_flags_t flagmask)
{
	VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);

	return vma_flags_test_any_mask(flags, flagmask);
}

static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_set = to_set.__vma_flags;

	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
}

#define vma_flags_set(flags, ...) \
	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;

	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
}

#define vma_flags_clear(flags, ...) \
	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
		const vma_flags_t *flags_other)
{
	vma_flags_t dst;
	const unsigned long *bitmap_other = flags_other->__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	unsigned long *bitmap_dst = dst.__vma_flags;

	bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
	return dst;
}

static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
		const vma_flags_t *flags_other)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_other = flags_other->__vma_flags;

	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
}

static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
		vma_flags_t flags_other)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_other = flags_other.__vma_flags;

	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
}

#define vma_flags_same(flags, ...) \
	vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_test(const struct vm_area_struct *vma,
		vma_flag_t bit)
{
	return vma_flags_test(&vma->flags, bit);
}

static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
		vma_flags_t flags)
{
	return vma_flags_test_any_mask(&vma->flags, flags);
}

#define vma_test_any(vma, ...) \
	vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
		vma_flags_t flags)
{
	return vma_flags_test_all_mask(&vma->flags, flags);
}

#define vma_test_all(vma, ...) \
	vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool
vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
{
	return vma_flags_test_single_mask(&vma->flags, flagmask);
}

static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
		vma_flags_t flags)
{
	vma_flags_set_mask(&vma->flags, flags);
}

#define vma_set_flags(vma, ...) \
	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_clear_flags_mask(struct vm_area_struct *vma,
		vma_flags_t flags)
{
	vma_flags_clear_mask(&vma->flags, flags);
}

#define vma_clear_flags(vma, ...) \
	vma_clear_flags_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
		vma_flag_t bit)
{
	return vma_flags_test(&desc->vma_flags, bit);
}

static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
		vma_flags_t flags)
{
	return vma_flags_test_any_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_any(desc, ...) \
	vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
		vma_flags_t flags)
{
	return vma_flags_test_all_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_all(desc, ...) \
	vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
		vma_flags_t flags)
{
	vma_flags_set_mask(&desc->vma_flags, flags);
}

#define vma_desc_set_flags(desc, ...) \
	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
		vma_flags_t flags)
{
	vma_flags_clear_mask(&desc->vma_flags, flags);
}

#define vma_desc_clear_flags(desc, ...) \
	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool is_shared_maywrite(const vma_flags_t *flags)
{
	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(&vma->flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */
#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
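
/*
 * Editor's note, not duplicated from kernel source: given the
 * classification macros above, vm_stat_account(mm, VM_READ | VM_WRITE, 4)
 * bumps total_vm and data_vm by four pages, a VM_STACK mapping is counted
 * in stack_vm, and a non-writable, non-stack VM_EXEC mapping in exec_vm.
 */
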
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

/* Declared in vma.h. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc);

static inline int __compat_vma_mmap(const struct file_operations *f_op,
		struct file *file, struct vm_area_struct *vma)
{
	struct vm_area_desc desc = {
		.mm = vma->vm_mm,
		.file = file,
		.start = vma->vm_start,
		.end = vma->vm_end,

		.pgoff = vma->vm_pgoff,
		.vm_file = vma->vm_file,
		.vma_flags = vma->flags,
		.page_prot = vma->vm_page_prot,

		.action.type = MMAP_NOTHING, /* Default */
	};
	int err;

	err = f_op->mmap_prepare(&desc);
	if (err)
		return err;

	mmap_action_prepare(&desc.action, &desc);
	set_vma_from_desc(vma, &desc);
	return mmap_action_complete(&desc.action, vma);
}

static inline int compat_vma_mmap(struct file *file,
		struct vm_area_struct *vma)
{
	return __compat_vma_mmap(file->f_op, file, vma);
}

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}
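
/*
 * Editor's note, not duplicated from kernel source: assuming the kernel's
 * default stack_guard_gap of 256 pages and 4KiB pages, a VM_GROWSDOWN
 * stack spanning [0x7f0000100000, 0x7f0000200000) yields
 * vm_start_gap() == 0x7f0000000000, i.e. the guard gap is carved out below
 * vm_start; vm_end_gap() extends VM_GROWSUP VMAs symmetrically above. Both
 * clamp rather than wrap on address-space overflow.
 */
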
static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool mlock_future_ok(const struct mm_struct *mm,
		vm_flags_t vm_flags, unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
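
/*
 * Editor's note, not duplicated from kernel source: with MDWE enabled,
 * map_deny_write_exec(VM_READ, VM_READ | VM_WRITE | VM_EXEC) returns true
 * (denied) because the mapping would become writable and executable, and
 * map_deny_write_exec(VM_READ, VM_READ | VM_EXEC) returns true because a
 * previously non-executable mapping would become executable, while
 * map_deny_write_exec(VM_READ | VM_EXEC, VM_READ | VM_EXEC) returns false
 * and is permitted.
 */
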
static inline int mapping_map_writable(struct address_space *mapping)
{
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

extern int sysctl_max_map_count;
static inline int get_sysctl_max_map_count(void)
{
	return READ_ONCE(sysctl_max_map_count);
}

#ifndef pgtable_supports_soft_dirty
#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
#endif