/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTE to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
	/* 1 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next;	/* Modified by vma_merge(). */
	struct vm_area_struct *vma;	/* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}
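
/*
 * Illustrative sketch (not taken from mm/vma.c): a caller driving a merge
 * can use vmg_nomem() to tell an allocation failure apart from a merge that
 * was simply not possible, e.g.:
 *
 *	struct vm_area_struct *merged = vma_merge_new_range(&vmg);
 *
 *	if (!merged && vmg_nomem(&vmg))
 *		return -ENOMEM;		(propagate OOM rather than falling back)
 */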
/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

int vma_expand(struct vma_merge_struct *vmg);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
				     struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
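
/*
 * Sketch of how the macros above are typically used (assumed usage, shown
 * for illustration only): VMG_VMA_STATE() seeds a merge descriptor from an
 * existing VMA, deriving the pgoff via vma_pgoff_offset(), after which the
 * range can be grown with vma_expand():
 *
 *	VMG_VMA_STATE(vmg, vmi, prev, vma, vma->vm_start, new_end);
 *
 *	if (vma_expand(&vmg))
 *		return -ENOMEM;
 *
 * Here new_end is a hypothetical, page-aligned new end address.
 */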
#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}
#endif

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the
 * vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the
 * vmas.  If that's not possible because the ptes are cleared (and
 * vm_ops->close() may have been called), then a NULL is written over the
 * vmas and the vmas are removed (munmap() completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost.  Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
	/* Clean up the insertion of the unfortunate gap */
	vms_complete_munmap_vmas(vms, mas_detach);
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);
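
/*
 * Simplified sketch of the gather/complete flow the helpers above implement
 * (assumed ordering, loosely mirroring a do_vmi_align_munmap()-style caller):
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		goto gather_failed;	(reattach_vmas() undoes the gather)
 *	...				(update the maple tree, drop locks)
 *	vms_complete_munmap_vmas(&vms, &mas_detach);
 *
 * On a failure after the maple tree has already been touched,
 * vms_abort_munmap_vmas() is the fallback rather than reattach_vmas().
 */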
/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
		unsigned long addr, unsigned long len, pgoff_t pgoff,
		bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}
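
/*
 * Example of the intended pairing for the unlink_file_vma_batch_*() helpers
 * declared above (a sketch of the calling convention, not a verbatim caller):
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for each file-backed vma being torn down:
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 *
 * The batch is intended to amortise i_mmap locking across consecutive VMAs
 * that map the same file, flushing whenever the file changes or the batch
 * array fills.
 */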
/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}
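
/*
 * Sketch of the preallocate-then-store pattern the iterator helpers above
 * are built around (assumed pairing; vma_start_write() is shown only to
 * indicate where VMA locking would normally sit):
 *
 *	if (vma_iter_prealloc(vmi, vma))
 *		return -ENOMEM;		(fail while failure is still cheap)
 *
 *	vma_start_write(vma);
 *	vma_iter_store(vmi, vma);	(cannot fail: nodes are preallocated)
 */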
static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check if a vma is sealed for modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif	/* __MM_VMA_H */