/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
};

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_prepare(struct vma_prepare *vp);

/* Required for do_brk_flags(). */
void init_vma_prep(struct vma_prepare *vp,
		   struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm);

int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff,
	       struct vm_area_struct *next);

int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
				     struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
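/*
 * Example (illustrative sketch only, not part of this API): a caller that
 * owns an iterator can store a vma over its own range and bail out on
 * allocation failure.  The locals below are hypothetical.
 *
 *	VMA_ITERATOR(vmi, mm, vma->vm_start);
 *
 *	if (vma_iter_store_gfp(&vmi, vma, GFP_KERNEL))
 *		return -ENOMEM;		(maple tree node allocation failed)
 */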
#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}
#endif

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}
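/*
 * Example (illustrative sketch, loosely following the munmap path in this
 * directory; the locals and labels are hypothetical): the gather/complete
 * pair brackets the operation, and reattach_vmas() is the undo step while
 * the vmas are merely detached.
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		goto gather_failed;	(gather cleans up after itself)
 *
 *	... write the change into the mm's maple tree ...
 *	if (error)
 *		goto undo;
 *
 *	vms_complete_munmap_vmas(&vms, &mas_detach);	(point of no return)
 *	return 0;
 *
 * undo:
 *	reattach_vmas(&mas_detach);
 * gather_failed:
 *	return error;
 */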
/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma munmap struct
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas, free up the maple tree used to track the vmas.
 * If that's not possible because the ptes are cleared (and vm_ops->close() may
 * have been called), then a NULL is written over the vmas and the vmas are
 * removed (munmap() completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost.  Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
		pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
			     current->comm, current->pid);
		/* Leaving vmas detached and in-tree may hamper recovery */
		reattach_vmas(mas_detach);
	} else {
		/* Clean up the insertion of the unfortunate gap */
		vms_complete_munmap_vmas(vms, mas_detach);
	}
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* Required by mmap_region(). */
bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name);

/* Required by mmap_region() and do_brk_flags(). */
bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name);

struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
				  struct vm_area_struct *prev,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  unsigned long vm_flags,
				  struct mempolicy *policy,
				  struct vm_userfaultfd_ctx uffd_ctx,
				  struct anon_vma_name *anon_name);

/* We are about to modify the VMA's flags. */
static inline struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		  struct vm_area_struct *prev,
		  struct vm_area_struct *vma,
		  unsigned long start, unsigned long end,
		  unsigned long new_flags)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx,
			  anon_vma_name(vma));
}

/* We are about to modify the VMA's flags and/or anon_name. */
static inline struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
}

/* We are about to modify the VMA's memory policy. */
static inline struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
			  new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
}
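/*
 * Example (sketch, in the style of an mprotect-like caller; the locals are
 * hypothetical): vma_modify() and its wrappers return the (possibly merged
 * or split) vma covering [start, end), or an ERR_PTR() on failure.
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, newflags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... vma now spans exactly the region being changed ...
 */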
/* We are about to modify the VMA's flags and/or uffd context. */
static inline struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), new_ctx, anon_vma_name(vma));
}

struct vm_area_struct
*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
		   struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, pgoff_t pgoff);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
		unsigned long addr, unsigned long len, pgoff_t pgoff,
		bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);
unsigned long count_vma_pages_range(struct mm_struct *mm,
				    unsigned long addr, unsigned long end,
				    unsigned long *nr_accounted);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
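/*
 * Example (sketch, hypothetical caller): an mprotect-style path might use
 * the helper above to decide whether individual PTEs are candidates for a
 * manual writable upgrade:
 *
 *	bool try_upgrade = vma_wants_manual_pte_write_upgrade(vma);
 *
 * For a private VM_WRITE mapping this is always true, since COW means a
 * PTE may only be made writable once its page is exclusively owned.
 */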
#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}
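/*
 * Example (sketch, hypothetical caller): vma_iter_store() consumes memory
 * reserved by vma_iter_prealloc(), so the two are paired, and the target
 * range is configured first:
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);	(cannot fail: memory is preallocated)
 *
 * If the operation is abandoned after a successful preallocation, the
 * reserved nodes are dropped with vma_iter_free() from mm/internal.h.
 */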
#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check if a vma is sealed for modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif /* __MM_VMA_H */