/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
	/* 1 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_prepare(struct vma_prepare *vp);

/* Required for do_brk_flags(). */
void init_vma_prep(struct vma_prepare *vp,
		   struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm);
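
/*
 * Illustrative sketch only (not code from this file): based on the comments
 * above, an in-place modification such as the brk() expansion performed by
 * do_brk_flags() is expected to bracket the change with these helpers,
 * roughly as follows.  "new_end" is a hypothetical address; iterator
 * preallocation, per-VMA locking and error handling are elided:
 *
 *	struct vma_prepare vp;
 *
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);
 *	vma->vm_end = new_end;
 *	vma_iter_store(vmi, vma);
 *	vma_complete(&vp, vmi, mm);
 */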

int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff,
	       struct vm_area_struct *next);

int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
				     struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation. Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}
#endif

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);
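
/*
 * Illustrative sketch only (not code from this file): a munmap-style caller
 * such as do_vmi_align_munmap() is expected to drive the helpers above
 * roughly as follows, using an on-stack maple tree to hold the VMAs being
 * detached.  Locking, tree init flags, validation and some error paths are
 * elided, and "mt_detach"/"mas_detach" are just local names for this sketch:
 *
 *	struct maple_tree mt_detach;
 *	MA_STATE(mas_detach, &mt_detach, 0, 0);
 *	struct vma_munmap_struct vms;
 *	int error;
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		return error;
 *
 *	error = mas_store_gfp(&vmi->mas, NULL, GFP_KERNEL);
 *	if (error) {
 *		reattach_vmas(&mas_detach);	(helper defined below)
 *		return error;
 *	}
 *
 *	vms_complete_munmap_vmas(&vms, &mas_detach);
 *	return 0;
 */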

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the
 * vmas.  If that's not possible because the ptes are cleared (and
 * vm_ops->close() may have been called), then a NULL is written over the vmas
 * and the vmas are removed (munmap() completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost.  Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
		pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
			     current->comm, current->pid);
		/* Leaving vmas detached and in-tree may hamper recovery */
		reattach_vmas(mas_detach);
	} else {
		/* Clean up the insertion of the unfortunate gap */
		vms_complete_munmap_vmas(vms, mas_detach);
	}
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* Required by mmap_region(). */
bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name);

/* Required by mmap_region() and do_brk_flags(). */
bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name);

struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
				  struct vm_area_struct *prev,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  unsigned long vm_flags,
				  struct mempolicy *policy,
				  struct vm_userfaultfd_ctx uffd_ctx,
				  struct anon_vma_name *anon_name);

/* We are about to modify the VMA's flags. */
static inline struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		  struct vm_area_struct *prev,
		  struct vm_area_struct *vma,
		  unsigned long start, unsigned long end,
		  unsigned long new_flags)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx,
			  anon_vma_name(vma));
}

/* We are about to modify the VMA's flags and/or anon_name. */
static inline struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
}
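
/*
 * Illustrative sketch only (not code from this file): a caller that changes a
 * property over an arbitrary sub-range, e.g. an mprotect_fixup()-style flag
 * update, is expected to go through the vma_modify*() wrappers so that any
 * necessary split and merge is handled in one place:
 *
 *	vma = vma_modify_flags(vmi, prev, vma, start, end, new_flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * On success the returned VMA spans the requested range and is the one the
 * caller should continue operating on.
 */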

/* We are about to modify the VMA's memory policy. */
static inline struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
			  new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
}

/* We are about to modify the VMA's flags and/or uffd context. */
static inline struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), new_ctx, anon_vma_name(vma));
}

struct vm_area_struct
*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
		   struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, pgoff_t pgoff);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);
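
/*
 * Illustrative sketch only (not code from this file): when unlinking many
 * file-backed VMAs in a row (as free_pgtables() does), the batched variants
 * above are meant to amortise i_mmap lock acquisition across consecutive
 * VMAs that map the same file, rather than taking it once per VMA:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for each file-backed vma being torn down:
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */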

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
		unsigned long addr, unsigned long len, pgoff_t pgoff,
		bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}
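
/*
 * Illustrative sketch only (not code from this file): vma_iter_store() writes
 * using memory preallocated by vma_iter_prealloc(), so the usual pattern (as
 * in vma_link()) is roughly:
 *
 *	vma_iter_config(vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(vmi, vma);
 *
 * vma_iter_store_gfp() above instead allocates as part of the store and may
 * therefore fail with -ENOMEM.
 */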

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check if a vma is sealed for modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif	/* __MM_VMA_H */