// SPDX-License-Identifier: GPL-2.0-or-later

/* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma;
	/*
	 * For convenience, get the prev and next VMAs, which the new VMA
	 * operation requires.
	 */
	vmg->next = vma_next(vmg->vmi);
	vmg->prev = vma_prev(vmg->vmi);
	vma_iter_next_range(vmg->vmi);

	vma = vma_merge_new_range(vmg);
	if (vma)
		vma_assert_attached(vma);

	return vma;
}

/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	return vma_expand(vmg);
}

/*
 * Helper function to reset merge state and the associated VMA iterator to a
 * specified new range.
 */
void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
		   unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags)
{
	vma_iter_set(vmg->vmi, start);

	vmg->prev = NULL;
	vmg->middle = NULL;
	vmg->next = NULL;
	vmg->target = NULL;

	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;
	vmg->vma_flags = vma_flags;

	vmg->just_expand = false;
	vmg->__remove_middle = false;
	vmg->__remove_next = false;
	vmg->__adjust_middle_start = false;
	vmg->__adjust_next_start = false;
}

/* Helper function to set both the VMG range and its anon_vma. */
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
				   unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
				   struct anon_vma *anon_vma)
{
	vmg_set_range(vmg, start, end, pgoff, vma_flags);
	vmg->anon_vma = anon_vma;
}

/*
 * Helper function to try to merge a new VMA.
 *
 * Update vmg and the iterator for it and try to merge, otherwise allocate a new
 * VMA, link it to the maple tree and return it.
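 *
 * The was_merged output parameter reports which of the two paths was taken,
 * e.g. (as used in the tests below):
 *
 *	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vma_flags, &merged);
 *	ASSERT_TRUE(merged);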
 */
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
						struct vma_merge_struct *vmg,
						unsigned long start, unsigned long end,
						pgoff_t pgoff, vma_flags_t vma_flags,
						bool *was_merged)
{
	struct vm_area_struct *merged;

	vmg_set_range(vmg, start, end, pgoff, vma_flags);

	merged = merge_new(vmg);
	if (merged) {
		*was_merged = true;
		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
		return merged;
	}

	*was_merged = false;

	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);

	return alloc_and_link_vma(mm, start, end, pgoff, vma_flags);
}

static bool test_simple_merge(void)
{
	struct vm_area_struct *vma;
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
		.start = 0x1000,
		.end = 0x2000,
		.vma_flags = vma_flags,
		.pgoff = 1,
	};

	ASSERT_FALSE(attach_vma(&mm, vma_left));
	ASSERT_FALSE(attach_vma(&mm, vma_right));

	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_FLAGS_SAME_MASK(&vma->flags, vma_flags);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);

	ASSERT_FALSE(attach_vma(&mm, init_vma));

	/*
	 * The flags will not be changed; vma_modify_flags() performs the
	 * merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, &vma_flags);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
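	 * The original [0, 0x3000) VMA should now be split into [0, 0x1000),
	 * [0x1000, 0x2000) and [0x2000, 0x3000).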
	 */

	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_expand(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.vmi = &vmi,
		.target = vma,
		.start = 0,
		.end = 0x3000,
		.pgoff = 0,
	};

	ASSERT_FALSE(attach_vma(&mm, vma));

	ASSERT_FALSE(expand_existing(&vmg));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_shrink(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0);

	ASSERT_FALSE(attach_vma(&mm, vma));

	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	int count;
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;

	if (is_sticky)
		vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);

	/*
	 * 0123456789abc
	 * AA B       CC
	 */
	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
	ASSERT_NE(vma_a, NULL);
	if (a_is_sticky)
		vma_flags_set_mask(&vma_a->flags, VMA_STICKY_FLAGS);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
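	/* (An avc, i.e. anon_vma_chain, is the linkage object tying a VMA to an anon_vma.) */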
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
	ASSERT_NE(vma_b, NULL);
	if (b_is_sticky)
		vma_flags_set_mask(&vma_b->flags, VMA_STICKY_FLAGS);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vma_flags);
	ASSERT_NE(vma_c, NULL);
	if (c_is_sticky)
		vma_flags_set_mask(&vma_c->flags, VMA_STICKY_FLAGS);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	/*
	 * NO merge.
	 *
	 * 0123456789abc
	 * AA B   **  CC
	 */
	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vma_flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AA*B   DD  CC
	 */
	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky || b_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to PREVIOUS VMA.
	 *
	 * 0123456789abc
	 * AAAA*  DD  CC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAA *DD  CC
	 */
	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vma_flags, &merged);
	ASSERT_EQ(vma, vma_d);
	/* Prepend. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky) /* D uses is_sticky. */
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAA*DDD  CC
	 */
	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAAAAAA *CC
	 */
	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vma_flags, &merged);
	ASSERT_EQ(vma, vma_c);
	/* Prepend C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || c_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAAAAAA*CCC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);
	if (is_sticky || a_is_sticky || c_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Final state.
	 *
	 * 0123456789abc
	 * AAAAAAAAAAAAA
	 */

	count = 0;
	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		detach_free_vma(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done. */
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}

static bool test_merge_new(void)
{
	int i, j, k, l;

	/* Generate every possible permutation of sticky flags. */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			for (k = 0; k < 2; k++)
				for (l = 0; l < 2; l++)
					ASSERT_TRUE(__test_merge_new(i, j, k, l));

	return true;
}

static bool test_vma_merge_special_flags(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vma_flag_t special_flags[] = { VMA_IO_BIT, VMA_DONTEXPAND_BIT,
				       VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT };
	vma_flags_t all_special_flags = EMPTY_VMA_FLAGS;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++)
		vma_flags_set(&all_special_flags, special_flags[i]);
	ASSERT_FLAGS_SAME_MASK(&all_special_flags, VMA_SPECIAL_FLAGS);

	/*
	 * 01234
	 * AAA
	 */
	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAA*
	 *
	 * This should merge if not for the VM_SPECIAL flag.
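	 * (VM_SPECIAL comprises the flags asserted above to equal
	 * VMA_SPECIAL_FLAGS; VMAs carrying any of them are never permitted
	 * to merge.)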
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, vma_flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vma_flag_t special_flag = special_flags[i];
		vma_flags_t flags = vma_flags;

		vma_flags_set(&flags, special_flag);
		vma_left->flags = flags;
		vmg.vma_flags = flags;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAAB
	 *
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
	ASSERT_NE(vma, NULL);
	vmg.middle = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vma_flag_t special_flag = special_flags[i];
		vma_flags_t flags = vma_flags;

		vma_flags_set(&flags, special_flag);
		vma_left->flags = flags;
		vmg.vma_flags = flags;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vma_merge_with_close(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the newly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:     -       -    !NULL
	 *                 [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:     -     !NULL
	 *                 [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 *    -     !NULL   NULL
	 * [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 *
	 * 0123456789
	 * PPP**NNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 *    <>
	 * 0123456789
	 * PPPVV
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * also fail.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 *    <>
	 * 0123456789
	 *    VVNNNN
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures; however, a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}

static bool test_vma_merge_new_with_close(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vma_flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible), e.g.:
	 *
	 *        New VMA
	 *    A  v-------v  B
	 * |-----|       |-----|
	 *  close         close
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 *        New VMA
	 *    A             B
	 * |------------||-----|
	 *  close         close
	 */

	/* Have prev and next have a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, vma_flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	vma_flags_t prev_flags = vma_flags;
	vma_flags_t next_flags = vma_flags;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct anon_vma_chain avc = {};

	if (prev_is_sticky)
		vma_flags_set_mask(&prev_flags, VMA_STICKY_FLAGS);
	if (middle_is_sticky)
		vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
	if (next_is_sticky)
		vma_flags_set_mask(&next_flags, VMA_STICKY_FLAGS);

	/*
	 * Merge right case - partial span.
	 *
	 *    <->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   VNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
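	/*
	 * Note: in this partial-span case the middle VMA is shrunk rather than
	 * deleted, so a close() hook on it does not prevent the merge.
	 */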
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vmg.prev = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 *
	 *   <-->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   NNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vma_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted vma and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
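	 * (Each attempted range below lies strictly inside the middle VMA, so
	 * the result never becomes adjacent to prev or next and no merge is
	 * possible.)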
	 *
	 *     -
	 *      -
	 *       -
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);

	vmg_set_range(&vmg, 0x4000, 0x5000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}

static bool test_merge_existing(void)
{
	int i, j, k;

	/* Generate every possible permutation of sticky flags. */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			for (k = 0; k < 2; k++)
				ASSERT_TRUE(__test_merge_existing(i, j, k));

	return true;
}

static bool test_anon_vma_non_mergeable(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
	struct anon_vma dummy_anon_vma_2;

	/*
	 * In the case of modified VMA merge, merging both left and right VMAs
	 * but where prev and next have incompatible anon_vma objects, we revert
	 * to a merge of prev and VMA:
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
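	 * (prev is given dummy_anon_vma below while next is given
	 * dummy_anon_vma_2, making the two incompatible with one another.)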
	 */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, only adding a new
	 * VMA in a gap between prev and next.
	 *
	 *    <-->
	 * 0123456789
	 * PPP****NNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
	vmg.prev = vma_prev;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	vmg.anon_vma = NULL;
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}

static bool test_dup_anon_vma(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA to delete the next one duplicates next's anon_vma
	 * and assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, vma_flags);
	vmg.target = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
	vmg.anon_vma = &dummy_anon_vma;
	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend  shrink/delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *       |<----->|
	 * *************-------|
	 *       vma      next
	 *  shrink/delete extend
	 */

	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vmi_prealloc_fail(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain avc = {};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);

	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, vma_flags);
	vmg.target = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_merge_extend(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vm_area_struct *vma;

	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vma_flags);
	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);

	/*
	 * Extend a VMA into the gap between itself and the following VMA.
	 * This should result in a merge.
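	 * Extending [0, 0x1000) by 0x2000 reaches 0x3000, leaving it adjacent
	 * to the [0x3000, 0x4000) VMA, so the two merge into [0, 0x4000).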
	 *
	 *  <->
	 * *  *
	 *
	 */

	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_expand_only_mode(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vma_flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of the just_expand flag, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.just_expand = true;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
	vma_assert_attached(vma);

	cleanup_mm(&mm, &vmi);
	return true;
}

static void run_merge_tests(int *num_tests, int *num_fail)
{
	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(expand_only_mode);
}