// SPDX-License-Identifier: GPL-2.0-or-later

/* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma;
	/*
	 * For convenience, get the prev and next VMAs, which the new VMA
	 * operation requires.
	 */
	vmg->next = vma_next(vmg->vmi);
	vmg->prev = vma_prev(vmg->vmi);
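	/*
	 * vma_prev() leaves the iterator positioned over prev, so step it
	 * forward again so that it once more addresses the range being merged.
	 */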
	vma_iter_next_range(vmg->vmi);

	vma = vma_merge_new_range(vmg);
	if (vma)
		vma_assert_attached(vma);

	return vma;
}

/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	return vma_expand(vmg);
}

/*
 * Helper function to reset the merge state and the associated VMA iterator to
 * a specified new range.
 */
void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
		   unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
{
	vma_iter_set(vmg->vmi, start);

	vmg->prev = NULL;
	vmg->middle = NULL;
	vmg->next = NULL;
	vmg->target = NULL;

	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;
	vmg->vm_flags = vm_flags;

	vmg->just_expand = false;
	vmg->__remove_middle = false;
	vmg->__remove_next = false;
	vmg->__adjust_middle_start = false;
	vmg->__adjust_next_start = false;
}

/* Helper function to set both the VMG range and its anon_vma. */
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
				   unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
				   struct anon_vma *anon_vma)
{
	vmg_set_range(vmg, start, end, pgoff, vm_flags);
	vmg->anon_vma = anon_vma;
}

/*
 * Helper function to try to merge a new VMA.
 *
 * Update vmg and the iterator for it and try to merge. If this fails, allocate
 * a new VMA, link it to the maple tree and return it.
 */
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
		struct vma_merge_struct *vmg, unsigned long start,
		unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
		bool *was_merged)
{
	struct vm_area_struct *merged;

	vmg_set_range(vmg, start, end, pgoff, vm_flags);

	merged = merge_new(vmg);
	if (merged) {
		*was_merged = true;
		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
		return merged;
	}

	*was_merged = false;

	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);

	return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
}

static bool test_simple_merge(void)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
		.start = 0x1000,
		.end = 0x2000,
		.vm_flags = vm_flags,
		.pgoff = 1,
	};

	ASSERT_FALSE(attach_vma(&mm, vma_left));
	ASSERT_FALSE(attach_vma(&mm, vma_right));

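	/*
	 * The new range [0x1000, 0x2000) exactly fills the gap between
	 * vma_left and vma_right, so we expect the merge to produce a single
	 * VMA spanning all three ranges.
	 */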
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_flags, vm_flags);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	vm_flags_t flags = VM_READ | VM_MAYREAD;

	ASSERT_FALSE(attach_vma(&mm, init_vma));

	/*
	 * The flags will not be changed; the vma_modify_flags() function
	 * performs the merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, &flags);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
	 */

	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_expand(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.vmi = &vmi,
		.target = vma,
		.start = 0,
		.end = 0x3000,
		.pgoff = 0,
	};

	ASSERT_FALSE(attach_vma(&mm, vma));

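	/*
	 * Expand the existing VMA in place from [0, 0x1000) to [0, 0x3000);
	 * nothing else is mapped, so the expansion should simply succeed.
	 */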
	ASSERT_FALSE(expand_existing(&vmg));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_shrink(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0);

	ASSERT_FALSE(attach_vma(&mm, vma));

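	/* Shrink the VMA in place from [0, 0x3000) down to [0, 0x1000). */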
	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	int count;
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;

	if (is_sticky)
		vm_flags |= VM_STICKY;

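	/*
	 * VM_STICKY is applied either globally via is_sticky or per-VMA via
	 * a/b/c_is_sticky, so each merge below can assert that the flag
	 * propagates to the merged VMA.
	 */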
	/*
	 * 0123456789abc
	 * AA B       CC
	 */
	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
	ASSERT_NE(vma_a, NULL);
	if (a_is_sticky)
		vm_flags_set(vma_a, VM_STICKY);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
	ASSERT_NE(vma_b, NULL);
	if (b_is_sticky)
		vm_flags_set(vma_b, VM_STICKY);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
	ASSERT_NE(vma_c, NULL);
	if (c_is_sticky)
		vm_flags_set(vma_c, VM_STICKY);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	/*
	 * NO merge.
	 *
	 * 0123456789abc
	 * AA B   **  CC
	 */
	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AA*B   DD  CC
	 */
	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky || b_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge to PREVIOUS VMA.
	 *
	 * 0123456789abc
	 * AAAA*  DD  CC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAA *DD  CC
	 */
	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
	ASSERT_EQ(vma, vma_d);
	/* Prepend. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky) /* D uses is_sticky. */
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAA*DDD  CC
	 */
	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAAAAAA *CC
	 */
	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
	ASSERT_EQ(vma, vma_c);
	/* Prepend C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || c_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAAAAAA*CCC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);
	if (is_sticky || a_is_sticky || c_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Final state.
	 *
	 * 0123456789abc
	 * AAAAAAAAAAAAA
	 */

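	/*
	 * Walk the tree and confirm it now contains exactly one VMA covering
	 * the whole range, freeing it as we go.
	 */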
	count = 0;
	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		detach_free_vma(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done. */
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}

static bool test_merge_new(void)
{
	int i, j, k, l;

	/* Generate every possible permutation of sticky flags. */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			for (k = 0; k < 2; k++)
				for (l = 0; l < 2; l++)
					ASSERT_TRUE(__test_merge_new(i, j, k, l));

	return true;
}

static bool test_vma_merge_special_flags(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
	vm_flags_t all_special_flags = 0;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		all_special_flags |= special_flags[i];
	}
	ASSERT_EQ(all_special_flags, VM_SPECIAL);

	/*
	 * 01234
	 * AAA
	 */
	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAA*
	 *
	 * This should merge if not for the VM_SPECIAL flag.
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vm_flags_reset(vma_left, vm_flags | special_flag);
		vmg.vm_flags = vm_flags | special_flag;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAAB
	 *
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
	ASSERT_NE(vma, NULL);
	vmg.middle = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vm_flags_reset(vma_left, vm_flags | special_flag);
		vmg.vm_flags = vm_flags | special_flag;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vma_merge_with_close(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the newly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:     -       -    !NULL
	 *                 [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:     -      !NULL
	 *                 [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 *    -     !NULL   NULL
	 * [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 *
	 * 0123456789
	 * PPP**NNNN
	 * ->
	 * 0123456789
	 * PPPPPPNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 *    <>
	 * 0123456789
	 * PPPVV
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * also fail.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 *    <>
	 * 0123456789
	 *    VVNNNN
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially, this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures; however, a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}

static bool test_vma_merge_new_with_close(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible), e.g.:
	 *
	 *            New VMA
	 *    A    v-------v    B
	 * |-----|           |-----|
	 *  close             close
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 *            New VMA
	 *        A          B
	 * |------------||-----|
	 *  close         close
	 */

	/* Give both prev and next a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	vm_flags_t prev_flags = vm_flags;
	vm_flags_t next_flags = vm_flags;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct anon_vma_chain avc = {};

	if (prev_is_sticky)
		prev_flags |= VM_STICKY;
	if (middle_is_sticky)
		vm_flags |= VM_STICKY;
	if (next_is_sticky)
		next_flags |= VM_STICKY;

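	/*
	 * As with the new VMA tests, VM_STICKY is applied per input VMA so
	 * each case below can assert that the flag survives the merge.
	 */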
	/*
	 * Merge right case - partial span.
	 *
	 *    <->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   VNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vmg.prev = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 *
	 *   <-->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   NNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));

	/* Clear down and reset. We should have deleted vma and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
	 *
	 *     -
	 *      -
	 *       -
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);

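	/*
	 * Each range tested below lies strictly within the middle VMA and
	 * reaches neither of its boundaries, so no merge is possible.
	 */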
	vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}

static bool test_merge_existing(void)
{
	int i, j, k;

	/* Generate every possible permutation of sticky flags. */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			for (k = 0; k < 2; k++)
				ASSERT_TRUE(__test_merge_existing(i, j, k));

	return true;
}

static bool test_anon_vma_non_mergeable(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
	struct anon_vma dummy_anon_vma_2;

	/*
	 * In the case of a modified VMA merge where we would otherwise merge
	 * both the left and right VMAs, but prev and next have incompatible
	 * anon_vma objects, we revert to a merge of prev and the middle VMA:
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
	 */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, only adding a new
	 * VMA in a gap between prev and next.
	 *
	 *    <-->
	 * 0123456789
	 * PPP****NNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
	vmg.prev = vma_prev;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	vmg.anon_vma = NULL;
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}

static bool test_dup_anon_vma(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA and deleting the next one duplicates next's anon_vma
	 * and assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
	vmg.target = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
	vmg.anon_vma = &dummy_anon_vma;
	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend  shrink/delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *     |<----->|
	 * *************-------|
	 *      vma        next
	 *  shrink/delete  extend
	 */

	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vmi_prealloc_fail(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain avc = {};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);

	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags);
	vmg.target = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_merge_extend(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vm_area_struct *vma;

	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags);
	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);

	/*
	 * Extend a VMA into the gap between itself and the following VMA.
	 * This should result in a merge.
	 *
	 *  <->
	 * *  *
	 *
	 */

	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_expand_only_mode(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of the just_expand flag, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.just_expand = true;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
	vma_assert_attached(vma);

	cleanup_mm(&mm, &vmi);
	return true;
}

static void run_merge_tests(int *num_tests, int *num_fail)
{
	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(expand_only_mode);
}
