1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6
7 #include "maple-shared.h"
8 #include "vma_internal.h"
9
/* Include this so the header guard is set. */
11 #include "../../../mm/vma.h"
12
13 static bool fail_prealloc;
14
15 /* Then override vma_iter_prealloc() so we can choose to fail it. */
16 #define vma_iter_prealloc(vmi, vma) \
17 (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
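
/*
 * Illustrative sketch (not itself one of the tests below): with the override
 * above, a test can simulate iterator preallocation failure by setting
 * fail_prealloc before driving a merge, e.g.:
 *
 *	fail_prealloc = true;
 *	ASSERT_EQ(merge_existing(&vmg), NULL);
 *	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
 *
 * test_vmi_prealloc_fail() below exercises exactly this path.
 */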
18
19 /*
20 * Directly import the VMA implementation here. Our vma_internal.h wrapper
21 * provides userland-equivalent functionality for everything vma.c uses.
22 */
23 #include "../../../mm/vma.c"
24
25 const struct vm_operations_struct vma_dummy_vm_ops;
26 static struct anon_vma dummy_anon_vma;
27
28 #define ASSERT_TRUE(_expr) \
29 do { \
30 if (!(_expr)) { \
31 fprintf(stderr, \
32 "Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
33 __FILE__, __LINE__, __FUNCTION__, #_expr); \
34 return false; \
35 } \
36 } while (0)
37 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
38 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
39 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
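
/*
 * Example usage (a minimal sketch; test_example() is hypothetical and not
 * part of this file): the ASSERT_* macros return false from the enclosing
 * function on failure, so they are only used in the bool-returning tests
 * below, e.g.:
 *
 *	static bool test_example(void)
 *	{
 *		ASSERT_EQ(1 + 1, 2);
 *		ASSERT_NE((void *)0x1000, NULL);
 *		return true;
 *	}
 */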
40
41 static struct task_struct __current;
42
struct task_struct *get_current(void)
44 {
45 return &__current;
46 }
47
48 /* Helper function to simply allocate a VMA. */
static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
					unsigned long start,
					unsigned long end,
					pgoff_t pgoff,
					vm_flags_t flags)
54 {
55 struct vm_area_struct *ret = vm_area_alloc(mm);
56
57 if (ret == NULL)
58 return NULL;
59
60 ret->vm_start = start;
61 ret->vm_end = end;
62 ret->vm_pgoff = pgoff;
63 ret->__vm_flags = flags;
64
65 return ret;
66 }
67
68 /* Helper function to allocate a VMA and link it to the tree. */
static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end,
						 pgoff_t pgoff,
						 vm_flags_t flags)
74 {
75 struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
76
77 if (vma == NULL)
78 return NULL;
79
80 if (vma_link(mm, vma)) {
81 vm_area_free(vma);
82 return NULL;
83 }
84
85 /*
86 * Reset this counter which we use to track whether writes have
87 * begun. Linking to the tree will have caused this to be incremented,
88 * which means we will get a false positive otherwise.
89 */
90 vma->vm_lock_seq = -1;
91
92 return vma;
93 }
94
95 /* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
97 {
98 /*
 * For convenience, get the prev and next VMAs, which the new VMA merge
 * operation requires.
101 */
102 vmg->next = vma_next(vmg->vmi);
103 vmg->prev = vma_prev(vmg->vmi);
104 vma_iter_next_range(vmg->vmi);
105
106 return vma_merge_new_range(vmg);
107 }
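
/*
 * Minimal usage sketch (values are arbitrary): a caller describes the
 * proposed range via the merge state before invoking the wrapper, e.g.:
 *
 *	vmg_set_range(&vmg, 0x2000, 0x3000, 2, flags);
 *	vma = merge_new(&vmg);
 *
 * try_merge_new_vma() below wraps this pattern and falls back to allocating
 * a new VMA when no merge is possible.
 */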
108
109 /*
110 * Helper function which provides a wrapper around a merge existing VMA
111 * operation.
112 */
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
114 {
115 return vma_merge_existing_range(vmg);
116 }
117
118 /*
119 * Helper function which provides a wrapper around the expansion of an existing
120 * VMA.
121 */
static int expand_existing(struct vma_merge_struct *vmg)
123 {
124 return vma_expand(vmg);
125 }
126
127 /*
 * Helper function to reset the merge state and the associated VMA iterator to a
129 * specified new range.
130 */
static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
133 {
134 vma_iter_set(vmg->vmi, start);
135
136 vmg->prev = NULL;
137 vmg->next = NULL;
138 vmg->vma = NULL;
139
140 vmg->start = start;
141 vmg->end = end;
142 vmg->pgoff = pgoff;
143 vmg->flags = flags;
144 }
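
/*
 * Example (a sketch using arbitrary values): when testing a merge of an
 * existing VMA, the caller resets the range and then fills in prev/vma
 * explicitly, e.g.:
 *
 *	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
 *	vmg.prev = vma_prev;
 *	vmg.vma = vma;
 *	ASSERT_EQ(merge_existing(&vmg), vma_prev);
 */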
145
146 /*
147 * Helper function to try to merge a new VMA.
148 *
 * Update vmg and the iterator for it, then attempt the merge; if this fails,
 * allocate a new VMA, link it to the maple tree and return it.
151 */
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
						struct vma_merge_struct *vmg,
						unsigned long start, unsigned long end,
						pgoff_t pgoff, vm_flags_t flags,
						bool *was_merged)
157 {
158 struct vm_area_struct *merged;
159
160 vmg_set_range(vmg, start, end, pgoff, flags);
161
162 merged = merge_new(vmg);
163 if (merged) {
164 *was_merged = true;
165 ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
166 return merged;
167 }
168
169 *was_merged = false;
170
171 ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
172
173 return alloc_and_link_vma(mm, start, end, pgoff, flags);
174 }
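
/*
 * Usage sketch (values are arbitrary): the was_merged out-parameter lets a
 * test distinguish a successful merge from the fallback allocation, e.g.:
 *
 *	bool merged;
 *	struct vm_area_struct *vma =
 *		try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
 *	ASSERT_TRUE(merged);
 */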
175
176 /*
177 * Helper function to reset the dummy anon_vma to indicate it has not been
178 * duplicated.
179 */
static void reset_dummy_anon_vma(void)
181 {
182 dummy_anon_vma.was_cloned = false;
183 dummy_anon_vma.was_unlinked = false;
184 }
185
186 /*
187 * Helper function to remove all VMAs and destroy the maple tree associated with
188 * a virtual address space. Returns a count of VMAs in the tree.
189 */
static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
191 {
192 struct vm_area_struct *vma;
193 int count = 0;
194
195 fail_prealloc = false;
196 reset_dummy_anon_vma();
197
198 vma_iter_set(vmi, 0);
199 for_each_vma(*vmi, vma) {
200 vm_area_free(vma);
201 count++;
202 }
203
204 mtree_destroy(&mm->mm_mt);
205 mm->map_count = 0;
206 return count;
207 }
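
/*
 * Example (a sketch): since the return value counts the VMAs freed, tests
 * use it to assert how many VMAs remained after the operation under test:
 *
 *	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
 */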
208
209 /* Helper function to determine if VMA has had vma_start_write() performed. */
static bool vma_write_started(struct vm_area_struct *vma)
211 {
212 int seq = vma->vm_lock_seq;
213
214 /* We reset after each check. */
215 vma->vm_lock_seq = -1;
216
217 /* The vma_start_write() stub simply increments this value. */
218 return seq > -1;
219 }
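
/*
 * Example (a sketch): a successful merge should mark the surviving VMA as
 * written to, so tests pair the merge with this helper, e.g.:
 *
 *	ASSERT_EQ(merge_existing(&vmg), vma_prev);
 *	ASSERT_TRUE(vma_write_started(vma_prev));
 *
 * Note that the helper resets vm_lock_seq, so each call only reflects writes
 * made since the previous check.
 */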
220
/* Helper function providing a dummy vm_ops->close() method. */
static void dummy_close(struct vm_area_struct *)
223 {
224 }
225
static bool test_simple_merge(void)
227 {
228 struct vm_area_struct *vma;
229 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
230 struct mm_struct mm = {};
231 struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
232 struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
233 VMA_ITERATOR(vmi, &mm, 0x1000);
234 struct vma_merge_struct vmg = {
235 .mm = &mm,
236 .vmi = &vmi,
237 .start = 0x1000,
238 .end = 0x2000,
239 .flags = flags,
240 .pgoff = 1,
241 };
242
243 ASSERT_FALSE(vma_link(&mm, vma_left));
244 ASSERT_FALSE(vma_link(&mm, vma_right));
245
246 vma = merge_new(&vmg);
247 ASSERT_NE(vma, NULL);
248
249 ASSERT_EQ(vma->vm_start, 0);
250 ASSERT_EQ(vma->vm_end, 0x3000);
251 ASSERT_EQ(vma->vm_pgoff, 0);
252 ASSERT_EQ(vma->vm_flags, flags);
253
254 vm_area_free(vma);
255 mtree_destroy(&mm.mm_mt);
256
257 return true;
258 }
259
static bool test_simple_modify(void)
261 {
262 struct vm_area_struct *vma;
263 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
264 struct mm_struct mm = {};
265 struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
266 VMA_ITERATOR(vmi, &mm, 0x1000);
267
268 ASSERT_FALSE(vma_link(&mm, init_vma));
269
270 /*
271 * The flags will not be changed, the vma_modify_flags() function
272 * performs the merge/split only.
273 */
274 vma = vma_modify_flags(&vmi, init_vma, init_vma,
275 0x1000, 0x2000, VM_READ | VM_MAYREAD);
276 ASSERT_NE(vma, NULL);
277 /* We modify the provided VMA, and on split allocate new VMAs. */
278 ASSERT_EQ(vma, init_vma);
279
280 ASSERT_EQ(vma->vm_start, 0x1000);
281 ASSERT_EQ(vma->vm_end, 0x2000);
282 ASSERT_EQ(vma->vm_pgoff, 1);
283
284 /*
285 * Now walk through the three split VMAs and make sure they are as
286 * expected.
287 */
288
289 vma_iter_set(&vmi, 0);
290 vma = vma_iter_load(&vmi);
291
292 ASSERT_EQ(vma->vm_start, 0);
293 ASSERT_EQ(vma->vm_end, 0x1000);
294 ASSERT_EQ(vma->vm_pgoff, 0);
295
296 vm_area_free(vma);
297 vma_iter_clear(&vmi);
298
299 vma = vma_next(&vmi);
300
301 ASSERT_EQ(vma->vm_start, 0x1000);
302 ASSERT_EQ(vma->vm_end, 0x2000);
303 ASSERT_EQ(vma->vm_pgoff, 1);
304
305 vm_area_free(vma);
306 vma_iter_clear(&vmi);
307
308 vma = vma_next(&vmi);
309
310 ASSERT_EQ(vma->vm_start, 0x2000);
311 ASSERT_EQ(vma->vm_end, 0x3000);
312 ASSERT_EQ(vma->vm_pgoff, 2);
313
314 vm_area_free(vma);
315 mtree_destroy(&mm.mm_mt);
316
317 return true;
318 }
319
static bool test_simple_expand(void)
321 {
322 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
323 struct mm_struct mm = {};
324 struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
325 VMA_ITERATOR(vmi, &mm, 0);
326 struct vma_merge_struct vmg = {
327 .vmi = &vmi,
328 .vma = vma,
329 .start = 0,
330 .end = 0x3000,
331 .pgoff = 0,
332 };
333
334 ASSERT_FALSE(vma_link(&mm, vma));
335
336 ASSERT_FALSE(expand_existing(&vmg));
337
338 ASSERT_EQ(vma->vm_start, 0);
339 ASSERT_EQ(vma->vm_end, 0x3000);
340 ASSERT_EQ(vma->vm_pgoff, 0);
341
342 vm_area_free(vma);
343 mtree_destroy(&mm.mm_mt);
344
345 return true;
346 }
347
static bool test_simple_shrink(void)
349 {
350 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
351 struct mm_struct mm = {};
352 struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
353 VMA_ITERATOR(vmi, &mm, 0);
354
355 ASSERT_FALSE(vma_link(&mm, vma));
356
357 ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
358
359 ASSERT_EQ(vma->vm_start, 0);
360 ASSERT_EQ(vma->vm_end, 0x1000);
361 ASSERT_EQ(vma->vm_pgoff, 0);
362
363 vm_area_free(vma);
364 mtree_destroy(&mm.mm_mt);
365
366 return true;
367 }
368
static bool test_merge_new(void)
370 {
371 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
372 struct mm_struct mm = {};
373 VMA_ITERATOR(vmi, &mm, 0);
374 struct vma_merge_struct vmg = {
375 .mm = &mm,
376 .vmi = &vmi,
377 };
378 struct anon_vma_chain dummy_anon_vma_chain_a = {
379 .anon_vma = &dummy_anon_vma,
380 };
381 struct anon_vma_chain dummy_anon_vma_chain_b = {
382 .anon_vma = &dummy_anon_vma,
383 };
384 struct anon_vma_chain dummy_anon_vma_chain_c = {
385 .anon_vma = &dummy_anon_vma,
386 };
387 struct anon_vma_chain dummy_anon_vma_chain_d = {
388 .anon_vma = &dummy_anon_vma,
389 };
390 const struct vm_operations_struct vm_ops = {
391 .close = dummy_close,
392 };
393 int count;
394 struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
395 bool merged;
396
397 /*
398 * 0123456789abc
399 * AA B CC
400 */
401 vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
402 ASSERT_NE(vma_a, NULL);
403 /* We give each VMA a single avc so we can test anon_vma duplication. */
404 INIT_LIST_HEAD(&vma_a->anon_vma_chain);
405 list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
406
407 vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
408 ASSERT_NE(vma_b, NULL);
409 INIT_LIST_HEAD(&vma_b->anon_vma_chain);
410 list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
411
412 vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
413 ASSERT_NE(vma_c, NULL);
414 INIT_LIST_HEAD(&vma_c->anon_vma_chain);
415 list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
416
417 /*
418 * NO merge.
419 *
420 * 0123456789abc
421 * AA B ** CC
422 */
423 vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
424 ASSERT_NE(vma_d, NULL);
425 INIT_LIST_HEAD(&vma_d->anon_vma_chain);
426 list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
427 ASSERT_FALSE(merged);
428 ASSERT_EQ(mm.map_count, 4);
429
430 /*
431 * Merge BOTH sides.
432 *
433 * 0123456789abc
434 * AA*B DD CC
435 */
436 vma_a->vm_ops = &vm_ops; /* This should have no impact. */
437 vma_b->anon_vma = &dummy_anon_vma;
438 vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
439 ASSERT_EQ(vma, vma_a);
440 /* Merge with A, delete B. */
441 ASSERT_TRUE(merged);
442 ASSERT_EQ(vma->vm_start, 0);
443 ASSERT_EQ(vma->vm_end, 0x4000);
444 ASSERT_EQ(vma->vm_pgoff, 0);
445 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
446 ASSERT_TRUE(vma_write_started(vma));
447 ASSERT_EQ(mm.map_count, 3);
448
449 /*
450 * Merge to PREVIOUS VMA.
451 *
452 * 0123456789abc
453 * AAAA* DD CC
454 */
455 vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
456 ASSERT_EQ(vma, vma_a);
457 /* Extend A. */
458 ASSERT_TRUE(merged);
459 ASSERT_EQ(vma->vm_start, 0);
460 ASSERT_EQ(vma->vm_end, 0x5000);
461 ASSERT_EQ(vma->vm_pgoff, 0);
462 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
463 ASSERT_TRUE(vma_write_started(vma));
464 ASSERT_EQ(mm.map_count, 3);
465
466 /*
467 * Merge to NEXT VMA.
468 *
469 * 0123456789abc
470 * AAAAA *DD CC
471 */
472 vma_d->anon_vma = &dummy_anon_vma;
473 vma_d->vm_ops = &vm_ops; /* This should have no impact. */
474 vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
475 ASSERT_EQ(vma, vma_d);
476 /* Prepend. */
477 ASSERT_TRUE(merged);
478 ASSERT_EQ(vma->vm_start, 0x6000);
479 ASSERT_EQ(vma->vm_end, 0x9000);
480 ASSERT_EQ(vma->vm_pgoff, 6);
481 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
482 ASSERT_TRUE(vma_write_started(vma));
483 ASSERT_EQ(mm.map_count, 3);
484
485 /*
486 * Merge BOTH sides.
487 *
488 * 0123456789abc
489 * AAAAA*DDD CC
490 */
491 vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
492 vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
493 ASSERT_EQ(vma, vma_a);
494 /* Merge with A, delete D. */
495 ASSERT_TRUE(merged);
496 ASSERT_EQ(vma->vm_start, 0);
497 ASSERT_EQ(vma->vm_end, 0x9000);
498 ASSERT_EQ(vma->vm_pgoff, 0);
499 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
500 ASSERT_TRUE(vma_write_started(vma));
501 ASSERT_EQ(mm.map_count, 2);
502
503 /*
504 * Merge to NEXT VMA.
505 *
506 * 0123456789abc
507 * AAAAAAAAA *CC
508 */
509 vma_c->anon_vma = &dummy_anon_vma;
510 vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
511 ASSERT_EQ(vma, vma_c);
512 /* Prepend C. */
513 ASSERT_TRUE(merged);
514 ASSERT_EQ(vma->vm_start, 0xa000);
515 ASSERT_EQ(vma->vm_end, 0xc000);
516 ASSERT_EQ(vma->vm_pgoff, 0xa);
517 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
518 ASSERT_TRUE(vma_write_started(vma));
519 ASSERT_EQ(mm.map_count, 2);
520
521 /*
522 * Merge BOTH sides.
523 *
524 * 0123456789abc
525 * AAAAAAAAA*CCC
526 */
527 vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
528 ASSERT_EQ(vma, vma_a);
529 /* Extend A and delete C. */
530 ASSERT_TRUE(merged);
531 ASSERT_EQ(vma->vm_start, 0);
532 ASSERT_EQ(vma->vm_end, 0xc000);
533 ASSERT_EQ(vma->vm_pgoff, 0);
534 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
535 ASSERT_TRUE(vma_write_started(vma));
536 ASSERT_EQ(mm.map_count, 1);
537
538 /*
539 * Final state.
540 *
541 * 0123456789abc
542 * AAAAAAAAAAAAA
543 */
544
545 count = 0;
546 vma_iter_set(&vmi, 0);
547 for_each_vma(vmi, vma) {
548 ASSERT_NE(vma, NULL);
549 ASSERT_EQ(vma->vm_start, 0);
550 ASSERT_EQ(vma->vm_end, 0xc000);
551 ASSERT_EQ(vma->vm_pgoff, 0);
552 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
553
554 vm_area_free(vma);
555 count++;
556 }
557
/* Should only have one VMA left (though freed) after all is done. */
559 ASSERT_EQ(count, 1);
560
561 mtree_destroy(&mm.mm_mt);
562 return true;
563 }
564
static bool test_vma_merge_special_flags(void)
566 {
567 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
568 struct mm_struct mm = {};
569 VMA_ITERATOR(vmi, &mm, 0);
570 struct vma_merge_struct vmg = {
571 .mm = &mm,
572 .vmi = &vmi,
573 };
574 vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
575 vm_flags_t all_special_flags = 0;
576 int i;
577 struct vm_area_struct *vma_left, *vma;
578
579 /* Make sure there aren't new VM_SPECIAL flags. */
580 for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
581 all_special_flags |= special_flags[i];
582 }
583 ASSERT_EQ(all_special_flags, VM_SPECIAL);
584
585 /*
586 * 01234
587 * AAA
588 */
589 vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
590 ASSERT_NE(vma_left, NULL);
591
592 /* 1. Set up new VMA with special flag that would otherwise merge. */
593
594 /*
595 * 01234
596 * AAA*
597 *
598 * This should merge if not for the VM_SPECIAL flag.
599 */
600 vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
601 for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
602 vm_flags_t special_flag = special_flags[i];
603
604 vma_left->__vm_flags = flags | special_flag;
605 vmg.flags = flags | special_flag;
606 vma = merge_new(&vmg);
607 ASSERT_EQ(vma, NULL);
608 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
609 }
610
611 /* 2. Modify VMA with special flag that would otherwise merge. */
612
613 /*
614 * 01234
615 * AAAB
616 *
617 * Create a VMA to modify.
618 */
619 vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
620 ASSERT_NE(vma, NULL);
621 vmg.vma = vma;
622
623 for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
624 vm_flags_t special_flag = special_flags[i];
625
626 vma_left->__vm_flags = flags | special_flag;
627 vmg.flags = flags | special_flag;
628 vma = merge_existing(&vmg);
629 ASSERT_EQ(vma, NULL);
630 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
631 }
632
633 cleanup_mm(&mm, &vmi);
634 return true;
635 }
636
static bool test_vma_merge_with_close(void)
638 {
639 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
640 struct mm_struct mm = {};
641 VMA_ITERATOR(vmi, &mm, 0);
642 struct vma_merge_struct vmg = {
643 .mm = &mm,
644 .vmi = &vmi,
645 };
646 const struct vm_operations_struct vm_ops = {
647 .close = dummy_close,
648 };
649 struct vm_area_struct *vma_prev, *vma_next, *vma;
650
651 /*
652 * When merging VMAs we are not permitted to remove any VMA that has a
653 * vm_ops->close() hook.
654 *
655 * Considering the two possible adjacent VMAs to which a VMA can be
656 * merged:
657 *
658 * [ prev ][ vma ][ next ]
659 *
660 * In no case will we need to delete prev. If the operation is
661 * mergeable, then prev will be extended with one or both of vma and
662 * next deleted.
663 *
664 * As a result, during initial mergeability checks, only
665 * can_vma_merge_before() (which implies the VMA being merged with is
666 * 'next' as shown above) bothers to check to see whether the next VMA
667 * has a vm_ops->close() callback that will need to be called when
668 * removed.
669 *
670 * If it does, then we cannot merge as the resources that the close()
671 * operation potentially clears down are tied only to the existing VMA
 * range and we have no way of extending those to the newly merged one.
673 *
674 * We must consider two scenarios:
675 *
676 * A.
677 *
678 * vm_ops->close: - - !NULL
679 * [ prev ][ vma ][ next ]
680 *
681 * Where prev may or may not be present/mergeable.
682 *
683 * This is picked up by a specific check in can_vma_merge_before().
684 *
685 * B.
686 *
687 * vm_ops->close: - !NULL
688 * [ prev ][ vma ]
689 *
690 * Where prev and vma are present and mergeable.
691 *
692 * This is picked up by a specific check in the modified VMA merge.
693 *
694 * IMPORTANT NOTE: We make the assumption that the following case:
695 *
696 * - !NULL NULL
697 * [ prev ][ vma ][ next ]
698 *
699 * Cannot occur, because vma->vm_ops being the same implies the same
700 * vma->vm_file, and therefore this would mean that next->vm_ops->close
701 * would be set too, and thus scenario A would pick this up.
702 */
703
704 /*
705 * The only case of a new VMA merge that results in a VMA being deleted
706 * is one where both the previous and next VMAs are merged - in this
707 * instance the next VMA is deleted, and the previous VMA is extended.
708 *
709 * If we are unable to do so, we reduce the operation to simply
710 * extending the prev VMA and not merging next.
711 *
712 * 0123456789
713 * PPP**NNNN
714 * ->
715 * 0123456789
716 * PPPPPPNNN
717 */
718
719 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
720 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
721 vma_next->vm_ops = &vm_ops;
722
723 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
724 ASSERT_EQ(merge_new(&vmg), vma_prev);
725 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
726 ASSERT_EQ(vma_prev->vm_start, 0);
727 ASSERT_EQ(vma_prev->vm_end, 0x5000);
728 ASSERT_EQ(vma_prev->vm_pgoff, 0);
729
730 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
731
732 /*
733 * When modifying an existing VMA there are further cases where we
734 * delete VMAs.
735 *
736 * <>
737 * 0123456789
738 * PPPVV
739 *
740 * In this instance, if vma has a close hook, the merge simply cannot
741 * proceed.
742 */
743
744 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
745 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
746 vma->vm_ops = &vm_ops;
747
748 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
749 vmg.prev = vma_prev;
750 vmg.vma = vma;
751
752 /*
753 * The VMA being modified in a way that would otherwise merge should
754 * also fail.
755 */
756 ASSERT_EQ(merge_existing(&vmg), NULL);
757 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
758
759 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
760
761 /*
762 * This case is mirrored if merging with next.
763 *
764 * <>
765 * 0123456789
766 * VVNNNN
767 *
768 * In this instance, if vma has a close hook, the merge simply cannot
769 * proceed.
770 */
771
772 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
773 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
774 vma->vm_ops = &vm_ops;
775
776 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
777 vmg.vma = vma;
778 ASSERT_EQ(merge_existing(&vmg), NULL);
779 /*
780 * Initially this is misapprehended as an out of memory report, as the
781 * close() check is handled in the same way as anon_vma duplication
 * failures; however, a subsequent patch resolves this.
783 */
784 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
785
786 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
787
788 /*
789 * Finally, we consider two variants of the case where we modify a VMA
790 * to merge with both the previous and next VMAs.
791 *
792 * The first variant is where vma has a close hook. In this instance, no
793 * merge can proceed.
794 *
795 * <>
796 * 0123456789
797 * PPPVVNNNN
798 */
799
800 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
801 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
802 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
803 vma->vm_ops = &vm_ops;
804
805 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
806 vmg.prev = vma_prev;
807 vmg.vma = vma;
808
809 ASSERT_EQ(merge_existing(&vmg), NULL);
810 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
811
812 ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
813
814 /*
815 * The second variant is where next has a close hook. In this instance,
816 * we reduce the operation to a merge between prev and vma.
817 *
818 * <>
819 * 0123456789
820 * PPPVVNNNN
821 * ->
822 * 0123456789
823 * PPPPPNNNN
824 */
825
826 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
827 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
828 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
829 vma_next->vm_ops = &vm_ops;
830
831 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
832 vmg.prev = vma_prev;
833 vmg.vma = vma;
834
835 ASSERT_EQ(merge_existing(&vmg), vma_prev);
836 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
837 ASSERT_EQ(vma_prev->vm_start, 0);
838 ASSERT_EQ(vma_prev->vm_end, 0x5000);
839 ASSERT_EQ(vma_prev->vm_pgoff, 0);
840
841 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
842
843 return true;
844 }
845
static bool test_vma_merge_new_with_close(void)
847 {
848 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
849 struct mm_struct mm = {};
850 VMA_ITERATOR(vmi, &mm, 0);
851 struct vma_merge_struct vmg = {
852 .mm = &mm,
853 .vmi = &vmi,
854 };
855 struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
856 struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
857 const struct vm_operations_struct vm_ops = {
858 .close = dummy_close,
859 };
860 struct vm_area_struct *vma;
861
862 /*
863 * We should allow the partial merge of a proposed new VMA if the
864 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
865 * compatible), e.g.:
866 *
867 * New VMA
868 * A v-------v B
869 * |-----| |-----|
870 * close close
871 *
872 * Since the rule is to not DELETE a VMA with a close operation, this
873 * should be permitted, only rather than expanding A and deleting B, we
874 * should simply expand A and leave B intact, e.g.:
875 *
876 * New VMA
877 * A B
878 * |------------||-----|
879 * close close
880 */
881
882 /* Have prev and next have a vm_ops->close() hook. */
883 vma_prev->vm_ops = &vm_ops;
884 vma_next->vm_ops = &vm_ops;
885
886 vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
887 vma = merge_new(&vmg);
888 ASSERT_NE(vma, NULL);
889 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
890 ASSERT_EQ(vma->vm_start, 0);
891 ASSERT_EQ(vma->vm_end, 0x5000);
892 ASSERT_EQ(vma->vm_pgoff, 0);
893 ASSERT_EQ(vma->vm_ops, &vm_ops);
894 ASSERT_TRUE(vma_write_started(vma));
895 ASSERT_EQ(mm.map_count, 2);
896
897 cleanup_mm(&mm, &vmi);
898 return true;
899 }
900
static bool test_merge_existing(void)
902 {
903 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
904 struct mm_struct mm = {};
905 VMA_ITERATOR(vmi, &mm, 0);
906 struct vm_area_struct *vma, *vma_prev, *vma_next;
907 struct vma_merge_struct vmg = {
908 .mm = &mm,
909 .vmi = &vmi,
910 };
911 const struct vm_operations_struct vm_ops = {
912 .close = dummy_close,
913 };
914
915 /*
916 * Merge right case - partial span.
917 *
918 * <->
919 * 0123456789
920 * VVVVNNN
921 * ->
922 * 0123456789
923 * VNNNNNN
924 */
925 vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
926 vma->vm_ops = &vm_ops; /* This should have no impact. */
927 vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
928 vma_next->vm_ops = &vm_ops; /* This should have no impact. */
929 vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
930 vmg.vma = vma;
931 vmg.prev = vma;
932 vma->anon_vma = &dummy_anon_vma;
933 ASSERT_EQ(merge_existing(&vmg), vma_next);
934 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
935 ASSERT_EQ(vma_next->vm_start, 0x3000);
936 ASSERT_EQ(vma_next->vm_end, 0x9000);
937 ASSERT_EQ(vma_next->vm_pgoff, 3);
938 ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
939 ASSERT_EQ(vma->vm_start, 0x2000);
940 ASSERT_EQ(vma->vm_end, 0x3000);
941 ASSERT_EQ(vma->vm_pgoff, 2);
942 ASSERT_TRUE(vma_write_started(vma));
943 ASSERT_TRUE(vma_write_started(vma_next));
944 ASSERT_EQ(mm.map_count, 2);
945
946 /* Clear down and reset. */
947 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
948
949 /*
950 * Merge right case - full span.
951 *
952 * <-->
953 * 0123456789
954 * VVVVNNN
955 * ->
956 * 0123456789
957 * NNNNNNN
958 */
959 vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
960 vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
961 vma_next->vm_ops = &vm_ops; /* This should have no impact. */
962 vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
963 vmg.vma = vma;
964 vma->anon_vma = &dummy_anon_vma;
965 ASSERT_EQ(merge_existing(&vmg), vma_next);
966 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
967 ASSERT_EQ(vma_next->vm_start, 0x2000);
968 ASSERT_EQ(vma_next->vm_end, 0x9000);
969 ASSERT_EQ(vma_next->vm_pgoff, 2);
970 ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
971 ASSERT_TRUE(vma_write_started(vma_next));
972 ASSERT_EQ(mm.map_count, 1);
973
974 /* Clear down and reset. We should have deleted vma. */
975 ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
976
977 /*
978 * Merge left case - partial span.
979 *
980 * <->
981 * 0123456789
982 * PPPVVVV
983 * ->
984 * 0123456789
985 * PPPPPPV
986 */
987 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
988 vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
989 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
990 vma->vm_ops = &vm_ops; /* This should have no impact. */
991 vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
992 vmg.prev = vma_prev;
993 vmg.vma = vma;
994 vma->anon_vma = &dummy_anon_vma;
995
996 ASSERT_EQ(merge_existing(&vmg), vma_prev);
997 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
998 ASSERT_EQ(vma_prev->vm_start, 0);
999 ASSERT_EQ(vma_prev->vm_end, 0x6000);
1000 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1001 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1002 ASSERT_EQ(vma->vm_start, 0x6000);
1003 ASSERT_EQ(vma->vm_end, 0x7000);
1004 ASSERT_EQ(vma->vm_pgoff, 6);
1005 ASSERT_TRUE(vma_write_started(vma_prev));
1006 ASSERT_TRUE(vma_write_started(vma));
1007 ASSERT_EQ(mm.map_count, 2);
1008
1009 /* Clear down and reset. */
1010 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1011
1012 /*
1013 * Merge left case - full span.
1014 *
1015 * <-->
1016 * 0123456789
1017 * PPPVVVV
1018 * ->
1019 * 0123456789
1020 * PPPPPPP
1021 */
1022 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1023 vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1024 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1025 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1026 vmg.prev = vma_prev;
1027 vmg.vma = vma;
1028 vma->anon_vma = &dummy_anon_vma;
1029 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1030 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1031 ASSERT_EQ(vma_prev->vm_start, 0);
1032 ASSERT_EQ(vma_prev->vm_end, 0x7000);
1033 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1034 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1035 ASSERT_TRUE(vma_write_started(vma_prev));
1036 ASSERT_EQ(mm.map_count, 1);
1037
1038 /* Clear down and reset. We should have deleted vma. */
1039 ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1040
1041 /*
1042 * Merge both case.
1043 *
1044 * <-->
1045 * 0123456789
1046 * PPPVVVVNNN
1047 * ->
1048 * 0123456789
1049 * PPPPPPPPPP
1050 */
1051 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1052 vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1053 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1054 vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1055 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1056 vmg.prev = vma_prev;
1057 vmg.vma = vma;
1058 vma->anon_vma = &dummy_anon_vma;
1059 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1060 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1061 ASSERT_EQ(vma_prev->vm_start, 0);
1062 ASSERT_EQ(vma_prev->vm_end, 0x9000);
1063 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1064 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1065 ASSERT_TRUE(vma_write_started(vma_prev));
1066 ASSERT_EQ(mm.map_count, 1);
1067
1068 /* Clear down and reset. We should have deleted prev and next. */
1069 ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1070
1071 /*
 * Non-merge ranges. The modified VMA merge operation assumes that the
1073 * caller always specifies ranges within the input VMA so we need only
1074 * examine these cases.
1075 *
1076 * -
1077 * -
1078 * -
1079 * <->
1080 * <>
1081 * <>
1082 * 0123456789a
1083 * PPPVVVVVNNN
1084 */
1085
1086 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1087 vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1088 vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1089
1090 vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1091 vmg.prev = vma;
1092 vmg.vma = vma;
1093 ASSERT_EQ(merge_existing(&vmg), NULL);
1094 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1095
1096 vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1097 vmg.prev = vma;
1098 vmg.vma = vma;
1099 ASSERT_EQ(merge_existing(&vmg), NULL);
1100 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1101
1102 vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1103 vmg.prev = vma;
1104 vmg.vma = vma;
1105 ASSERT_EQ(merge_existing(&vmg), NULL);
1106 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1107
1108 vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1109 vmg.prev = vma;
1110 vmg.vma = vma;
1111 ASSERT_EQ(merge_existing(&vmg), NULL);
1112 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1113
1114 vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1115 vmg.prev = vma;
1116 vmg.vma = vma;
1117 ASSERT_EQ(merge_existing(&vmg), NULL);
1118 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1119
1120 vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1121 vmg.prev = vma;
1122 vmg.vma = vma;
1123 ASSERT_EQ(merge_existing(&vmg), NULL);
1124 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1125
1126 ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1127
1128 return true;
1129 }
1130
static bool test_anon_vma_non_mergeable(void)
1132 {
1133 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1134 struct mm_struct mm = {};
1135 VMA_ITERATOR(vmi, &mm, 0);
1136 struct vm_area_struct *vma, *vma_prev, *vma_next;
1137 struct vma_merge_struct vmg = {
1138 .mm = &mm,
1139 .vmi = &vmi,
1140 };
1141 struct anon_vma_chain dummy_anon_vma_chain1 = {
1142 .anon_vma = &dummy_anon_vma,
1143 };
1144 struct anon_vma_chain dummy_anon_vma_chain2 = {
1145 .anon_vma = &dummy_anon_vma,
1146 };
1147
1148 /*
1149 * In the case of modified VMA merge, merging both left and right VMAs
1150 * but where prev and next have incompatible anon_vma objects, we revert
1151 * to a merge of prev and VMA:
1152 *
1153 * <-->
1154 * 0123456789
1155 * PPPVVVVNNN
1156 * ->
1157 * 0123456789
1158 * PPPPPPPNNN
1159 */
1160 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1161 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1162 vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1163
1164 /*
1165 * Give both prev and next single anon_vma_chain fields, so they will
1166 * merge with the NULL vmg->anon_vma.
1167 *
1168 * However, when prev is compared to next, the merge should fail.
1169 */
1170
1171 INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1172 list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1173 ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1174 vma_prev->anon_vma = &dummy_anon_vma;
1175 ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1176
1177 INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1178 list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1179 ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1180 vma_next->anon_vma = (struct anon_vma *)2;
1181 ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1182
1183 ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1184
1185 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1186 vmg.prev = vma_prev;
1187 vmg.vma = vma;
1188
1189 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1190 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1191 ASSERT_EQ(vma_prev->vm_start, 0);
1192 ASSERT_EQ(vma_prev->vm_end, 0x7000);
1193 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1194 ASSERT_TRUE(vma_write_started(vma_prev));
1195 ASSERT_FALSE(vma_write_started(vma_next));
1196
1197 /* Clear down and reset. */
1198 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1199
1200 /*
1201 * Now consider the new VMA case. This is equivalent, only adding a new
1202 * VMA in a gap between prev and next.
1203 *
1204 * <-->
1205 * 0123456789
1206 * PPP****NNN
1207 * ->
1208 * 0123456789
1209 * PPPPPPPNNN
1210 */
1211 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1212 vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1213
1214 INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1215 list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1216 vma_prev->anon_vma = (struct anon_vma *)1;
1217
1218 INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1219 list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1220 vma_next->anon_vma = (struct anon_vma *)2;
1221
1222 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1223 vmg.prev = vma_prev;
1224
1225 ASSERT_EQ(merge_new(&vmg), vma_prev);
1226 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1227 ASSERT_EQ(vma_prev->vm_start, 0);
1228 ASSERT_EQ(vma_prev->vm_end, 0x7000);
1229 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1230 ASSERT_TRUE(vma_write_started(vma_prev));
1231 ASSERT_FALSE(vma_write_started(vma_next));
1232
1233 /* Final cleanup. */
1234 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1235
1236 return true;
1237 }
1238
static bool test_dup_anon_vma(void)
1240 {
1241 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1242 struct mm_struct mm = {};
1243 VMA_ITERATOR(vmi, &mm, 0);
1244 struct vma_merge_struct vmg = {
1245 .mm = &mm,
1246 .vmi = &vmi,
1247 };
1248 struct anon_vma_chain dummy_anon_vma_chain = {
1249 .anon_vma = &dummy_anon_vma,
1250 };
1251 struct vm_area_struct *vma_prev, *vma_next, *vma;
1252
1253 reset_dummy_anon_vma();
1254
1255 /*
 * Expanding a VMA and deleting the next one duplicates next's anon_vma and
1257 * assigns it to the expanded VMA.
1258 *
1259 * This covers new VMA merging, as these operations amount to a VMA
1260 * expand.
1261 */
1262 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1263 vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1264 vma_next->anon_vma = &dummy_anon_vma;
1265
1266 vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1267 vmg.vma = vma_prev;
1268 vmg.next = vma_next;
1269
1270 ASSERT_EQ(expand_existing(&vmg), 0);
1271
1272 /* Will have been cloned. */
1273 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1274 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1275
1276 /* Cleanup ready for next run. */
1277 cleanup_mm(&mm, &vmi);
1278
1279 /*
1280 * next has anon_vma, we assign to prev.
1281 *
1282 * |<----->|
1283 * |-------*********-------|
1284 * prev vma next
1285 * extend delete delete
1286 */
1287
1288 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1289 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1290 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1291
1292 /* Initialise avc so mergeability check passes. */
1293 INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1294 list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1295
1296 vma_next->anon_vma = &dummy_anon_vma;
1297 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1298 vmg.prev = vma_prev;
1299 vmg.vma = vma;
1300
1301 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1302 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1303
1304 ASSERT_EQ(vma_prev->vm_start, 0);
1305 ASSERT_EQ(vma_prev->vm_end, 0x8000);
1306
1307 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1308 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1309
1310 cleanup_mm(&mm, &vmi);
1311
1312 /*
1313 * vma has anon_vma, we assign to prev.
1314 *
1315 * |<----->|
1316 * |-------*********-------|
1317 * prev vma next
1318 * extend delete delete
1319 */
1320
1321 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1322 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1323 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1324
1325 vma->anon_vma = &dummy_anon_vma;
1326 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1327 vmg.prev = vma_prev;
1328 vmg.vma = vma;
1329
1330 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1331 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1332
1333 ASSERT_EQ(vma_prev->vm_start, 0);
1334 ASSERT_EQ(vma_prev->vm_end, 0x8000);
1335
1336 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1337 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1338
1339 cleanup_mm(&mm, &vmi);
1340
1341 /*
1342 * vma has anon_vma, we assign to prev.
1343 *
1344 * |<----->|
1345 * |-------*************
1346 * prev vma
1347 * extend shrink/delete
1348 */
1349
1350 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1351 vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1352
1353 vma->anon_vma = &dummy_anon_vma;
1354 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1355 vmg.prev = vma_prev;
1356 vmg.vma = vma;
1357
1358 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1359 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1360
1361 ASSERT_EQ(vma_prev->vm_start, 0);
1362 ASSERT_EQ(vma_prev->vm_end, 0x5000);
1363
1364 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1365 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1366
1367 cleanup_mm(&mm, &vmi);
1368
1369 /*
1370 * vma has anon_vma, we assign to next.
1371 *
1372 * |<----->|
1373 * *************-------|
1374 * vma next
1375 * shrink/delete extend
1376 */
1377
1378 vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1379 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1380
1381 vma->anon_vma = &dummy_anon_vma;
1382 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1383 vmg.prev = vma;
1384 vmg.vma = vma;
1385
1386 ASSERT_EQ(merge_existing(&vmg), vma_next);
1387 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1388
1389 ASSERT_EQ(vma_next->vm_start, 0x3000);
1390 ASSERT_EQ(vma_next->vm_end, 0x8000);
1391
1392 ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1393 ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1394
1395 cleanup_mm(&mm, &vmi);
1396 return true;
1397 }
1398
static bool test_vmi_prealloc_fail(void)
1400 {
1401 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1402 struct mm_struct mm = {};
1403 VMA_ITERATOR(vmi, &mm, 0);
1404 struct vma_merge_struct vmg = {
1405 .mm = &mm,
1406 .vmi = &vmi,
1407 };
1408 struct vm_area_struct *vma_prev, *vma;
1409
1410 /*
1411 * We are merging vma into prev, with vma possessing an anon_vma, which
1412 * will be duplicated. We cause the vmi preallocation to fail and assert
1413 * the duplicated anon_vma is unlinked.
1414 */
1415
1416 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1417 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1418 vma->anon_vma = &dummy_anon_vma;
1419
1420 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1421 vmg.prev = vma_prev;
1422 vmg.vma = vma;
1423
1424 fail_prealloc = true;
1425
1426 /* This will cause the merge to fail. */
1427 ASSERT_EQ(merge_existing(&vmg), NULL);
1428 ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1429 /* We will already have assigned the anon_vma. */
1430 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1431 /* And it was both cloned and unlinked. */
1432 ASSERT_TRUE(dummy_anon_vma.was_cloned);
1433 ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1434
1435 cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1436
1437 /*
1438 * We repeat the same operation for expanding a VMA, which is what new
1439 * VMA merging ultimately uses too. This asserts that unlinking is
1440 * performed in this case too.
1441 */
1442
1443 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1444 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1445 vma->anon_vma = &dummy_anon_vma;
1446
1447 vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1448 vmg.vma = vma_prev;
1449 vmg.next = vma;
1450
1451 fail_prealloc = true;
1452 ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1453 ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1454
1455 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1456 ASSERT_TRUE(dummy_anon_vma.was_cloned);
1457 ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1458
1459 cleanup_mm(&mm, &vmi);
1460 return true;
1461 }
1462
static bool test_merge_extend(void)
1464 {
1465 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1466 struct mm_struct mm = {};
1467 VMA_ITERATOR(vmi, &mm, 0x1000);
1468 struct vm_area_struct *vma;
1469
1470 vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1471 alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1472
1473 /*
1474 * Extend a VMA into the gap between itself and the following VMA.
1475 * This should result in a merge.
1476 *
1477 * <->
1478 * * *
1479 *
1480 */
1481
1482 ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1483 ASSERT_EQ(vma->vm_start, 0);
1484 ASSERT_EQ(vma->vm_end, 0x4000);
1485 ASSERT_EQ(vma->vm_pgoff, 0);
1486 ASSERT_TRUE(vma_write_started(vma));
1487 ASSERT_EQ(mm.map_count, 1);
1488
1489 cleanup_mm(&mm, &vmi);
1490 return true;
1491 }
1492
static bool test_copy_vma(void)
1494 {
1495 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1496 struct mm_struct mm = {};
1497 bool need_locks = false;
1498 VMA_ITERATOR(vmi, &mm, 0);
1499 struct vm_area_struct *vma, *vma_new, *vma_next;
1500
1501 /* Move backwards and do not merge. */
1502
1503 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1504 vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1505
1506 ASSERT_NE(vma_new, vma);
1507 ASSERT_EQ(vma_new->vm_start, 0);
1508 ASSERT_EQ(vma_new->vm_end, 0x2000);
1509 ASSERT_EQ(vma_new->vm_pgoff, 0);
1510
1511 cleanup_mm(&mm, &vmi);
1512
1513 /* Move a VMA into position next to another and merge the two. */
1514
1515 vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1516 vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1517 vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1518
1519 ASSERT_EQ(vma_new, vma_next);
1520
1521 cleanup_mm(&mm, &vmi);
1522 return true;
1523 }
1524
static bool test_expand_only_mode(void)
1526 {
1527 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1528 struct mm_struct mm = {};
1529 VMA_ITERATOR(vmi, &mm, 0);
1530 struct vm_area_struct *vma_prev, *vma;
1531 VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
1532
1533 /*
1534 * Place a VMA prior to the one we're expanding so we assert that we do
1535 * not erroneously try to traverse to the previous VMA even though we
1536 * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
1537 * need to do so.
1538 */
1539 alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1540
1541 /*
1542 * We will be positioned at the prev VMA, but looking to expand to
1543 * 0x9000.
1544 */
1545 vma_iter_set(&vmi, 0x3000);
1546 vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1547 vmg.prev = vma_prev;
1548 vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
1549
1550 vma = vma_merge_new_range(&vmg);
1551 ASSERT_NE(vma, NULL);
1552 ASSERT_EQ(vma, vma_prev);
1553 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1554 ASSERT_EQ(vma->vm_start, 0x3000);
1555 ASSERT_EQ(vma->vm_end, 0x9000);
1556 ASSERT_EQ(vma->vm_pgoff, 3);
1557 ASSERT_TRUE(vma_write_started(vma));
1558 ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1559
1560 cleanup_mm(&mm, &vmi);
1561 return true;
1562 }
1563
int main(void)
1565 {
1566 int num_tests = 0, num_fail = 0;
1567
1568 maple_tree_init();
1569
1570 #define TEST(name) \
1571 do { \
1572 num_tests++; \
1573 if (!test_##name()) { \
1574 num_fail++; \
1575 fprintf(stderr, "Test " #name " FAILED\n"); \
1576 } \
1577 } while (0)
1578
1579 /* Very simple tests to kick the tyres. */
1580 TEST(simple_merge);
1581 TEST(simple_modify);
1582 TEST(simple_expand);
1583 TEST(simple_shrink);
1584
1585 TEST(merge_new);
1586 TEST(vma_merge_special_flags);
1587 TEST(vma_merge_with_close);
1588 TEST(vma_merge_new_with_close);
1589 TEST(merge_existing);
1590 TEST(anon_vma_non_mergeable);
1591 TEST(dup_anon_vma);
1592 TEST(vmi_prealloc_fail);
1593 TEST(merge_extend);
1594 TEST(copy_vma);
1595 TEST(expand_only_mode);
1596
1597 #undef TEST
1598
1599 printf("%d tests run, %d passed, %d failed.\n",
1600 num_tests, num_tests - num_fail, num_fail);
1601
1602 return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1603 }
1604