1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "maple-shared.h"
8 #include "vma_internal.h"
9 
10 /* Include so the header guard is set. */
11 #include "../../../mm/vma.h"
12 
13 static bool fail_prealloc;
14 
15 /* Then override vma_iter_prealloc() so we can choose to fail it. */
16 #define vma_iter_prealloc(vmi, vma)					\
17 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
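/*
 * Because mm/vma.c is included directly below, this macro replaces every
 * vma_iter_prealloc() call in the code under test, allowing tests to force
 * an -ENOMEM preallocation failure by setting fail_prealloc to true.
 */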
18 
19 /*
20  * Directly import the VMA implementation here. Our vma_internal.h wrapper
21  * provides userland-equivalent functionality for everything vma.c uses.
22  */
23 #include "../../../mm/vma.c"
24 
25 const struct vm_operations_struct vma_dummy_vm_ops;
26 static struct anon_vma dummy_anon_vma;
27 
28 #define ASSERT_TRUE(_expr)						\
29 	do {								\
30 		if (!(_expr)) {						\
31 			fprintf(stderr,					\
32 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
33 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
34 			return false;					\
35 		}							\
36 	} while (0)
37 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
38 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
39 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
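/*
 * Each test is a function returning bool; on failure these assertions print
 * the failing expression along with its location and return false from the
 * enclosing test, e.g.:
 *
 *	ASSERT_EQ(vma->vm_end, 0x3000);
 */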
40 
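/*
 * Minimal stand-in for the kernel's 'current' task: get_current() simply
 * returns a pointer to a single static task_struct.
 */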
41 static struct task_struct __current;
42 
43 struct task_struct *get_current(void)
44 {
45 	return &__current;
46 }
47 
48 /* Helper function to simply allocate a VMA. */
49 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
50 					unsigned long start,
51 					unsigned long end,
52 					pgoff_t pgoff,
53 					vm_flags_t flags)
54 {
55 	struct vm_area_struct *ret = vm_area_alloc(mm);
56 
57 	if (ret == NULL)
58 		return NULL;
59 
60 	ret->vm_start = start;
61 	ret->vm_end = end;
62 	ret->vm_pgoff = pgoff;
63 	ret->__vm_flags = flags;
64 
65 	return ret;
66 }
67 
68 /* Helper function to allocate a VMA and link it to the tree. */
69 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
70 						 unsigned long start,
71 						 unsigned long end,
72 						 pgoff_t pgoff,
73 						 vm_flags_t flags)
74 {
75 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
76 
77 	if (vma == NULL)
78 		return NULL;
79 
80 	if (vma_link(mm, vma)) {
81 		vm_area_free(vma);
82 		return NULL;
83 	}
84 
85 	/*
86 	 * Reset this counter which we use to track whether writes have
87 	 * begun. Linking to the tree will have caused this to be incremented,
88 	 * which means we will get a false positive otherwise.
89 	 */
90 	vma->vm_lock_seq = -1;
91 
92 	return vma;
93 }
94 
95 /* Helper function which provides a wrapper around merging a new VMA. */
96 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
97 {
98 	/*
99 	 * For convenience, get the prev and next VMAs, which the new VMA merge
100 	 * operation requires.
101 	 */
102 	vmg->next = vma_next(vmg->vmi);
103 	vmg->prev = vma_prev(vmg->vmi);
104 
105 	vma_iter_set(vmg->vmi, vmg->start);
106 	return vma_merge(vmg);
107 }
108 
109 /*
110  * Helper function which provides a wrapper around merging an existing
111  * VMA.
112  */
113 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
114 {
115 	return vma_merge(vmg);
116 }
117 
118 /*
119  * Helper function which provides a wrapper around the expansion of an existing
120  * VMA.
121  */
122 static int expand_existing(struct vma_merge_struct *vmg)
123 {
124 	return vma_expand(vmg->vmi, vmg->vma, vmg->start, vmg->end, vmg->pgoff,
125 			  vmg->next);
126 }
127 
128 /*
129  * Helper function to reset the merge state and the associated VMA iterator
130  * to a specified new range.
131  */
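/*
 * A typical caller pairs this with explicit prev/vma assignments, e.g.:
 *
 *	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
 *	vmg.prev = vma_prev;
 *	vmg.vma = vma;
 *	ASSERT_EQ(merge_existing(&vmg), vma_prev);
 *
 * Note that the pgoff values used throughout are start >> 12, i.e. the tests
 * assume a 0x1000-byte page size.
 */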
132 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
133 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
134 {
135 	vma_iter_set(vmg->vmi, start);
136 
137 	vmg->prev = NULL;
138 	vmg->next = NULL;
139 	vmg->vma = NULL;
140 
141 	vmg->start = start;
142 	vmg->end = end;
143 	vmg->pgoff = pgoff;
144 	vmg->flags = flags;
145 }
146 
147 /*
148  * Helper function to try to merge a new VMA.
149  *
150  * Update vmg and its iterator and attempt the merge; if that fails, allocate
151  * a new VMA, link it into the maple tree and return it.
152  */
153 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
154 						struct vma_merge_struct *vmg,
155 						unsigned long start, unsigned long end,
156 						pgoff_t pgoff, vm_flags_t flags,
157 						bool *was_merged)
158 {
159 	struct vm_area_struct *merged;
160 
161 	vmg_set_range(vmg, start, end, pgoff, flags);
162 
163 	merged = merge_new(vmg);
164 	if (merged) {
165 		*was_merged = true;
166 		return merged;
167 	}
168 
169 	*was_merged = false;
170 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
171 }
172 
173 /*
174  * Helper function to reset the dummy anon_vma to indicate it has not been
175  * duplicated.
176  */
177 static void reset_dummy_anon_vma(void)
178 {
179 	dummy_anon_vma.was_cloned = false;
180 	dummy_anon_vma.was_unlinked = false;
181 }
182 
183 /*
184  * Helper function to remove all VMAs and destroy the maple tree associated with
185  * a virtual address space. Returns a count of VMAs in the tree.
186  */
187 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
188 {
189 	struct vm_area_struct *vma;
190 	int count = 0;
191 
192 	fail_prealloc = false;
193 	reset_dummy_anon_vma();
194 
195 	vma_iter_set(vmi, 0);
196 	for_each_vma(*vmi, vma) {
197 		vm_area_free(vma);
198 		count++;
199 	}
200 
201 	mtree_destroy(&mm->mm_mt);
202 	mm->map_count = 0;
203 	return count;
204 }
205 
206 /* Helper function to determine if a VMA has had vma_start_write() performed. */
207 static bool vma_write_started(struct vm_area_struct *vma)
208 {
209 	int seq = vma->vm_lock_seq;
210 
211 	/* We reset after each check. */
212 	vma->vm_lock_seq = -1;
213 
214 	/* The vma_start_write() stub simply increments this value. */
215 	return seq > -1;
216 }
217 
218 /* Helper function providing a dummy vm_ops->close() method. */
219 static void dummy_close(struct vm_area_struct *)
220 {
221 }
222 
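/*
 * Sanity-check the simplest merge case: a proposed VMA at [0x1000, 0x2000)
 * exactly fills the gap between two compatible VMAs and all three merge into
 * a single VMA spanning [0, 0x3000).
 */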
223 static bool test_simple_merge(void)
224 {
225 	struct vm_area_struct *vma;
226 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
227 	struct mm_struct mm = {};
228 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
229 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
230 	VMA_ITERATOR(vmi, &mm, 0x1000);
231 	struct vma_merge_struct vmg = {
232 		.mm = &mm,
233 		.vmi = &vmi,
234 		.start = 0x1000,
235 		.end = 0x2000,
236 		.flags = flags,
237 		.pgoff = 1,
238 	};
239 
240 	ASSERT_FALSE(vma_link(&mm, vma_left));
241 	ASSERT_FALSE(vma_link(&mm, vma_right));
242 
243 	vma = merge_new(&vmg);
244 	ASSERT_NE(vma, NULL);
245 
246 	ASSERT_EQ(vma->vm_start, 0);
247 	ASSERT_EQ(vma->vm_end, 0x3000);
248 	ASSERT_EQ(vma->vm_pgoff, 0);
249 	ASSERT_EQ(vma->vm_flags, flags);
250 
251 	vm_area_free(vma);
252 	mtree_destroy(&mm.mm_mt);
253 
254 	return true;
255 }
256 
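/*
 * Request a modification of the middle page of a VMA spanning [0, 0x3000)
 * via vma_modify_flags(), which splits it into three VMAs, then check each
 * of the resulting ranges.
 */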
257 static bool test_simple_modify(void)
258 {
259 	struct vm_area_struct *vma;
260 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
261 	struct mm_struct mm = {};
262 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
263 	VMA_ITERATOR(vmi, &mm, 0x1000);
264 
265 	ASSERT_FALSE(vma_link(&mm, init_vma));
266 
267 	/*
268 	 * The flags will not be changed; the vma_modify_flags() function
269 	 * performs the merge/split only.
270 	 */
271 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
272 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
273 	ASSERT_NE(vma, NULL);
274 	/* We modify the provided VMA, and on split allocate new VMAs. */
275 	ASSERT_EQ(vma, init_vma);
276 
277 	ASSERT_EQ(vma->vm_start, 0x1000);
278 	ASSERT_EQ(vma->vm_end, 0x2000);
279 	ASSERT_EQ(vma->vm_pgoff, 1);
280 
281 	/*
282 	 * Now walk through the three split VMAs and make sure they are as
283 	 * expected.
284 	 */
285 
286 	vma_iter_set(&vmi, 0);
287 	vma = vma_iter_load(&vmi);
288 
289 	ASSERT_EQ(vma->vm_start, 0);
290 	ASSERT_EQ(vma->vm_end, 0x1000);
291 	ASSERT_EQ(vma->vm_pgoff, 0);
292 
293 	vm_area_free(vma);
294 	vma_iter_clear(&vmi);
295 
296 	vma = vma_next(&vmi);
297 
298 	ASSERT_EQ(vma->vm_start, 0x1000);
299 	ASSERT_EQ(vma->vm_end, 0x2000);
300 	ASSERT_EQ(vma->vm_pgoff, 1);
301 
302 	vm_area_free(vma);
303 	vma_iter_clear(&vmi);
304 
305 	vma = vma_next(&vmi);
306 
307 	ASSERT_EQ(vma->vm_start, 0x2000);
308 	ASSERT_EQ(vma->vm_end, 0x3000);
309 	ASSERT_EQ(vma->vm_pgoff, 2);
310 
311 	vm_area_free(vma);
312 	mtree_destroy(&mm.mm_mt);
313 
314 	return true;
315 }
316 
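/* Expand a single VMA from [0, 0x1000) to [0, 0x3000) via vma_expand(). */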
317 static bool test_simple_expand(void)
318 {
319 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
320 	struct mm_struct mm = {};
321 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
322 	VMA_ITERATOR(vmi, &mm, 0);
323 	struct vma_merge_struct vmg = {
324 		.vmi = &vmi,
325 		.vma = vma,
326 		.start = 0,
327 		.end = 0x3000,
328 		.pgoff = 0,
329 	};
330 
331 	ASSERT_FALSE(vma_link(&mm, vma));
332 
333 	ASSERT_FALSE(expand_existing(&vmg));
334 
335 	ASSERT_EQ(vma->vm_start, 0);
336 	ASSERT_EQ(vma->vm_end, 0x3000);
337 	ASSERT_EQ(vma->vm_pgoff, 0);
338 
339 	vm_area_free(vma);
340 	mtree_destroy(&mm.mm_mt);
341 
342 	return true;
343 }
344 
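/* Shrink a single VMA from [0, 0x3000) to [0, 0x1000) via vma_shrink(). */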
345 static bool test_simple_shrink(void)
346 {
347 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
348 	struct mm_struct mm = {};
349 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
350 	VMA_ITERATOR(vmi, &mm, 0);
351 
352 	ASSERT_FALSE(vma_link(&mm, vma));
353 
354 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
355 
356 	ASSERT_EQ(vma->vm_start, 0);
357 	ASSERT_EQ(vma->vm_end, 0x1000);
358 	ASSERT_EQ(vma->vm_pgoff, 0);
359 
360 	vm_area_free(vma);
361 	mtree_destroy(&mm.mm_mt);
362 
363 	return true;
364 }
365 
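/*
 * Exercise the new-VMA merge cases (no merge, merge with prev, merge with
 * next, merge with both), checking anon_vma propagation, VMA write locking
 * and map_count at each step. The diagrams in the body show the layout
 * before each merge attempt.
 */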
366 static bool test_merge_new(void)
367 {
368 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
369 	struct mm_struct mm = {};
370 	VMA_ITERATOR(vmi, &mm, 0);
371 	struct vma_merge_struct vmg = {
372 		.mm = &mm,
373 		.vmi = &vmi,
374 	};
375 	struct anon_vma_chain dummy_anon_vma_chain_a = {
376 		.anon_vma = &dummy_anon_vma,
377 	};
378 	struct anon_vma_chain dummy_anon_vma_chain_b = {
379 		.anon_vma = &dummy_anon_vma,
380 	};
381 	struct anon_vma_chain dummy_anon_vma_chain_c = {
382 		.anon_vma = &dummy_anon_vma,
383 	};
384 	struct anon_vma_chain dummy_anon_vma_chain_d = {
385 		.anon_vma = &dummy_anon_vma,
386 	};
387 	int count;
388 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
389 	bool merged;
390 
391 	/*
392 	 * 0123456789abc
393 	 * AA B       CC
394 	 */
395 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
396 	ASSERT_NE(vma_a, NULL);
397 	/* We give each VMA a single avc so we can test anon_vma duplication. */
398 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
399 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
400 
401 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
402 	ASSERT_NE(vma_b, NULL);
403 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
404 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
405 
406 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
407 	ASSERT_NE(vma_c, NULL);
408 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
409 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
410 
411 	/*
412 	 * NO merge.
413 	 *
414 	 * 0123456789abc
415 	 * AA B   **  CC
416 	 */
417 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
418 	ASSERT_NE(vma_d, NULL);
419 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
420 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
421 	ASSERT_FALSE(merged);
422 	ASSERT_EQ(mm.map_count, 4);
423 
424 	/*
425 	 * Merge BOTH sides.
426 	 *
427 	 * 0123456789abc
428 	 * AA*B   DD  CC
429 	 */
430 	vma_b->anon_vma = &dummy_anon_vma;
431 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
432 	ASSERT_EQ(vma, vma_a);
433 	/* Merge with A, delete B. */
434 	ASSERT_TRUE(merged);
435 	ASSERT_EQ(vma->vm_start, 0);
436 	ASSERT_EQ(vma->vm_end, 0x4000);
437 	ASSERT_EQ(vma->vm_pgoff, 0);
438 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
439 	ASSERT_TRUE(vma_write_started(vma));
440 	ASSERT_EQ(mm.map_count, 3);
441 
442 	/*
443 	 * Merge to PREVIOUS VMA.
444 	 *
445 	 * 0123456789abc
446 	 * AAAA*  DD  CC
447 	 */
448 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
449 	ASSERT_EQ(vma, vma_a);
450 	/* Extend A. */
451 	ASSERT_TRUE(merged);
452 	ASSERT_EQ(vma->vm_start, 0);
453 	ASSERT_EQ(vma->vm_end, 0x5000);
454 	ASSERT_EQ(vma->vm_pgoff, 0);
455 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
456 	ASSERT_TRUE(vma_write_started(vma));
457 	ASSERT_EQ(mm.map_count, 3);
458 
459 	/*
460 	 * Merge to NEXT VMA.
461 	 *
462 	 * 0123456789abc
463 	 * AAAAA *DD  CC
464 	 */
465 	vma_d->anon_vma = &dummy_anon_vma;
466 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
467 	ASSERT_EQ(vma, vma_d);
468 	/* Prepend. */
469 	ASSERT_TRUE(merged);
470 	ASSERT_EQ(vma->vm_start, 0x6000);
471 	ASSERT_EQ(vma->vm_end, 0x9000);
472 	ASSERT_EQ(vma->vm_pgoff, 6);
473 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
474 	ASSERT_TRUE(vma_write_started(vma));
475 	ASSERT_EQ(mm.map_count, 3);
476 
477 	/*
478 	 * Merge BOTH sides.
479 	 *
480 	 * 0123456789abc
481 	 * AAAAA*DDD  CC
482 	 */
483 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
484 	ASSERT_EQ(vma, vma_a);
485 	/* Merge with A, delete D. */
486 	ASSERT_TRUE(merged);
487 	ASSERT_EQ(vma->vm_start, 0);
488 	ASSERT_EQ(vma->vm_end, 0x9000);
489 	ASSERT_EQ(vma->vm_pgoff, 0);
490 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
491 	ASSERT_TRUE(vma_write_started(vma));
492 	ASSERT_EQ(mm.map_count, 2);
493 
494 	/*
495 	 * Merge to NEXT VMA.
496 	 *
497 	 * 0123456789abc
498 	 * AAAAAAAAA *CC
499 	 */
500 	vma_c->anon_vma = &dummy_anon_vma;
501 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
502 	ASSERT_EQ(vma, vma_c);
503 	/* Prepend C. */
504 	ASSERT_TRUE(merged);
505 	ASSERT_EQ(vma->vm_start, 0xa000);
506 	ASSERT_EQ(vma->vm_end, 0xc000);
507 	ASSERT_EQ(vma->vm_pgoff, 0xa);
508 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
509 	ASSERT_TRUE(vma_write_started(vma));
510 	ASSERT_EQ(mm.map_count, 2);
511 
512 	/*
513 	 * Merge BOTH sides.
514 	 *
515 	 * 0123456789abc
516 	 * AAAAAAAAA*CCC
517 	 */
518 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
519 	ASSERT_EQ(vma, vma_a);
520 	/* Extend A and delete C. */
521 	ASSERT_TRUE(merged);
522 	ASSERT_EQ(vma->vm_start, 0);
523 	ASSERT_EQ(vma->vm_end, 0xc000);
524 	ASSERT_EQ(vma->vm_pgoff, 0);
525 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
526 	ASSERT_TRUE(vma_write_started(vma));
527 	ASSERT_EQ(mm.map_count, 1);
528 
529 	/*
530 	 * Final state.
531 	 *
532 	 * 0123456789abc
533 	 * AAAAAAAAAAAAA
534 	 */
535 
536 	count = 0;
537 	vma_iter_set(&vmi, 0);
538 	for_each_vma(vmi, vma) {
539 		ASSERT_NE(vma, NULL);
540 		ASSERT_EQ(vma->vm_start, 0);
541 		ASSERT_EQ(vma->vm_end, 0xc000);
542 		ASSERT_EQ(vma->vm_pgoff, 0);
543 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
544 
545 		vm_area_free(vma);
546 		count++;
547 	}
548 
549 	/* Should only have one VMA left (though freed) after all is done. */
550 	ASSERT_EQ(count, 1);
551 
552 	mtree_destroy(&mm.mm_mt);
553 	return true;
554 }
555 
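/*
 * VMAs with any VM_SPECIAL flag set must never be merged; check both the
 * new-VMA and modified-VMA merge paths, and that the flags tested here still
 * add up to exactly VM_SPECIAL.
 */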
556 static bool test_vma_merge_special_flags(void)
557 {
558 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
559 	struct mm_struct mm = {};
560 	VMA_ITERATOR(vmi, &mm, 0);
561 	struct vma_merge_struct vmg = {
562 		.mm = &mm,
563 		.vmi = &vmi,
564 	};
565 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
566 	vm_flags_t all_special_flags = 0;
567 	int i;
568 	struct vm_area_struct *vma_left, *vma;
569 
570 	/* Make sure there aren't new VM_SPECIAL flags. */
571 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
572 		all_special_flags |= special_flags[i];
573 	}
574 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
575 
576 	/*
577 	 * 01234
578 	 * AAA
579 	 */
580 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
581 	ASSERT_NE(vma_left, NULL);
582 
583 	/* 1. Set up new VMA with special flag that would otherwise merge. */
584 
585 	/*
586 	 * 01234
587 	 * AAA*
588 	 *
589 	 * This should merge if not for the VM_SPECIAL flag.
590 	 */
591 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
592 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
593 		vm_flags_t special_flag = special_flags[i];
594 
595 		vma_left->__vm_flags = flags | special_flag;
596 		vmg.flags = flags | special_flag;
597 		vma = merge_new(&vmg);
598 		ASSERT_EQ(vma, NULL);
599 	}
600 
601 	/* 2. Modify VMA with special flag that would otherwise merge. */
602 
603 	/*
604 	 * 01234
605 	 * AAAB
606 	 *
607 	 * Create a VMA to modify.
608 	 */
609 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
610 	ASSERT_NE(vma, NULL);
611 	vmg.vma = vma;
612 
613 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
614 		vm_flags_t special_flag = special_flags[i];
615 
616 		vma_left->__vm_flags = flags | special_flag;
617 		vmg.flags = flags | special_flag;
618 		vma = merge_existing(&vmg);
619 		ASSERT_EQ(vma, NULL);
620 	}
621 
622 	cleanup_mm(&mm, &vmi);
623 	return true;
624 }
625 
626 static bool test_vma_merge_with_close(void)
627 {
628 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
629 	struct mm_struct mm = {};
630 	VMA_ITERATOR(vmi, &mm, 0);
631 	struct vma_merge_struct vmg = {
632 		.mm = &mm,
633 		.vmi = &vmi,
634 	};
635 	const struct vm_operations_struct vm_ops = {
636 		.close = dummy_close,
637 	};
638 	struct vm_area_struct *vma_next =
639 		alloc_and_link_vma(&mm, 0x2000, 0x3000, 2, flags);
640 	struct vm_area_struct *vma;
641 
642 	/*
643 	 * When we merge VMAs we sometimes have to delete others as part of the
644 	 * operation.
645 	 *
646 	 * Considering the two possible adjacent VMAs to which a VMA can be
647 	 * merged:
648 	 *
649 	 * [ prev ][ vma ][ next ]
650 	 *
651 	 * In no case will we need to delete prev. If the operation is
652 	 * mergeable, then prev will be extended with one or both of vma and
653 	 * next deleted.
654 	 *
655 	 * As a result, during initial mergeability checks, only
656 	 * can_vma_merge_before() (which implies the VMA being merged with is
657 	 * 'next' as shown above) bothers to check whether the next VMA
658 	 * has a vm_ops->close() callback that will need to be called when
659 	 * removed.
660 	 *
661 	 * If it does, then we cannot merge as the resources that the close()
662 	 * operation potentially clears down are tied only to the existing VMA
663 	 * range and we have no way of extending those to the newly merged one.
664 	 *
665 	 * We must consider two scenarios:
666 	 *
667 	 * A.
668 	 *
669 	 * vm_ops->close:     -       -    !NULL
670 	 *                 [ prev ][ vma ][ next ]
671 	 *
672 	 * Where prev may or may not be present/mergeable.
673 	 *
674 	 * This is picked up by a specific check in can_vma_merge_before().
675 	 *
676 	 * B.
677 	 *
678 	 * vm_ops->close:     -     !NULL
679 	 *                 [ prev ][ vma ]
680 	 *
681 	 * Where prev and vma are present and mergeable.
682 	 *
683 	 * This is picked up by a specific check in the modified VMA merge.
684 	 *
685 	 * IMPORTANT NOTE: We make the assumption that the following case:
686 	 *
687 	 *    -     !NULL   NULL
688 	 * [ prev ][ vma ][ next ]
689 	 *
690 	 * Cannot occur, because vma->vm_ops being the same implies the same
691 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
692 	 * would be set too, and thus scenario A would pick this up.
693 	 */
694 
695 	ASSERT_NE(vma_next, NULL);
696 
697 	/*
698 	 * SCENARIO A
699 	 *
700 	 * 0123
701 	 *  *N
702 	 */
703 
704 	/* Make the next VMA have a close() callback. */
705 	vma_next->vm_ops = &vm_ops;
706 
707 	/* Our proposed VMA has characteristics that would otherwise permit a merge. */
708 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
709 
710 	/* The next VMA having a close() operation should cause the merge to fail. */
711 	ASSERT_EQ(merge_new(&vmg), NULL);
712 
713 	/* Now create the VMA so we can merge via modified flags. */
714 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
715 	vma = alloc_and_link_vma(&mm, 0x1000, 0x2000, 1, flags);
716 	vmg.vma = vma;
717 
718 	/*
719 	 * The VMA being modified in a way that would otherwise merge should
720 	 * also fail.
721 	 */
722 	ASSERT_EQ(merge_existing(&vmg), NULL);
723 
724 	/* SCENARIO B
725 	 *
726 	 * 0123
727 	 * P*
728 	 *
729 	 * In order for this scenario to trigger, the VMA currently being
730 	 * modified must also have a .close().
731 	 */
732 
733 	/* Reset VMG state. */
734 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
735 	/*
736 	 * Make next unmergeable, and don't let the scenario A check pick this
737 	 * up, we want to reproduce scenario B only.
738 	 */
739 	vma_next->vm_ops = NULL;
740 	vma_next->__vm_flags &= ~VM_MAYWRITE;
741 	/* Allocate prev. */
742 	vmg.prev = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
743 	/* Assign a vm_ops->close() function to VMA explicitly. */
744 	vma->vm_ops = &vm_ops;
745 	vmg.vma = vma;
746 	/* Make sure merge does not occur. */
747 	ASSERT_EQ(merge_existing(&vmg), NULL);
748 
749 	cleanup_mm(&mm, &vmi);
750 	return true;
751 }
752 
753 static bool test_vma_merge_new_with_close(void)
754 {
755 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
756 	struct mm_struct mm = {};
757 	VMA_ITERATOR(vmi, &mm, 0);
758 	struct vma_merge_struct vmg = {
759 		.mm = &mm,
760 		.vmi = &vmi,
761 	};
762 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
763 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
764 	const struct vm_operations_struct vm_ops = {
765 		.close = dummy_close,
766 	};
767 	struct vm_area_struct *vma;
768 
769 	/*
770 	 * We should allow the partial merge of a proposed new VMA if the
771 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
772 	 * compatible), e.g.:
773 	 *
774 	 *        New VMA
775 	 *    A  v-------v  B
776 	 * |-----|       |-----|
777 	 *  close         close
778 	 *
779 	 * Since the rule is to not DELETE a VMA with a close operation, this
780 	 * should be permitted; however, rather than expanding A and deleting B,
781 	 * we should simply expand A and leave B intact, e.g.:
782 	 *
783 	 *        New VMA
784 	 *       A          B
785 	 * |------------||-----|
786 	 *  close         close
787 	 */
788 
789 	/* Have prev and next have a vm_ops->close() hook. */
790 	vma_prev->vm_ops = &vm_ops;
791 	vma_next->vm_ops = &vm_ops;
792 
793 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
794 	vma = merge_new(&vmg);
795 	ASSERT_NE(vma, NULL);
796 	ASSERT_EQ(vma->vm_start, 0);
797 	ASSERT_EQ(vma->vm_end, 0x5000);
798 	ASSERT_EQ(vma->vm_pgoff, 0);
799 	ASSERT_EQ(vma->vm_ops, &vm_ops);
800 	ASSERT_TRUE(vma_write_started(vma));
801 	ASSERT_EQ(mm.map_count, 2);
802 
803 	cleanup_mm(&mm, &vmi);
804 	return true;
805 }
806 
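/*
 * Exercise the existing (modified) VMA merge cases: merge right with partial
 * and full span, merge left with partial and full span, merge both sides,
 * and a set of ranges which must not merge at all.
 */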
807 static bool test_merge_existing(void)
808 {
809 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
810 	struct mm_struct mm = {};
811 	VMA_ITERATOR(vmi, &mm, 0);
812 	struct vm_area_struct *vma, *vma_prev, *vma_next;
813 	struct vma_merge_struct vmg = {
814 		.mm = &mm,
815 		.vmi = &vmi,
816 	};
817 
818 	/*
819 	 * Merge right case - partial span.
820 	 *
821 	 *    <->
822 	 * 0123456789
823 	 *   VVVVNNN
824 	 *            ->
825 	 * 0123456789
826 	 *   VNNNNNN
827 	 */
828 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
829 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
830 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
831 	vmg.vma = vma;
832 	vmg.prev = vma;
833 	vma->anon_vma = &dummy_anon_vma;
834 	ASSERT_EQ(merge_existing(&vmg), vma_next);
835 	ASSERT_EQ(vma_next->vm_start, 0x3000);
836 	ASSERT_EQ(vma_next->vm_end, 0x9000);
837 	ASSERT_EQ(vma_next->vm_pgoff, 3);
838 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
839 	ASSERT_EQ(vma->vm_start, 0x2000);
840 	ASSERT_EQ(vma->vm_end, 0x3000);
841 	ASSERT_EQ(vma->vm_pgoff, 2);
842 	ASSERT_TRUE(vma_write_started(vma));
843 	ASSERT_TRUE(vma_write_started(vma_next));
844 	ASSERT_EQ(mm.map_count, 2);
845 
846 	/* Clear down and reset. */
847 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
848 
849 	/*
850 	 * Merge right case - full span.
851 	 *
852 	 *   <-->
853 	 * 0123456789
854 	 *   VVVVNNN
855 	 *            ->
856 	 * 0123456789
857 	 *   NNNNNNN
858 	 */
859 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
860 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
861 	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
862 	vmg.vma = vma;
863 	vma->anon_vma = &dummy_anon_vma;
864 	ASSERT_EQ(merge_existing(&vmg), vma_next);
865 	ASSERT_EQ(vma_next->vm_start, 0x2000);
866 	ASSERT_EQ(vma_next->vm_end, 0x9000);
867 	ASSERT_EQ(vma_next->vm_pgoff, 2);
868 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
869 	ASSERT_TRUE(vma_write_started(vma_next));
870 	ASSERT_EQ(mm.map_count, 1);
871 
872 	/* Clear down and reset. We should have deleted vma. */
873 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
874 
875 	/*
876 	 * Merge left case - partial span.
877 	 *
878 	 *    <->
879 	 * 0123456789
880 	 * PPPVVVV
881 	 *            ->
882 	 * 0123456789
883 	 * PPPPPPV
884 	 */
885 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
886 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
887 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
888 	vmg.prev = vma_prev;
889 	vmg.vma = vma;
890 	vma->anon_vma = &dummy_anon_vma;
891 
892 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
893 	ASSERT_EQ(vma_prev->vm_start, 0);
894 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
895 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
896 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
897 	ASSERT_EQ(vma->vm_start, 0x6000);
898 	ASSERT_EQ(vma->vm_end, 0x7000);
899 	ASSERT_EQ(vma->vm_pgoff, 6);
900 	ASSERT_TRUE(vma_write_started(vma_prev));
901 	ASSERT_TRUE(vma_write_started(vma));
902 	ASSERT_EQ(mm.map_count, 2);
903 
904 	/* Clear down and reset. */
905 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
906 
907 	/*
908 	 * Merge left case - full span.
909 	 *
910 	 *    <-->
911 	 * 0123456789
912 	 * PPPVVVV
913 	 *            ->
914 	 * 0123456789
915 	 * PPPPPPP
916 	 */
917 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
918 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
919 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
920 	vmg.prev = vma_prev;
921 	vmg.vma = vma;
922 	vma->anon_vma = &dummy_anon_vma;
923 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
924 	ASSERT_EQ(vma_prev->vm_start, 0);
925 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
926 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
927 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
928 	ASSERT_TRUE(vma_write_started(vma_prev));
929 	ASSERT_EQ(mm.map_count, 1);
930 
931 	/* Clear down and reset. We should have deleted vma. */
932 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
933 
934 	/*
935 	 * Merge both case.
936 	 *
937 	 *    <-->
938 	 * 0123456789
939 	 * PPPVVVVNNN
940 	 *             ->
941 	 * 0123456789
942 	 * PPPPPPPPPP
943 	 */
944 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
945 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
946 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
947 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
948 	vmg.prev = vma_prev;
949 	vmg.vma = vma;
950 	vma->anon_vma = &dummy_anon_vma;
951 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
952 	ASSERT_EQ(vma_prev->vm_start, 0);
953 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
954 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
955 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
956 	ASSERT_TRUE(vma_write_started(vma_prev));
957 	ASSERT_EQ(mm.map_count, 1);
958 
959 	/* Clear down and reset. We should have deleted prev and next. */
960 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
961 
962 	/*
963 	 * Non-merge ranges. The modified VMA merge operation assumes that the
964 	 * caller always specifies ranges within the input VMA, so we need only
965 	 * examine these cases.
966 	 *
967 	 *     -
968 	 *      -
969 	 *       -
970 	 *     <->
971 	 *     <>
972 	 *      <>
973 	 * 0123456789a
974 	 * PPPVVVVVNNN
975 	 */
976 
977 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
978 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
979 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
980 
981 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
982 	vmg.prev = vma;
983 	vmg.vma = vma;
984 	ASSERT_EQ(merge_existing(&vmg), NULL);
985 
986 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
987 	vmg.prev = vma;
988 	vmg.vma = vma;
989 	ASSERT_EQ(merge_existing(&vmg), NULL);
990 
991 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
992 	vmg.prev = vma;
993 	vmg.vma = vma;
994 	ASSERT_EQ(merge_existing(&vmg), NULL);
995 
996 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
997 	vmg.prev = vma;
998 	vmg.vma = vma;
999 	ASSERT_EQ(merge_existing(&vmg), NULL);
1000 
1001 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1002 	vmg.prev = vma;
1003 	vmg.vma = vma;
1004 	ASSERT_EQ(merge_existing(&vmg), NULL);
1005 
1006 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1007 	vmg.prev = vma;
1008 	vmg.vma = vma;
1009 	ASSERT_EQ(merge_existing(&vmg), NULL);
1010 
1011 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1012 
1013 	return true;
1014 }
1015 
1016 static bool test_anon_vma_non_mergeable(void)
1017 {
1018 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1019 	struct mm_struct mm = {};
1020 	VMA_ITERATOR(vmi, &mm, 0);
1021 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1022 	struct vma_merge_struct vmg = {
1023 		.mm = &mm,
1024 		.vmi = &vmi,
1025 	};
1026 	struct anon_vma_chain dummy_anon_vma_chain1 = {
1027 		.anon_vma = &dummy_anon_vma,
1028 	};
1029 	struct anon_vma_chain dummy_anon_vma_chain2 = {
1030 		.anon_vma = &dummy_anon_vma,
1031 	};
1032 
1033 	/*
1034 	 * In the modified VMA merge case, where we would merge both the left
1035 	 * and right VMAs but prev and next have incompatible anon_vma objects,
1036 	 * we revert to a merge of prev and vma:
1037 	 *
1038 	 *    <-->
1039 	 * 0123456789
1040 	 * PPPVVVVNNN
1041 	 *            ->
1042 	 * 0123456789
1043 	 * PPPPPPPNNN
1044 	 */
1045 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1046 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1047 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1048 
1049 	/*
1050 	 * Give both prev and next a single anon_vma_chain entry, so they will
1051 	 * merge with the NULL vmg->anon_vma.
1052 	 *
1053 	 * However, when prev is compared to next, the merge should fail.
1054 	 */
1055 
1056 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1057 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1058 	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1059 	vma_prev->anon_vma = &dummy_anon_vma;
1060 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1061 
1062 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1063 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1064 	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1065 	vma_next->anon_vma = (struct anon_vma *)2;
1066 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1067 
1068 	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1069 
1070 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1071 	vmg.prev = vma_prev;
1072 	vmg.vma = vma;
1073 
1074 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1075 	ASSERT_EQ(vma_prev->vm_start, 0);
1076 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1077 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1078 	ASSERT_TRUE(vma_write_started(vma_prev));
1079 	ASSERT_FALSE(vma_write_started(vma_next));
1080 
1081 	/* Clear down and reset. */
1082 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1083 
1084 	/*
1085 	 * Now consider the new VMA case. This is equivalent, only adding a new
1086 	 * VMA in a gap between prev and next.
1087 	 *
1088 	 *    <-->
1089 	 * 0123456789
1090 	 * PPP****NNN
1091 	 *            ->
1092 	 * 0123456789
1093 	 * PPPPPPPNNN
1094 	 */
1095 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1096 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1097 
1098 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1099 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1100 	vma_prev->anon_vma = (struct anon_vma *)1;
1101 
1102 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1103 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1104 	vma_next->anon_vma = (struct anon_vma *)2;
1105 
1106 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1107 	vmg.prev = vma_prev;
1108 
1109 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1110 	ASSERT_EQ(vma_prev->vm_start, 0);
1111 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1112 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1113 	ASSERT_TRUE(vma_write_started(vma_prev));
1114 	ASSERT_FALSE(vma_write_started(vma_next));
1115 
1116 	/* Final cleanup. */
1117 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1118 
1119 	return true;
1120 }
1121 
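/*
 * Check that, when a merge extends a VMA over one which has an anon_vma, the
 * anon_vma is duplicated and assigned to the extended VMA, in each direction
 * a merge can occur.
 */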
1122 static bool test_dup_anon_vma(void)
1123 {
1124 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1125 	struct mm_struct mm = {};
1126 	VMA_ITERATOR(vmi, &mm, 0);
1127 	struct vma_merge_struct vmg = {
1128 		.mm = &mm,
1129 		.vmi = &vmi,
1130 	};
1131 	struct anon_vma_chain dummy_anon_vma_chain = {
1132 		.anon_vma = &dummy_anon_vma,
1133 	};
1134 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1135 
1136 	reset_dummy_anon_vma();
1137 
1138 	/*
1139 	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1140 	 * assigns it to the expanded VMA.
1141 	 *
1142 	 * This covers new VMA merging, as these operations amount to a VMA
1143 	 * expand.
1144 	 */
1145 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1146 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1147 	vma_next->anon_vma = &dummy_anon_vma;
1148 
1149 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1150 	vmg.vma = vma_prev;
1151 	vmg.next = vma_next;
1152 
1153 	ASSERT_EQ(expand_existing(&vmg), 0);
1154 
1155 	/* Will have been cloned. */
1156 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1157 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1158 
1159 	/* Cleanup ready for next run. */
1160 	cleanup_mm(&mm, &vmi);
1161 
1162 	/*
1163 	 * next has anon_vma, we assign to prev.
1164 	 *
1165 	 *         |<----->|
1166 	 * |-------*********-------|
1167 	 *   prev     vma     next
1168 	 *  extend   delete  delete
1169 	 */
1170 
1171 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1172 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1173 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1174 
1175 	/* Initialise avc so mergeability check passes. */
1176 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1177 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1178 
1179 	vma_next->anon_vma = &dummy_anon_vma;
1180 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1181 	vmg.prev = vma_prev;
1182 	vmg.vma = vma;
1183 
1184 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1185 
1186 	ASSERT_EQ(vma_prev->vm_start, 0);
1187 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1188 
1189 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1190 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1191 
1192 	cleanup_mm(&mm, &vmi);
1193 
1194 	/*
1195 	 * vma has anon_vma, we assign to prev.
1196 	 *
1197 	 *         |<----->|
1198 	 * |-------*********-------|
1199 	 *   prev     vma     next
1200 	 *  extend   delete  delete
1201 	 */
1202 
1203 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1204 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1205 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1206 
1207 	vma->anon_vma = &dummy_anon_vma;
1208 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1209 	vmg.prev = vma_prev;
1210 	vmg.vma = vma;
1211 
1212 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1213 
1214 	ASSERT_EQ(vma_prev->vm_start, 0);
1215 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1216 
1217 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1218 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1219 
1220 	cleanup_mm(&mm, &vmi);
1221 
1222 	/*
1223 	 * vma has anon_vma, we assign to prev.
1224 	 *
1225 	 *         |<----->|
1226 	 * |-------*************
1227 	 *   prev       vma
1228 	 *  extend shrink/delete
1229 	 */
1230 
1231 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1232 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1233 
1234 	vma->anon_vma = &dummy_anon_vma;
1235 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1236 	vmg.prev = vma_prev;
1237 	vmg.vma = vma;
1238 
1239 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1240 
1241 	ASSERT_EQ(vma_prev->vm_start, 0);
1242 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1243 
1244 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1245 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1246 
1247 	cleanup_mm(&mm, &vmi);
1248 
1249 	/*
1250 	 * vma has anon_vma, we assign to next.
1251 	 *
1252 	 *     |<----->|
1253 	 * *************-------|
1254 	 *      vma       next
1255 	 * shrink/delete extend
1256 	 */
1257 
1258 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1259 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1260 
1261 	vma->anon_vma = &dummy_anon_vma;
1262 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1263 	vmg.prev = vma;
1264 	vmg.vma = vma;
1265 
1266 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1267 
1268 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1269 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1270 
1271 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1272 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1273 
1274 	cleanup_mm(&mm, &vmi);
1275 	return true;
1276 }
1277 
1278 static bool test_vmi_prealloc_fail(void)
1279 {
1280 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1281 	struct mm_struct mm = {};
1282 	VMA_ITERATOR(vmi, &mm, 0);
1283 	struct vma_merge_struct vmg = {
1284 		.mm = &mm,
1285 		.vmi = &vmi,
1286 	};
1287 	struct vm_area_struct *vma_prev, *vma;
1288 
1289 	/*
1290 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1291 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1292 	 * the duplicated anon_vma is unlinked.
1293 	 */
1294 
1295 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1296 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1297 	vma->anon_vma = &dummy_anon_vma;
1298 
1299 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1300 	vmg.prev = vma_prev;
1301 	vmg.vma = vma;
1302 
1303 	fail_prealloc = true;
1304 
1305 	/* This will cause the merge to fail. */
1306 	ASSERT_EQ(merge_existing(&vmg), NULL);
1307 	/* We will already have assigned the anon_vma. */
1308 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1309 	/* And it was both cloned and unlinked. */
1310 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1311 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1312 
1313 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1314 
1315 	/*
1316 	 * We repeat the same operation for expanding a VMA, which is what new
1317 	 * VMA merging ultimately uses as well. This asserts that unlinking is
1318 	 * performed in this case too.
1319 	 */
1320 
1321 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1322 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1323 	vma->anon_vma = &dummy_anon_vma;
1324 
1325 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1326 	vmg.vma = vma_prev;
1327 	vmg.next = vma;
1328 
1329 	fail_prealloc = true;
1330 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1331 
1332 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1333 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1334 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1335 
1336 	cleanup_mm(&mm, &vmi);
1337 	return true;
1338 }
1339 
1340 static bool test_merge_extend(void)
1341 {
1342 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1343 	struct mm_struct mm = {};
1344 	VMA_ITERATOR(vmi, &mm, 0x1000);
1345 	struct vm_area_struct *vma;
1346 
1347 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1348 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1349 
1350 	/*
1351 	 * Extend a VMA into the gap between itself and the following VMA.
1352 	 * This should result in a merge.
1353 	 *
1354 	 * <->
1355 	 * *  *
1356 	 *
1357 	 */
1358 
1359 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1360 	ASSERT_EQ(vma->vm_start, 0);
1361 	ASSERT_EQ(vma->vm_end, 0x4000);
1362 	ASSERT_EQ(vma->vm_pgoff, 0);
1363 	ASSERT_TRUE(vma_write_started(vma));
1364 	ASSERT_EQ(mm.map_count, 1);
1365 
1366 	cleanup_mm(&mm, &vmi);
1367 	return true;
1368 }
1369 
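/*
 * Exercise copy_vma(): first move a VMA to a non-adjacent range, where no
 * merge is possible and a new VMA is returned, then move one so that it
 * lands immediately before an existing compatible VMA and check that the
 * two are merged.
 */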
1370 static bool test_copy_vma(void)
1371 {
1372 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1373 	struct mm_struct mm = {};
1374 	bool need_locks = false;
1375 	VMA_ITERATOR(vmi, &mm, 0);
1376 	struct vm_area_struct *vma, *vma_new, *vma_next;
1377 
1378 	/* Move backwards and do not merge. */
1379 
1380 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1381 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1382 
1383 	ASSERT_NE(vma_new, vma);
1384 	ASSERT_EQ(vma_new->vm_start, 0);
1385 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1386 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1387 
1388 	cleanup_mm(&mm, &vmi);
1389 
1390 	/* Move a VMA into position next to another and merge the two. */
1391 
1392 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1393 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1394 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1395 
1396 	ASSERT_EQ(vma_new, vma_next);
1397 
1398 	cleanup_mm(&mm, &vmi);
1399 	return true;
1400 }
1401 
1402 int main(void)
1403 {
1404 	int num_tests = 0, num_fail = 0;
1405 
1406 	maple_tree_init();
1407 
1408 #define TEST(name)							\
1409 	do {								\
1410 		num_tests++;						\
1411 		if (!test_##name()) {					\
1412 			num_fail++;					\
1413 			fprintf(stderr, "Test " #name " FAILED\n");	\
1414 		}							\
1415 	} while (0)
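	/*
	 * To add a test, define a static bool test_<name>(void) above which
	 * returns true on success, and invoke TEST(<name>) in the list below.
	 */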
1416 
1417 	/* Very simple tests to kick the tyres. */
1418 	TEST(simple_merge);
1419 	TEST(simple_modify);
1420 	TEST(simple_expand);
1421 	TEST(simple_shrink);
1422 
1423 	TEST(merge_new);
1424 	TEST(vma_merge_special_flags);
1425 	TEST(vma_merge_with_close);
1426 	TEST(vma_merge_new_with_close);
1427 	TEST(merge_existing);
1428 	TEST(anon_vma_non_mergeable);
1429 	TEST(dup_anon_vma);
1430 	TEST(vmi_prealloc_fail);
1431 	TEST(merge_extend);
1432 	TEST(copy_vma);
1433 
1434 #undef TEST
1435 
1436 	printf("%d tests run, %d passed, %d failed.\n",
1437 	       num_tests, num_tests - num_fail, num_fail);
1438 
1439 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1440 }
1441