xref: /linux/tools/testing/vma/vma.c (revision b05f8d7e077952d14acb63e3ccdf5f64404b59a4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "generated/bit-length.h"
8 
9 #include "maple-shared.h"
10 #include "vma_internal.h"
11 
12 /* Include so the header guard is set. */
13 #include "../../../mm/vma.h"
14 
15 static bool fail_prealloc;
16 
17 /* Then override vma_iter_prealloc() so we can choose to fail it. */
18 #define vma_iter_prealloc(vmi, vma)					\
19 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
20 
21 #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
22 
23 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
24 unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
25 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
26 
27 /*
28  * Directly import the VMA implementation here. Our vma_internal.h wrapper
29  * provides userland-equivalent functionality for everything vma.c uses.
30  */
31 #include "../../../mm/vma.c"
32 
33 const struct vm_operations_struct vma_dummy_vm_ops;
34 static struct anon_vma dummy_anon_vma;
35 
36 #define ASSERT_TRUE(_expr)						\
37 	do {								\
38 		if (!(_expr)) {						\
39 			fprintf(stderr,					\
40 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
41 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
42 			return false;					\
43 		}							\
44 	} while (0)
45 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
46 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
47 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
48 
49 static struct task_struct __current;
50 
51 struct task_struct *get_current(void)
52 {
53 	return &__current;
54 }
55 
56 unsigned long rlimit(unsigned int limit)
57 {
58 	return (unsigned long)-1;
59 }
60 
61 /* Helper function to simply allocate a VMA. */
62 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
63 					unsigned long start,
64 					unsigned long end,
65 					pgoff_t pgoff,
66 					vm_flags_t flags)
67 {
68 	struct vm_area_struct *ret = vm_area_alloc(mm);
69 
70 	if (ret == NULL)
71 		return NULL;
72 
73 	ret->vm_start = start;
74 	ret->vm_end = end;
75 	ret->vm_pgoff = pgoff;
76 	ret->__vm_flags = flags;
77 	vma_assert_detached(ret);
78 
79 	return ret;
80 }
81 
82 /* Helper function to link an existing VMA to the maple tree. */
83 static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
84 {
85 	int res;
86 
87 	res = vma_link(mm, vma);
88 	if (!res)
89 		vma_assert_attached(vma);
90 	return res;
91 }
92 
93 /* Helper function to allocate a VMA and link it to the tree. */
94 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
95 						 unsigned long start,
96 						 unsigned long end,
97 						 pgoff_t pgoff,
98 						 vm_flags_t flags)
99 {
100 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
101 
102 	if (vma == NULL)
103 		return NULL;
104 
105 	if (attach_vma(mm, vma)) {
106 		vm_area_free(vma);
107 		return NULL;
108 	}
109 
110 	/*
111 	 * Reset this counter which we use to track whether writes have
112 	 * begun. Linking to the tree will have caused this to be incremented,
113 	 * which means we will get a false positive otherwise.
114 	 */
115 	vma->vm_lock_seq = UINT_MAX;
116 
117 	return vma;
118 }
119 
120 /* Helper function which provides a wrapper around a new VMA merge operation. */
121 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
122 {
123 	struct vm_area_struct *vma;
124 	/*
125 	 * For convenience, get the prev and next VMAs, which the new VMA merge
126 	 * operation requires.
127 	 */
128 	vmg->next = vma_next(vmg->vmi);
129 	vmg->prev = vma_prev(vmg->vmi);
130 	vma_iter_next_range(vmg->vmi);
131 
132 	vma = vma_merge_new_range(vmg);
133 	if (vma)
134 		vma_assert_attached(vma);
135 
136 	return vma;
137 }
138 
139 /*
140  * Helper function which provides a wrapper around an existing VMA merge
141  * operation.
142  */
143 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
144 {
145 	struct vm_area_struct *vma;
146 
147 	vma = vma_merge_existing_range(vmg);
148 	if (vma)
149 		vma_assert_attached(vma);
150 	return vma;
151 }
152 
153 /*
154  * Helper function which provides a wrapper around the expansion of an existing
155  * VMA.
156  */
157 static int expand_existing(struct vma_merge_struct *vmg)
158 {
159 	return vma_expand(vmg);
160 }
161 
162 /*
163  * Helper function to reset merge state and the associated VMA iterator to a
164  * specified new range.
165  */
166 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
167 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
168 {
169 	vma_iter_set(vmg->vmi, start);
170 
171 	vmg->prev = NULL;
172 	vmg->middle = NULL;
173 	vmg->next = NULL;
174 	vmg->target = NULL;
175 
176 	vmg->start = start;
177 	vmg->end = end;
178 	vmg->pgoff = pgoff;
179 	vmg->flags = flags;
180 
181 	vmg->just_expand = false;
182 	vmg->__remove_middle = false;
183 	vmg->__remove_next = false;
184 	vmg->__adjust_middle_start = false;
185 	vmg->__adjust_next_start = false;
186 }
187 
188 /* Helper function to set both the VMG range and its anon_vma. */
189 static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
190 				   unsigned long end, pgoff_t pgoff, vm_flags_t flags,
191 				   struct anon_vma *anon_vma)
192 {
193 	vmg_set_range(vmg, start, end, pgoff, flags);
194 	vmg->anon_vma = anon_vma;
195 }
196 
197 /*
198  * Helper function to try to merge a new VMA.
199  *
200  * Update vmg and its iterator, then attempt the merge; if no merge is
201  * possible, allocate a new VMA, link it to the maple tree and return it.
202  */
203 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
204 						struct vma_merge_struct *vmg,
205 						unsigned long start, unsigned long end,
206 						pgoff_t pgoff, vm_flags_t flags,
207 						bool *was_merged)
208 {
209 	struct vm_area_struct *merged;
210 
211 	vmg_set_range(vmg, start, end, pgoff, flags);
212 
213 	merged = merge_new(vmg);
214 	if (merged) {
215 		*was_merged = true;
216 		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
217 		return merged;
218 	}
219 
220 	*was_merged = false;
221 
222 	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
223 
224 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
225 }
226 
227 /*
228  * Helper function to reset the dummy anon_vma to indicate it has not been
229  * duplicated.
230  */
231 static void reset_dummy_anon_vma(void)
232 {
233 	dummy_anon_vma.was_cloned = false;
234 	dummy_anon_vma.was_unlinked = false;
235 }
236 
237 /*
238  * Helper function to remove all VMAs and destroy the maple tree associated with
239  * a virtual address space. Returns the number of VMAs that were freed.
240  */
241 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
242 {
243 	struct vm_area_struct *vma;
244 	int count = 0;
245 
246 	fail_prealloc = false;
247 	reset_dummy_anon_vma();
248 
249 	vma_iter_set(vmi, 0);
250 	for_each_vma(*vmi, vma) {
251 		vm_area_free(vma);
252 		count++;
253 	}
254 
255 	mtree_destroy(&mm->mm_mt);
256 	mm->map_count = 0;
257 	return count;
258 }
259 
260 /* Helper function to determine if VMA has had vma_start_write() performed. */
261 static bool vma_write_started(struct vm_area_struct *vma)
262 {
263 	int seq = vma->vm_lock_seq;
264 
265 	/* We reset after each check. */
266 	vma->vm_lock_seq = UINT_MAX;
267 
268 	/* The vma_start_write() stub simply increments this value. */
269 	return seq > -1;
270 }
271 
272 /* Helper function providing a dummy vm_ops->close() method. */
273 static void dummy_close(struct vm_area_struct *)
274 {
275 }
276 
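/* Helpers to assign an anon_vma to a VMA via a single anon_vma_chain entry. */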
277 static void __vma_set_dummy_anon_vma(struct vm_area_struct *vma,
278 				     struct anon_vma_chain *avc,
279 				     struct anon_vma *anon_vma)
280 {
281 	vma->anon_vma = anon_vma;
282 	INIT_LIST_HEAD(&vma->anon_vma_chain);
283 	list_add(&avc->same_vma, &vma->anon_vma_chain);
284 	avc->anon_vma = vma->anon_vma;
285 }
286 
287 static void vma_set_dummy_anon_vma(struct vm_area_struct *vma,
288 				   struct anon_vma_chain *avc)
289 {
290 	__vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma);
291 }
292 
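/* Test a new range bridging two compatible VMAs merges all three into one. */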
293 static bool test_simple_merge(void)
294 {
295 	struct vm_area_struct *vma;
296 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
297 	struct mm_struct mm = {};
298 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
299 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
300 	VMA_ITERATOR(vmi, &mm, 0x1000);
301 	struct vma_merge_struct vmg = {
302 		.mm = &mm,
303 		.vmi = &vmi,
304 		.start = 0x1000,
305 		.end = 0x2000,
306 		.flags = flags,
307 		.pgoff = 1,
308 	};
309 
310 	ASSERT_FALSE(attach_vma(&mm, vma_left));
311 	ASSERT_FALSE(attach_vma(&mm, vma_right));
312 
313 	vma = merge_new(&vmg);
314 	ASSERT_NE(vma, NULL);
315 
316 	ASSERT_EQ(vma->vm_start, 0);
317 	ASSERT_EQ(vma->vm_end, 0x3000);
318 	ASSERT_EQ(vma->vm_pgoff, 0);
319 	ASSERT_EQ(vma->vm_flags, flags);
320 
321 	vm_area_free(vma);
322 	mtree_destroy(&mm.mm_mt);
323 
324 	return true;
325 }
326 
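/* Test that vma_modify_flags() splits a VMA into the expected three parts. */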
327 static bool test_simple_modify(void)
328 {
329 	struct vm_area_struct *vma;
330 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
331 	struct mm_struct mm = {};
332 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
333 	VMA_ITERATOR(vmi, &mm, 0x1000);
334 
335 	ASSERT_FALSE(attach_vma(&mm, init_vma));
336 
337 	/*
338 	 * The flags will not be changed; the vma_modify_flags() function
339 	 * performs the merge/split only.
340 	 */
341 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
342 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
343 	ASSERT_NE(vma, NULL);
344 	/* We modify the provided VMA, and on split allocate new VMAs. */
345 	ASSERT_EQ(vma, init_vma);
346 
347 	ASSERT_EQ(vma->vm_start, 0x1000);
348 	ASSERT_EQ(vma->vm_end, 0x2000);
349 	ASSERT_EQ(vma->vm_pgoff, 1);
350 
351 	/*
352 	 * Now walk through the three split VMAs and make sure they are as
353 	 * expected.
354 	 */
355 
356 	vma_iter_set(&vmi, 0);
357 	vma = vma_iter_load(&vmi);
358 
359 	ASSERT_EQ(vma->vm_start, 0);
360 	ASSERT_EQ(vma->vm_end, 0x1000);
361 	ASSERT_EQ(vma->vm_pgoff, 0);
362 
363 	vm_area_free(vma);
364 	vma_iter_clear(&vmi);
365 
366 	vma = vma_next(&vmi);
367 
368 	ASSERT_EQ(vma->vm_start, 0x1000);
369 	ASSERT_EQ(vma->vm_end, 0x2000);
370 	ASSERT_EQ(vma->vm_pgoff, 1);
371 
372 	vm_area_free(vma);
373 	vma_iter_clear(&vmi);
374 
375 	vma = vma_next(&vmi);
376 
377 	ASSERT_EQ(vma->vm_start, 0x2000);
378 	ASSERT_EQ(vma->vm_end, 0x3000);
379 	ASSERT_EQ(vma->vm_pgoff, 2);
380 
381 	vm_area_free(vma);
382 	mtree_destroy(&mm.mm_mt);
383 
384 	return true;
385 }
386 
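/* Test that vma_expand() grows a VMA over the empty range that follows it. */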
387 static bool test_simple_expand(void)
388 {
389 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
390 	struct mm_struct mm = {};
391 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
392 	VMA_ITERATOR(vmi, &mm, 0);
393 	struct vma_merge_struct vmg = {
394 		.vmi = &vmi,
395 		.middle = vma,
396 		.start = 0,
397 		.end = 0x3000,
398 		.pgoff = 0,
399 	};
400 
401 	ASSERT_FALSE(attach_vma(&mm, vma));
402 
403 	ASSERT_FALSE(expand_existing(&vmg));
404 
405 	ASSERT_EQ(vma->vm_start, 0);
406 	ASSERT_EQ(vma->vm_end, 0x3000);
407 	ASSERT_EQ(vma->vm_pgoff, 0);
408 
409 	vm_area_free(vma);
410 	mtree_destroy(&mm.mm_mt);
411 
412 	return true;
413 }
414 
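/* Test that vma_shrink() reduces a VMA to the requested sub-range. */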
415 static bool test_simple_shrink(void)
416 {
417 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
418 	struct mm_struct mm = {};
419 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
420 	VMA_ITERATOR(vmi, &mm, 0);
421 
422 	ASSERT_FALSE(attach_vma(&mm, vma));
423 
424 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
425 
426 	ASSERT_EQ(vma->vm_start, 0);
427 	ASSERT_EQ(vma->vm_end, 0x1000);
428 	ASSERT_EQ(vma->vm_pgoff, 0);
429 
430 	vm_area_free(vma);
431 	mtree_destroy(&mm.mm_mt);
432 
433 	return true;
434 }
435 
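/*
 * Test new VMA merges against prev and/or next across a series of layouts,
 * ending with a single VMA spanning the whole range.
 */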
436 static bool test_merge_new(void)
437 {
438 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
439 	struct mm_struct mm = {};
440 	VMA_ITERATOR(vmi, &mm, 0);
441 	struct vma_merge_struct vmg = {
442 		.mm = &mm,
443 		.vmi = &vmi,
444 	};
445 	struct anon_vma_chain dummy_anon_vma_chain_a = {
446 		.anon_vma = &dummy_anon_vma,
447 	};
448 	struct anon_vma_chain dummy_anon_vma_chain_b = {
449 		.anon_vma = &dummy_anon_vma,
450 	};
451 	struct anon_vma_chain dummy_anon_vma_chain_c = {
452 		.anon_vma = &dummy_anon_vma,
453 	};
454 	struct anon_vma_chain dummy_anon_vma_chain_d = {
455 		.anon_vma = &dummy_anon_vma,
456 	};
457 	const struct vm_operations_struct vm_ops = {
458 		.close = dummy_close,
459 	};
460 	int count;
461 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
462 	bool merged;
463 
464 	/*
465 	 * 0123456789abc
466 	 * AA B       CC
467 	 */
468 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
469 	ASSERT_NE(vma_a, NULL);
470 	/* We give each VMA a single avc so we can test anon_vma duplication. */
471 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
472 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
473 
474 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
475 	ASSERT_NE(vma_b, NULL);
476 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
477 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
478 
479 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
480 	ASSERT_NE(vma_c, NULL);
481 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
482 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
483 
484 	/*
485 	 * NO merge.
486 	 *
487 	 * 0123456789abc
488 	 * AA B   **  CC
489 	 */
490 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
491 	ASSERT_NE(vma_d, NULL);
492 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
493 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
494 	ASSERT_FALSE(merged);
495 	ASSERT_EQ(mm.map_count, 4);
496 
497 	/*
498 	 * Merge BOTH sides.
499 	 *
500 	 * 0123456789abc
501 	 * AA*B   DD  CC
502 	 */
503 	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
504 	vma_b->anon_vma = &dummy_anon_vma;
505 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
506 	ASSERT_EQ(vma, vma_a);
507 	/* Merge with A, delete B. */
508 	ASSERT_TRUE(merged);
509 	ASSERT_EQ(vma->vm_start, 0);
510 	ASSERT_EQ(vma->vm_end, 0x4000);
511 	ASSERT_EQ(vma->vm_pgoff, 0);
512 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
513 	ASSERT_TRUE(vma_write_started(vma));
514 	ASSERT_EQ(mm.map_count, 3);
515 
516 	/*
517 	 * Merge to PREVIOUS VMA.
518 	 *
519 	 * 0123456789abc
520 	 * AAAA*  DD  CC
521 	 */
522 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
523 	ASSERT_EQ(vma, vma_a);
524 	/* Extend A. */
525 	ASSERT_TRUE(merged);
526 	ASSERT_EQ(vma->vm_start, 0);
527 	ASSERT_EQ(vma->vm_end, 0x5000);
528 	ASSERT_EQ(vma->vm_pgoff, 0);
529 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
530 	ASSERT_TRUE(vma_write_started(vma));
531 	ASSERT_EQ(mm.map_count, 3);
532 
533 	/*
534 	 * Merge to NEXT VMA.
535 	 *
536 	 * 0123456789abc
537 	 * AAAAA *DD  CC
538 	 */
539 	vma_d->anon_vma = &dummy_anon_vma;
540 	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
541 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
542 	ASSERT_EQ(vma, vma_d);
543 	/* Prepend. */
544 	ASSERT_TRUE(merged);
545 	ASSERT_EQ(vma->vm_start, 0x6000);
546 	ASSERT_EQ(vma->vm_end, 0x9000);
547 	ASSERT_EQ(vma->vm_pgoff, 6);
548 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
549 	ASSERT_TRUE(vma_write_started(vma));
550 	ASSERT_EQ(mm.map_count, 3);
551 
552 	/*
553 	 * Merge BOTH sides.
554 	 *
555 	 * 0123456789abc
556 	 * AAAAA*DDD  CC
557 	 */
558 	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
559 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
560 	ASSERT_EQ(vma, vma_a);
561 	/* Merge with A, delete D. */
562 	ASSERT_TRUE(merged);
563 	ASSERT_EQ(vma->vm_start, 0);
564 	ASSERT_EQ(vma->vm_end, 0x9000);
565 	ASSERT_EQ(vma->vm_pgoff, 0);
566 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
567 	ASSERT_TRUE(vma_write_started(vma));
568 	ASSERT_EQ(mm.map_count, 2);
569 
570 	/*
571 	 * Merge to NEXT VMA.
572 	 *
573 	 * 0123456789abc
574 	 * AAAAAAAAA *CC
575 	 */
576 	vma_c->anon_vma = &dummy_anon_vma;
577 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
578 	ASSERT_EQ(vma, vma_c);
579 	/* Prepend C. */
580 	ASSERT_TRUE(merged);
581 	ASSERT_EQ(vma->vm_start, 0xa000);
582 	ASSERT_EQ(vma->vm_end, 0xc000);
583 	ASSERT_EQ(vma->vm_pgoff, 0xa);
584 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
585 	ASSERT_TRUE(vma_write_started(vma));
586 	ASSERT_EQ(mm.map_count, 2);
587 
588 	/*
589 	 * Merge BOTH sides.
590 	 *
591 	 * 0123456789abc
592 	 * AAAAAAAAA*CCC
593 	 */
594 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
595 	ASSERT_EQ(vma, vma_a);
596 	/* Extend A and delete C. */
597 	ASSERT_TRUE(merged);
598 	ASSERT_EQ(vma->vm_start, 0);
599 	ASSERT_EQ(vma->vm_end, 0xc000);
600 	ASSERT_EQ(vma->vm_pgoff, 0);
601 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
602 	ASSERT_TRUE(vma_write_started(vma));
603 	ASSERT_EQ(mm.map_count, 1);
604 
605 	/*
606 	 * Final state.
607 	 *
608 	 * 0123456789abc
609 	 * AAAAAAAAAAAAA
610 	 */
611 
612 	count = 0;
613 	vma_iter_set(&vmi, 0);
614 	for_each_vma(vmi, vma) {
615 		ASSERT_NE(vma, NULL);
616 		ASSERT_EQ(vma->vm_start, 0);
617 		ASSERT_EQ(vma->vm_end, 0xc000);
618 		ASSERT_EQ(vma->vm_pgoff, 0);
619 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
620 
621 		vm_area_free(vma);
622 		count++;
623 	}
624 
625 	/* Should only have one VMA left (though freed) after all is done. */
626 	ASSERT_EQ(count, 1);
627 
628 	mtree_destroy(&mm.mm_mt);
629 	return true;
630 }
631 
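/* Test that VMAs with VM_SPECIAL flags set are never merged. */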
632 static bool test_vma_merge_special_flags(void)
633 {
634 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
635 	struct mm_struct mm = {};
636 	VMA_ITERATOR(vmi, &mm, 0);
637 	struct vma_merge_struct vmg = {
638 		.mm = &mm,
639 		.vmi = &vmi,
640 	};
641 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
642 	vm_flags_t all_special_flags = 0;
643 	int i;
644 	struct vm_area_struct *vma_left, *vma;
645 
646 	/* Make sure there aren't new VM_SPECIAL flags. */
647 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
648 		all_special_flags |= special_flags[i];
649 	}
650 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
651 
652 	/*
653 	 * 01234
654 	 * AAA
655 	 */
656 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
657 	ASSERT_NE(vma_left, NULL);
658 
659 	/* 1. Set up new VMA with special flag that would otherwise merge. */
660 
661 	/*
662 	 * 01234
663 	 * AAA*
664 	 *
665 	 * This would merge if not for the VM_SPECIAL flag.
666 	 */
667 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
668 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
669 		vm_flags_t special_flag = special_flags[i];
670 
671 		vma_left->__vm_flags = flags | special_flag;
672 		vmg.flags = flags | special_flag;
673 		vma = merge_new(&vmg);
674 		ASSERT_EQ(vma, NULL);
675 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
676 	}
677 
678 	/* 2. Modify VMA with special flag that would otherwise merge. */
679 
680 	/*
681 	 * 01234
682 	 * AAAB
683 	 *
684 	 * Create a VMA to modify.
685 	 */
686 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
687 	ASSERT_NE(vma, NULL);
688 	vmg.middle = vma;
689 
690 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
691 		vm_flags_t special_flag = special_flags[i];
692 
693 		vma_left->__vm_flags = flags | special_flag;
694 		vmg.flags = flags | special_flag;
695 		vma = merge_existing(&vmg);
696 		ASSERT_EQ(vma, NULL);
697 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
698 	}
699 
700 	cleanup_mm(&mm, &vmi);
701 	return true;
702 }
703 
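/*
 * Test merge behaviour when one of the VMAs involved provides a
 * vm_ops->close() hook, which prevents that VMA from being deleted.
 */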
704 static bool test_vma_merge_with_close(void)
705 {
706 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
707 	struct mm_struct mm = {};
708 	VMA_ITERATOR(vmi, &mm, 0);
709 	struct vma_merge_struct vmg = {
710 		.mm = &mm,
711 		.vmi = &vmi,
712 	};
713 	const struct vm_operations_struct vm_ops = {
714 		.close = dummy_close,
715 	};
716 	struct vm_area_struct *vma_prev, *vma_next, *vma;
717 
718 	/*
719 	 * When merging VMAs we are not permitted to remove any VMA that has a
720 	 * vm_ops->close() hook.
721 	 *
722 	 * Considering the two possible adjacent VMAs to which a VMA can be
723 	 * merged:
724 	 *
725 	 * [ prev ][ vma ][ next ]
726 	 *
727 	 * In no case will we need to delete prev. If the operation is
728 	 * mergeable, then prev will be extended with one or both of vma and
729 	 * next deleted.
730 	 *
731 	 * As a result, during initial mergeability checks, only
732 	 * can_vma_merge_before() (which implies the VMA being merged with is
733 	 * 'next' as shown above) bothers to check to see whether the next VMA
734 	 * has a vm_ops->close() callback that will need to be called when
735 	 * removed.
736 	 *
737 	 * If it does, then we cannot merge as the resources that the close()
738 	 * operation potentially clears down are tied only to the existing VMA
739 	 * range and we have no way of extending those to the nearly merged one.
740 	 *
741 	 * We must consider two scenarios:
742 	 *
743 	 * A.
744 	 *
745 	 * vm_ops->close:     -       -    !NULL
746 	 *                 [ prev ][ vma ][ next ]
747 	 *
748 	 * Where prev may or may not be present/mergeable.
749 	 *
750 	 * This is picked up by a specific check in can_vma_merge_before().
751 	 *
752 	 * B.
753 	 *
754 	 * vm_ops->close:     -     !NULL
755 	 *                 [ prev ][ vma ]
756 	 *
757 	 * Where prev and vma are present and mergeable.
758 	 *
759 	 * This is picked up by a specific check in the modified VMA merge.
760 	 *
761 	 * IMPORTANT NOTE: We make the assumption that the following case:
762 	 *
763 	 *    -     !NULL   NULL
764 	 * [ prev ][ vma ][ next ]
765 	 *
766 	 * Cannot occur, because vma->vm_ops being the same implies the same
767 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
768 	 * would be set too, and thus scenario A would pick this up.
769 	 */
770 
771 	/*
772 	 * The only case of a new VMA merge that results in a VMA being deleted
773 	 * is one where both the previous and next VMAs are merged - in this
774 	 * instance the next VMA is deleted, and the previous VMA is extended.
775 	 *
776 	 * If we are unable to do so, we reduce the operation to simply
777 	 * extending the prev VMA and not merging next.
778 	 *
779 	 * 0123456789
780 	 * PPP**NNNN
781 	 *             ->
782 	 * 0123456789
783 	 * PPPPPNNNN
784 	 */
785 
786 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
787 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
788 	vma_next->vm_ops = &vm_ops;
789 
790 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
791 	ASSERT_EQ(merge_new(&vmg), vma_prev);
792 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
793 	ASSERT_EQ(vma_prev->vm_start, 0);
794 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
795 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
796 
797 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
798 
799 	/*
800 	 * When modifying an existing VMA there are further cases where we
801 	 * delete VMAs.
802 	 *
803 	 *    <>
804 	 * 0123456789
805 	 * PPPVV
806 	 *
807 	 * In this instance, if vma has a close hook, the merge simply cannot
808 	 * proceed.
809 	 */
810 
811 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
812 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
813 	vma->vm_ops = &vm_ops;
814 
815 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
816 	vmg.prev = vma_prev;
817 	vmg.middle = vma;
818 
819 	/*
820 	 * Modifying the VMA in a way that would otherwise merge should also
821 	 * fail.
822 	 */
823 	ASSERT_EQ(merge_existing(&vmg), NULL);
824 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
825 
826 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
827 
828 	/*
829 	 * This case is mirrored if merging with next.
830 	 *
831 	 *    <>
832 	 * 0123456789
833 	 *    VVNNNN
834 	 *
835 	 * In this instance, if vma has a close hook, the merge simply cannot
836 	 * proceed.
837 	 */
838 
839 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
840 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
841 	vma->vm_ops = &vm_ops;
842 
843 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
844 	vmg.middle = vma;
845 	ASSERT_EQ(merge_existing(&vmg), NULL);
846 	/*
847 	 * Initially this is misapprehended as an out of memory report, as the
848 	 * close() check is handled in the same way as anon_vma duplication
849 	 * failures; however, a subsequent patch resolves this.
850 	 */
851 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
852 
853 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
854 
855 	/*
856 	 * Finally, we consider two variants of the case where we modify a VMA
857 	 * to merge with both the previous and next VMAs.
858 	 *
859 	 * The first variant is where vma has a close hook. In this instance, no
860 	 * merge can proceed.
861 	 *
862 	 *    <>
863 	 * 0123456789
864 	 * PPPVVNNNN
865 	 */
866 
867 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
868 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
869 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
870 	vma->vm_ops = &vm_ops;
871 
872 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
873 	vmg.prev = vma_prev;
874 	vmg.middle = vma;
875 
876 	ASSERT_EQ(merge_existing(&vmg), NULL);
877 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
878 
879 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
880 
881 	/*
882 	 * The second variant is where next has a close hook. In this instance,
883 	 * we reduce the operation to a merge between prev and vma.
884 	 *
885 	 *    <>
886 	 * 0123456789
887 	 * PPPVVNNNN
888 	 *            ->
889 	 * 0123456789
890 	 * PPPPPNNNN
891 	 */
892 
893 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
894 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
895 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
896 	vma_next->vm_ops = &vm_ops;
897 
898 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
899 	vmg.prev = vma_prev;
900 	vmg.middle = vma;
901 
902 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
903 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
904 	ASSERT_EQ(vma_prev->vm_start, 0);
905 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
906 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
907 
908 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
909 
910 	return true;
911 }
912 
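/*
 * Test that a new VMA merge where both neighbours have vm_ops->close() hooks
 * expands prev but leaves next intact.
 */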
913 static bool test_vma_merge_new_with_close(void)
914 {
915 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
916 	struct mm_struct mm = {};
917 	VMA_ITERATOR(vmi, &mm, 0);
918 	struct vma_merge_struct vmg = {
919 		.mm = &mm,
920 		.vmi = &vmi,
921 	};
922 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
923 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
924 	const struct vm_operations_struct vm_ops = {
925 		.close = dummy_close,
926 	};
927 	struct vm_area_struct *vma;
928 
929 	/*
930 	 * We should allow the partial merge of a proposed new VMA if the
931 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
932 	 * compatible), e.g.:
933 	 *
934 	 *        New VMA
935 	 *    A  v-------v  B
936 	 * |-----|       |-----|
937 	 *  close         close
938 	 *
939 	 * Since the rule is to not DELETE a VMA with a close operation, this
940 	 * should be permitted; but rather than expanding A and deleting B, we
941 	 * should simply expand A and leave B intact, e.g.:
942 	 *
943 	 *        New VMA
944 	 *       A          B
945 	 * |------------||-----|
946 	 *  close         close
947 	 */
948 
949 	/* Give both prev and next a vm_ops->close() hook. */
950 	vma_prev->vm_ops = &vm_ops;
951 	vma_next->vm_ops = &vm_ops;
952 
953 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
954 	vma = merge_new(&vmg);
955 	ASSERT_NE(vma, NULL);
956 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
957 	ASSERT_EQ(vma->vm_start, 0);
958 	ASSERT_EQ(vma->vm_end, 0x5000);
959 	ASSERT_EQ(vma->vm_pgoff, 0);
960 	ASSERT_EQ(vma->vm_ops, &vm_ops);
961 	ASSERT_TRUE(vma_write_started(vma));
962 	ASSERT_EQ(mm.map_count, 2);
963 
964 	cleanup_mm(&mm, &vmi);
965 	return true;
966 }
967 
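/*
 * Test merges of an existing (modified) VMA with prev and/or next, plus
 * ranges which must not merge at all.
 */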
968 static bool test_merge_existing(void)
969 {
970 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
971 	struct mm_struct mm = {};
972 	VMA_ITERATOR(vmi, &mm, 0);
973 	struct vm_area_struct *vma, *vma_prev, *vma_next;
974 	struct vma_merge_struct vmg = {
975 		.mm = &mm,
976 		.vmi = &vmi,
977 	};
978 	const struct vm_operations_struct vm_ops = {
979 		.close = dummy_close,
980 	};
981 	struct anon_vma_chain avc = {};
982 
983 	/*
984 	 * Merge right case - partial span.
985 	 *
986 	 *    <->
987 	 * 0123456789
988 	 *   VVVVNNN
989 	 *            ->
990 	 * 0123456789
991 	 *   VNNNNNN
992 	 */
993 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
994 	vma->vm_ops = &vm_ops; /* This should have no impact. */
995 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
996 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
997 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, flags, &dummy_anon_vma);
998 	vmg.middle = vma;
999 	vmg.prev = vma;
1000 	vma_set_dummy_anon_vma(vma, &avc);
1001 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1002 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1003 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1004 	ASSERT_EQ(vma_next->vm_end, 0x9000);
1005 	ASSERT_EQ(vma_next->vm_pgoff, 3);
1006 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1007 	ASSERT_EQ(vma->vm_start, 0x2000);
1008 	ASSERT_EQ(vma->vm_end, 0x3000);
1009 	ASSERT_EQ(vma->vm_pgoff, 2);
1010 	ASSERT_TRUE(vma_write_started(vma));
1011 	ASSERT_TRUE(vma_write_started(vma_next));
1012 	ASSERT_EQ(mm.map_count, 2);
1013 
1014 	/* Clear down and reset. */
1015 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1016 
1017 	/*
1018 	 * Merge right case - full span.
1019 	 *
1020 	 *   <-->
1021 	 * 0123456789
1022 	 *   VVVVNNN
1023 	 *            ->
1024 	 * 0123456789
1025 	 *   NNNNNNN
1026 	 */
1027 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
1028 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
1029 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
1030 	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, flags, &dummy_anon_vma);
1031 	vmg.middle = vma;
1032 	vma_set_dummy_anon_vma(vma, &avc);
1033 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1034 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1035 	ASSERT_EQ(vma_next->vm_start, 0x2000);
1036 	ASSERT_EQ(vma_next->vm_end, 0x9000);
1037 	ASSERT_EQ(vma_next->vm_pgoff, 2);
1038 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1039 	ASSERT_TRUE(vma_write_started(vma_next));
1040 	ASSERT_EQ(mm.map_count, 1);
1041 
1042 	/* Clear down and reset. We should have deleted vma. */
1043 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1044 
1045 	/*
1046 	 * Merge left case - partial span.
1047 	 *
1048 	 *    <->
1049 	 * 0123456789
1050 	 * PPPVVVV
1051 	 *            ->
1052 	 * 0123456789
1053 	 * PPPPPPV
1054 	 */
1055 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1056 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1057 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1058 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1059 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, flags, &dummy_anon_vma);
1060 	vmg.prev = vma_prev;
1061 	vmg.middle = vma;
1062 	vma_set_dummy_anon_vma(vma, &avc);
1063 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1064 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1065 	ASSERT_EQ(vma_prev->vm_start, 0);
1066 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
1067 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1068 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1069 	ASSERT_EQ(vma->vm_start, 0x6000);
1070 	ASSERT_EQ(vma->vm_end, 0x7000);
1071 	ASSERT_EQ(vma->vm_pgoff, 6);
1072 	ASSERT_TRUE(vma_write_started(vma_prev));
1073 	ASSERT_TRUE(vma_write_started(vma));
1074 	ASSERT_EQ(mm.map_count, 2);
1075 
1076 	/* Clear down and reset. */
1077 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1078 
1079 	/*
1080 	 * Merge left case - full span.
1081 	 *
1082 	 *    <-->
1083 	 * 0123456789
1084 	 * PPPVVVV
1085 	 *            ->
1086 	 * 0123456789
1087 	 * PPPPPPP
1088 	 */
1089 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1090 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1091 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1092 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, &dummy_anon_vma);
1093 	vmg.prev = vma_prev;
1094 	vmg.middle = vma;
1095 	vma_set_dummy_anon_vma(vma, &avc);
1096 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1097 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1098 	ASSERT_EQ(vma_prev->vm_start, 0);
1099 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1100 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1101 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1102 	ASSERT_TRUE(vma_write_started(vma_prev));
1103 	ASSERT_EQ(mm.map_count, 1);
1104 
1105 	/* Clear down and reset. We should have deleted vma. */
1106 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1107 
1108 	/*
1109 	 * Merge both case.
1110 	 *
1111 	 *    <-->
1112 	 * 0123456789
1113 	 * PPPVVVVNNN
1114 	 *             ->
1115 	 * 0123456789
1116 	 * PPPPPPPPPP
1117 	 */
1118 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1119 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1120 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1121 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1122 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, &dummy_anon_vma);
1123 	vmg.prev = vma_prev;
1124 	vmg.middle = vma;
1125 	vma_set_dummy_anon_vma(vma, &avc);
1126 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1127 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1128 	ASSERT_EQ(vma_prev->vm_start, 0);
1129 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
1130 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1131 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1132 	ASSERT_TRUE(vma_write_started(vma_prev));
1133 	ASSERT_EQ(mm.map_count, 1);
1134 
1135 	/* Clear down and reset. We should have deleted prev and next. */
1136 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1137 
1138 	/*
1139 	 * Non-merge ranges. The modified VMA merge operation assumes that the
1140 	 * caller always specifies ranges within the input VMA, so we need only
1141 	 * examine these cases.
1142 	 *
1143 	 *     -
1144 	 *      -
1145 	 *       -
1146 	 *     <->
1147 	 *     <>
1148 	 *      <>
1149 	 * 0123456789a
1150 	 * PPPVVVVVNNN
1151 	 */
1152 
1153 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1154 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1155 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1156 
1157 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1158 	vmg.prev = vma;
1159 	vmg.middle = vma;
1160 	ASSERT_EQ(merge_existing(&vmg), NULL);
1161 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1162 
1163 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1164 	vmg.prev = vma;
1165 	vmg.middle = vma;
1166 	ASSERT_EQ(merge_existing(&vmg), NULL);
1167 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1168 
1169 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1170 	vmg.prev = vma;
1171 	vmg.middle = vma;
1172 	ASSERT_EQ(merge_existing(&vmg), NULL);
1173 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1174 
1175 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1176 	vmg.prev = vma;
1177 	vmg.middle = vma;
1178 	ASSERT_EQ(merge_existing(&vmg), NULL);
1179 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1180 
1181 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1182 	vmg.prev = vma;
1183 	vmg.middle = vma;
1184 	ASSERT_EQ(merge_existing(&vmg), NULL);
1185 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1186 
1187 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1188 	vmg.prev = vma;
1189 	vmg.middle = vma;
1190 	ASSERT_EQ(merge_existing(&vmg), NULL);
1191 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1192 
1193 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1194 
1195 	return true;
1196 }
1197 
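/*
 * Test that incompatible anon_vmas on prev and next reduce the merge to prev
 * and the middle/new range only.
 */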
1198 static bool test_anon_vma_non_mergeable(void)
1199 {
1200 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1201 	struct mm_struct mm = {};
1202 	VMA_ITERATOR(vmi, &mm, 0);
1203 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1204 	struct vma_merge_struct vmg = {
1205 		.mm = &mm,
1206 		.vmi = &vmi,
1207 	};
1208 	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
1209 	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
1210 	struct anon_vma dummy_anon_vma_2;
1211 
1212 	/*
1213 	 * In the case of a modified VMA merge that would merge both the left and
1214 	 * right VMAs, but where prev and next have incompatible anon_vma objects,
1215 	 * we revert to a merge of prev and vma:
1216 	 *
1217 	 *    <-->
1218 	 * 0123456789
1219 	 * PPPVVVVNNN
1220 	 *            ->
1221 	 * 0123456789
1222 	 * PPPPPPPNNN
1223 	 */
1224 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1225 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1226 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1227 
1228 	/*
1229 	 * Give prev and next a single anon_vma_chain entry each, so they will
1230 	 * merge with the NULL vmg->anon_vma.
1231 	 *
1232 	 * However, when prev is compared to next, the merge should fail.
1233 	 */
1234 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, NULL);
1235 	vmg.prev = vma_prev;
1236 	vmg.middle = vma;
1237 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1238 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1239 
1240 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1241 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1242 	ASSERT_EQ(vma_prev->vm_start, 0);
1243 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1244 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1245 	ASSERT_TRUE(vma_write_started(vma_prev));
1246 	ASSERT_FALSE(vma_write_started(vma_next));
1247 
1248 	/* Clear down and reset. */
1249 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1250 
1251 	/*
1252 	 * Now consider the new VMA case. This is equivalent, only adding a new
1253 	 * VMA in a gap between prev and next.
1254 	 *
1255 	 *    <-->
1256 	 * 0123456789
1257 	 * PPP****NNN
1258 	 *            ->
1259 	 * 0123456789
1260 	 * PPPPPPPNNN
1261 	 */
1262 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1263 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1264 
1265 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, NULL);
1266 	vmg.prev = vma_prev;
1267 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1268 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1269 
1270 	vmg.anon_vma = NULL;
1271 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1272 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1273 	ASSERT_EQ(vma_prev->vm_start, 0);
1274 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1275 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1276 	ASSERT_TRUE(vma_write_started(vma_prev));
1277 	ASSERT_FALSE(vma_write_started(vma_next));
1278 
1279 	/* Final cleanup. */
1280 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1281 
1282 	return true;
1283 }
1284 
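/*
 * Test that an anon_vma is duplicated and assigned to the retained VMA in
 * each merge and expand case.
 */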
1285 static bool test_dup_anon_vma(void)
1286 {
1287 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1288 	struct mm_struct mm = {};
1289 	VMA_ITERATOR(vmi, &mm, 0);
1290 	struct vma_merge_struct vmg = {
1291 		.mm = &mm,
1292 		.vmi = &vmi,
1293 	};
1294 	struct anon_vma_chain dummy_anon_vma_chain = {
1295 		.anon_vma = &dummy_anon_vma,
1296 	};
1297 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1298 
1299 	reset_dummy_anon_vma();
1300 
1301 	/*
1302 	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1303 	 * assigns it to the expanded VMA.
1304 	 *
1305 	 * This covers new VMA merging, as these operations amount to a VMA
1306 	 * expand.
1307 	 */
1308 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1309 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1310 	vma_next->anon_vma = &dummy_anon_vma;
1311 
1312 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1313 	vmg.middle = vma_prev;
1314 	vmg.next = vma_next;
1315 
1316 	ASSERT_EQ(expand_existing(&vmg), 0);
1317 
1318 	/* Will have been cloned. */
1319 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1320 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1321 
1322 	/* Cleanup ready for next run. */
1323 	cleanup_mm(&mm, &vmi);
1324 
1325 	/*
1326 	 * next has anon_vma, we assign to prev.
1327 	 *
1328 	 *         |<----->|
1329 	 * |-------*********-------|
1330 	 *   prev     vma     next
1331 	 *  extend   delete  delete
1332 	 */
1333 
1334 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1335 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1336 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1337 
1338 	/* Initialise avc so mergeability check passes. */
1339 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1340 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1341 
1342 	vma_next->anon_vma = &dummy_anon_vma;
1343 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1344 	vmg.prev = vma_prev;
1345 	vmg.middle = vma;
1346 
1347 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1348 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1349 
1350 	ASSERT_EQ(vma_prev->vm_start, 0);
1351 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1352 
1353 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1354 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1355 
1356 	cleanup_mm(&mm, &vmi);
1357 
1358 	/*
1359 	 * vma has anon_vma, we assign to prev.
1360 	 *
1361 	 *         |<----->|
1362 	 * |-------*********-------|
1363 	 *   prev     vma     next
1364 	 *  extend   delete  delete
1365 	 */
1366 
1367 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1368 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1369 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1370 	vmg.anon_vma = &dummy_anon_vma;
1371 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1372 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1373 	vmg.prev = vma_prev;
1374 	vmg.middle = vma;
1375 
1376 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1377 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1378 
1379 	ASSERT_EQ(vma_prev->vm_start, 0);
1380 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1381 
1382 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1383 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1384 
1385 	cleanup_mm(&mm, &vmi);
1386 
1387 	/*
1388 	 * vma has anon_vma, we assign to prev.
1389 	 *
1390 	 *         |<----->|
1391 	 * |-------*************
1392 	 *   prev       vma
1393 	 *  extend shrink/delete
1394 	 */
1395 
1396 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1397 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1398 
1399 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1400 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1401 	vmg.prev = vma_prev;
1402 	vmg.middle = vma;
1403 
1404 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1405 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1406 
1407 	ASSERT_EQ(vma_prev->vm_start, 0);
1408 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1409 
1410 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1411 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1412 
1413 	cleanup_mm(&mm, &vmi);
1414 
1415 	/*
1416 	 * vma has anon_vma, we assign to next.
1417 	 *
1418 	 *     |<----->|
1419 	 * *************-------|
1420 	 *      vma       next
1421 	 * shrink/delete extend
1422 	 */
1423 
1424 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1425 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1426 
1427 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1428 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1429 	vmg.prev = vma;
1430 	vmg.middle = vma;
1431 
1432 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1433 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1434 
1435 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1436 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1437 
1438 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1439 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1440 
1441 	cleanup_mm(&mm, &vmi);
1442 	return true;
1443 }
1444 
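/*
 * Test that a failed VMA iterator preallocation unlinks any anon_vma that
 * was duplicated in preparation for the merge.
 */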
1445 static bool test_vmi_prealloc_fail(void)
1446 {
1447 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1448 	struct mm_struct mm = {};
1449 	VMA_ITERATOR(vmi, &mm, 0);
1450 	struct vma_merge_struct vmg = {
1451 		.mm = &mm,
1452 		.vmi = &vmi,
1453 	};
1454 	struct anon_vma_chain avc = {};
1455 	struct vm_area_struct *vma_prev, *vma;
1456 
1457 	/*
1458 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1459 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1460 	 * the duplicated anon_vma is unlinked.
1461 	 */
1462 
1463 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1464 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1465 	vma->anon_vma = &dummy_anon_vma;
1466 
1467 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, flags, &dummy_anon_vma);
1468 	vmg.prev = vma_prev;
1469 	vmg.middle = vma;
1470 	vma_set_dummy_anon_vma(vma, &avc);
1471 
1472 	fail_prealloc = true;
1473 
1474 	/* This will cause the merge to fail. */
1475 	ASSERT_EQ(merge_existing(&vmg), NULL);
1476 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1477 	/* We will already have assigned the anon_vma. */
1478 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1479 	/* And it was both cloned and unlinked. */
1480 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1481 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1482 
1483 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1484 
1485 	/*
1486 	 * We repeat the same operation for expanding a VMA, which is what new
1487 	 * VMA merging ultimately uses as well. This asserts that unlinking is
1488 	 * performed in this case too.
1489 	 */
1490 
1491 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1492 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1493 	vma->anon_vma = &dummy_anon_vma;
1494 
1495 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1496 	vmg.middle = vma_prev;
1497 	vmg.next = vma;
1498 
1499 	fail_prealloc = true;
1500 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1501 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1502 
1503 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1504 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1505 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1506 
1507 	cleanup_mm(&mm, &vmi);
1508 	return true;
1509 }
1510 
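/* Test that vma_merge_extend() merges an extended VMA with its successor. */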
1511 static bool test_merge_extend(void)
1512 {
1513 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1514 	struct mm_struct mm = {};
1515 	VMA_ITERATOR(vmi, &mm, 0x1000);
1516 	struct vm_area_struct *vma;
1517 
1518 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1519 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1520 
1521 	/*
1522 	 * Extend a VMA into the gap between itself and the following VMA.
1523 	 * This should result in a merge.
1524 	 *
1525 	 * <->
1526 	 * *  *
1527 	 *
1528 	 */
1529 
1530 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1531 	ASSERT_EQ(vma->vm_start, 0);
1532 	ASSERT_EQ(vma->vm_end, 0x4000);
1533 	ASSERT_EQ(vma->vm_pgoff, 0);
1534 	ASSERT_TRUE(vma_write_started(vma));
1535 	ASSERT_EQ(mm.map_count, 1);
1536 
1537 	cleanup_mm(&mm, &vmi);
1538 	return true;
1539 }
1540 
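/*
 * Test copy_vma() both where no merge occurs and where the copied VMA merges
 * with an existing one.
 */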
1541 static bool test_copy_vma(void)
1542 {
1543 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1544 	struct mm_struct mm = {};
1545 	bool need_locks = false;
1546 	VMA_ITERATOR(vmi, &mm, 0);
1547 	struct vm_area_struct *vma, *vma_new, *vma_next;
1548 
1549 	/* Move backwards and do not merge. */
1550 
1551 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1552 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1553 	ASSERT_NE(vma_new, vma);
1554 	ASSERT_EQ(vma_new->vm_start, 0);
1555 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1556 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1557 	vma_assert_attached(vma_new);
1558 
1559 	cleanup_mm(&mm, &vmi);
1560 
1561 	/* Move a VMA into position next to another and merge the two. */
1562 
1563 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1564 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1565 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1566 	vma_assert_attached(vma_new);
1567 
1568 	ASSERT_EQ(vma_new, vma_next);
1569 
1570 	cleanup_mm(&mm, &vmi);
1571 	return true;
1572 }
1573 
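/*
 * Test that the just_expand flag expands prev in place without traversing
 * back to any earlier VMA.
 */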
1574 static bool test_expand_only_mode(void)
1575 {
1576 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1577 	struct mm_struct mm = {};
1578 	VMA_ITERATOR(vmi, &mm, 0);
1579 	struct vm_area_struct *vma_prev, *vma;
1580 	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
1581 
1582 	/*
1583 	 * Place a VMA prior to the one we're expanding so we assert that we do
1584 	 * not erroneously try to traverse to the previous VMA even though we
1585 	 * have, through the use of the just_expand flag, indicated we do not
1586 	 * need to do so.
1587 	 */
1588 	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1589 
1590 	/*
1591 	 * We will be positioned at the prev VMA, but looking to expand to
1592 	 * 0x9000.
1593 	 */
1594 	vma_iter_set(&vmi, 0x3000);
1595 	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1596 	vmg.prev = vma_prev;
1597 	vmg.just_expand = true;
1598 
1599 	vma = vma_merge_new_range(&vmg);
1600 	ASSERT_NE(vma, NULL);
1601 	ASSERT_EQ(vma, vma_prev);
1602 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1603 	ASSERT_EQ(vma->vm_start, 0x3000);
1604 	ASSERT_EQ(vma->vm_end, 0x9000);
1605 	ASSERT_EQ(vma->vm_pgoff, 3);
1606 	ASSERT_TRUE(vma_write_started(vma));
1607 	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1608 	vma_assert_attached(vma);
1609 
1610 	cleanup_mm(&mm, &vmi);
1611 	return true;
1612 }
1613 
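/*
 * Basic __mmap_region() test - mappings land at the requested addresses and
 * adjacent mappings merge.
 */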
1614 static bool test_mmap_region_basic(void)
1615 {
1616 	struct mm_struct mm = {};
1617 	unsigned long addr;
1618 	struct vm_area_struct *vma;
1619 	VMA_ITERATOR(vmi, &mm, 0);
1620 
1621 	current->mm = &mm;
1622 
1623 	/* Map at 0x300000, length 0x3000. */
1624 	addr = __mmap_region(NULL, 0x300000, 0x3000,
1625 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1626 			     0x300, NULL);
1627 	ASSERT_EQ(addr, 0x300000);
1628 
1629 	/* Map at 0x250000, length 0x3000. */
1630 	addr = __mmap_region(NULL, 0x250000, 0x3000,
1631 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1632 			     0x250, NULL);
1633 	ASSERT_EQ(addr, 0x250000);
1634 
1635 	/* Map at 0x303000, merging to 0x300000 of length 0x6000. */
1636 	addr = __mmap_region(NULL, 0x303000, 0x3000,
1637 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1638 			     0x303, NULL);
1639 	ASSERT_EQ(addr, 0x303000);
1640 
1641 	/* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
1642 	addr = __mmap_region(NULL, 0x24d000, 0x3000,
1643 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1644 			     0x24d, NULL);
1645 	ASSERT_EQ(addr, 0x24d000);
1646 
1647 	ASSERT_EQ(mm.map_count, 2);
1648 
1649 	for_each_vma(vmi, vma) {
1650 		if (vma->vm_start == 0x300000) {
1651 			ASSERT_EQ(vma->vm_end, 0x306000);
1652 			ASSERT_EQ(vma->vm_pgoff, 0x300);
1653 		} else if (vma->vm_start == 0x24d000) {
1654 			ASSERT_EQ(vma->vm_end, 0x253000);
1655 			ASSERT_EQ(vma->vm_pgoff, 0x24d);
1656 		} else {
1657 			ASSERT_FALSE(true);
1658 		}
1659 	}
1660 
1661 	cleanup_mm(&mm, &vmi);
1662 	return true;
1663 }
1664 
1665 int main(void)
1666 {
1667 	int num_tests = 0, num_fail = 0;
1668 
1669 	maple_tree_init();
1670 
1671 #define TEST(name)							\
1672 	do {								\
1673 		num_tests++;						\
1674 		if (!test_##name()) {					\
1675 			num_fail++;					\
1676 			fprintf(stderr, "Test " #name " FAILED\n");	\
1677 		}							\
1678 	} while (0)
1679 
1680 	/* Very simple tests to kick the tyres. */
1681 	TEST(simple_merge);
1682 	TEST(simple_modify);
1683 	TEST(simple_expand);
1684 	TEST(simple_shrink);
1685 
1686 	TEST(merge_new);
1687 	TEST(vma_merge_special_flags);
1688 	TEST(vma_merge_with_close);
1689 	TEST(vma_merge_new_with_close);
1690 	TEST(merge_existing);
1691 	TEST(anon_vma_non_mergeable);
1692 	TEST(dup_anon_vma);
1693 	TEST(vmi_prealloc_fail);
1694 	TEST(merge_extend);
1695 	TEST(copy_vma);
1696 	TEST(expand_only_mode);
1697 
1698 	TEST(mmap_region_basic);
1699 
1700 #undef TEST
1701 
1702 	printf("%d tests run, %d passed, %d failed.\n",
1703 	       num_tests, num_tests - num_fail, num_fail);
1704 
1705 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1706 }
1707