1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "maple-shared.h"
8 #include "vma_internal.h"
9 
10 /* Include so the header guard is set. */
11 #include "../../../mm/vma.h"
12 
13 static bool fail_prealloc;
14 
15 /* Then override vma_iter_prealloc() so we can choose to fail it. */
16 #define vma_iter_prealloc(vmi, vma)					\
17 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
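
/*
 * Illustrative usage (see test_vmi_prealloc_fail() below): a test sets
 * fail_prealloc = true before invoking a merge or expand helper in order to
 * exercise the -ENOMEM path; cleanup_mm() resets the flag for the next test.
 */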
18 
19 /*
20  * Directly import the VMA implementation here. Our vma_internal.h wrapper
21  * provides userland-equivalent functionality for everything vma.c uses.
22  */
23 #include "../../../mm/vma.c"
24 
25 /*
26  * Temporarily forward-ported from a future in which vmgs are used for merging.
27  */
28 struct vma_merge_struct {
29 	struct mm_struct *mm;
30 	struct vma_iterator *vmi;
31 	pgoff_t pgoff;
32 	struct vm_area_struct *prev;
33 	struct vm_area_struct *next; /* Modified by vma_merge(). */
34 	struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
35 	unsigned long start;
36 	unsigned long end;
37 	unsigned long flags;
38 	struct file *file;
39 	struct anon_vma *anon_vma;
40 	struct mempolicy *policy;
41 	struct vm_userfaultfd_ctx uffd_ctx;
42 	struct anon_vma_name *anon_name;
43 };
44 
45 const struct vm_operations_struct vma_dummy_vm_ops;
46 static struct anon_vma dummy_anon_vma;
47 
48 #define ASSERT_TRUE(_expr)						\
49 	do {								\
50 		if (!(_expr)) {						\
51 			fprintf(stderr,					\
52 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
53 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
54 			return false;					\
55 		}							\
56 	} while (0)
57 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
58 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
59 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
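
/*
 * Illustrative only - a hypothetical test using these helpers might look like:
 *
 *	static bool test_something(void)
 *	{
 *		ASSERT_EQ(1 + 1, 2);
 *		return true;
 *	}
 *
 * On failure the ASSERT_*() macros return false from the enclosing function,
 * so they can only be used in functions returning bool, i.e. the test_*()
 * functions below.
 */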
60 
61 static struct task_struct __current;
62 
63 struct task_struct *get_current(void)
64 {
65 	return &__current;
66 }
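
/*
 * Note: __current carries no populated task state; this stub simply gives
 * get_current() (and hence, presumably, the current macro provided by
 * vma_internal.h) a valid object to return in this userland harness.
 */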
67 
68 /* Helper function to simply allocate a VMA. */
69 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
70 					unsigned long start,
71 					unsigned long end,
72 					pgoff_t pgoff,
73 					vm_flags_t flags)
74 {
75 	struct vm_area_struct *ret = vm_area_alloc(mm);
76 
77 	if (ret == NULL)
78 		return NULL;
79 
80 	ret->vm_start = start;
81 	ret->vm_end = end;
82 	ret->vm_pgoff = pgoff;
83 	ret->__vm_flags = flags;
84 
85 	return ret;
86 }
87 
88 /* Helper function to allocate a VMA and link it to the tree. */
89 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
90 						 unsigned long start,
91 						 unsigned long end,
92 						 pgoff_t pgoff,
93 						 vm_flags_t flags)
94 {
95 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
96 
97 	if (vma == NULL)
98 		return NULL;
99 
100 	if (vma_link(mm, vma)) {
101 		vm_area_free(vma);
102 		return NULL;
103 	}
104 
105 	/*
106 	 * Reset this counter which we use to track whether writes have
107 	 * begun. Linking to the tree will have caused this to be incremented,
108 	 * which means we will get a false positive otherwise.
109 	 */
110 	vma->vm_lock_seq = -1;
111 
112 	return vma;
113 }
114 
115 /* Helper function which provides a wrapper around a merge new VMA operation. */
116 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
117 {
118 	/* vma_merge() needs a VMA to determine mm, anon_vma, and file. */
119 	struct vm_area_struct dummy = {
120 		.vm_mm = vmg->mm,
121 		.vm_flags = vmg->flags,
122 		.anon_vma = vmg->anon_vma,
123 		.vm_file = vmg->file,
124 	};
125 
126 	/*
127 	 * For convenience, get the prev and next VMAs, which the new VMA
128 	 * operation requires.
129 	 */
130 	vmg->next = vma_next(vmg->vmi);
131 	vmg->prev = vma_prev(vmg->vmi);
132 
133 	vma_iter_set(vmg->vmi, vmg->start);
134 	return vma_merge_new_vma(vmg->vmi, vmg->prev, &dummy, vmg->start,
135 				 vmg->end, vmg->pgoff);
136 }
137 
138 /*
139  * Helper function which provides a wrapper around a merge existing VMA
140  * operation.
141  */
142 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
143 {
144 	/* vma_merge() needs a VMA to determine mm, anon_vma, and file. */
145 	struct vm_area_struct dummy = {
146 		.vm_mm = vmg->mm,
147 		.vm_flags = vmg->flags,
148 		.anon_vma = vmg->anon_vma,
149 		.vm_file = vmg->file,
150 	};
151 
152 	return vma_merge(vmg->vmi, vmg->prev, &dummy, vmg->start, vmg->end,
153 			 vmg->flags, vmg->pgoff, vmg->policy, vmg->uffd_ctx,
154 			 vmg->anon_name);
155 }
156 
157 /*
158  * Helper function which provides a wrapper around the expansion of an existing
159  * VMA.
160  */
161 static int expand_existing(struct vma_merge_struct *vmg)
162 {
163 	return vma_expand(vmg->vmi, vmg->vma, vmg->start, vmg->end, vmg->pgoff,
164 			  vmg->next);
165 }
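
/*
 * expand_existing() returns 0 on success (asserted via ASSERT_FALSE() in
 * test_simple_expand()); the prealloc-failure path is expected to return
 * -ENOMEM (see test_vmi_prealloc_fail()).
 */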
166 
167 /*
168  * Helper function to reset merge state and the associated VMA iterator to
169  * a specified new range.
170  */
171 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
172 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
173 {
174 	vma_iter_set(vmg->vmi, start);
175 
176 	vmg->prev = NULL;
177 	vmg->next = NULL;
178 	vmg->vma = NULL;
179 
180 	vmg->start = start;
181 	vmg->end = end;
182 	vmg->pgoff = pgoff;
183 	vmg->flags = flags;
184 }
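
/*
 * Note that vmg_set_range() clears vmg->prev, vmg->vma and vmg->next; tests
 * exercising the modified-VMA merge or expand paths assign the relevant
 * fields (vmg.prev, vmg.vma and/or vmg.next) explicitly afterwards, before
 * calling merge_existing() or expand_existing().
 */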
185 
186 /*
187  * Helper function to try to merge a new VMA.
188  *
189  * Update vmg and the iterator for it and try to merge, otherwise allocate a new
190  * VMA, link it to the maple tree and return it.
191  */
192 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
193 						struct vma_merge_struct *vmg,
194 						unsigned long start, unsigned long end,
195 						pgoff_t pgoff, vm_flags_t flags,
196 						bool *was_merged)
197 {
198 	struct vm_area_struct *merged;
199 
200 	vmg_set_range(vmg, start, end, pgoff, flags);
201 
202 	merged = merge_new(vmg);
203 	if (merged) {
204 		*was_merged = true;
205 		return merged;
206 	}
207 
208 	*was_merged = false;
209 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
210 }
211 
212 /*
213  * Helper function to reset the dummy anon_vma to indicate it has not been
214  * duplicated.
215  */
216 static void reset_dummy_anon_vma(void)
217 {
218 	dummy_anon_vma.was_cloned = false;
219 	dummy_anon_vma.was_unlinked = false;
220 }
221 
222 /*
223  * Helper function to remove all VMAs and destroy the maple tree associated with
224  * a virtual address space. Returns a count of VMAs in the tree.
225  */
226 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
227 {
228 	struct vm_area_struct *vma;
229 	int count = 0;
230 
231 	fail_prealloc = false;
232 	reset_dummy_anon_vma();
233 
234 	vma_iter_set(vmi, 0);
235 	for_each_vma(*vmi, vma) {
236 		vm_area_free(vma);
237 		count++;
238 	}
239 
240 	mtree_destroy(&mm->mm_mt);
241 	mm->map_count = 0;
242 	return count;
243 }
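
/*
 * Typical usage: ASSERT_EQ(cleanup_mm(&mm, &vmi), n) both tears down the test
 * mm and verifies that exactly n VMAs survived the operations under test; it
 * also resets fail_prealloc and the dummy anon_vma state for the next test.
 */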
244 
245 /* Helper function to determine if VMA has had vma_start_write() performed. */
246 static bool vma_write_started(struct vm_area_struct *vma)
247 {
248 	int seq = vma->vm_lock_seq;
249 
250 	/* We reset after each check. */
251 	vma->vm_lock_seq = -1;
252 
253 	/* The vma_start_write() stub simply increments this value. */
254 	return seq > -1;
255 }
256 
257 /* Helper function providing a dummy vm_ops->close() method. */
258 static void dummy_close(struct vm_area_struct *)
259 {
260 }
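
/*
 * Tests assign a vm_operations_struct containing dummy_close() to vma->vm_ops
 * in order to simulate a VMA whose removal would require a ->close() call,
 * which restricts which merges are permitted (see test_vma_merge_with_close()
 * and test_vma_merge_new_with_close()).
 */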
261 
262 static bool test_simple_merge(void)
263 {
264 	struct vm_area_struct *vma;
265 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
266 	struct mm_struct mm = {};
267 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
268 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
269 	VMA_ITERATOR(vmi, &mm, 0x1000);
270 	struct vma_merge_struct vmg = {
271 		.mm = &mm,
272 		.vmi = &vmi,
273 		.start = 0x1000,
274 		.end = 0x2000,
275 		.flags = flags,
276 		.pgoff = 1,
277 	};
278 
279 	ASSERT_FALSE(vma_link(&mm, vma_left));
280 	ASSERT_FALSE(vma_link(&mm, vma_right));
281 
282 	vma = merge_new(&vmg);
283 	ASSERT_NE(vma, NULL);
284 
285 	ASSERT_EQ(vma->vm_start, 0);
286 	ASSERT_EQ(vma->vm_end, 0x3000);
287 	ASSERT_EQ(vma->vm_pgoff, 0);
288 	ASSERT_EQ(vma->vm_flags, flags);
289 
290 	vm_area_free(vma);
291 	mtree_destroy(&mm.mm_mt);
292 
293 	return true;
294 }
295 
296 static bool test_simple_modify(void)
297 {
298 	struct vm_area_struct *vma;
299 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
300 	struct mm_struct mm = {};
301 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
302 	VMA_ITERATOR(vmi, &mm, 0x1000);
303 
304 	ASSERT_FALSE(vma_link(&mm, init_vma));
305 
306 	/*
307 	 * The flags will not be changed, the vma_modify_flags() function
308 	 * performs the merge/split only.
309 	 */
310 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
311 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
312 	ASSERT_NE(vma, NULL);
313 	/* We modify the provided VMA, and on split allocate new VMAs. */
314 	ASSERT_EQ(vma, init_vma);
315 
316 	ASSERT_EQ(vma->vm_start, 0x1000);
317 	ASSERT_EQ(vma->vm_end, 0x2000);
318 	ASSERT_EQ(vma->vm_pgoff, 1);
319 
320 	/*
321 	 * Now walk through the three split VMAs and make sure they are as
322 	 * expected.
323 	 */
324 
325 	vma_iter_set(&vmi, 0);
326 	vma = vma_iter_load(&vmi);
327 
328 	ASSERT_EQ(vma->vm_start, 0);
329 	ASSERT_EQ(vma->vm_end, 0x1000);
330 	ASSERT_EQ(vma->vm_pgoff, 0);
331 
332 	vm_area_free(vma);
333 	vma_iter_clear(&vmi);
334 
335 	vma = vma_next(&vmi);
336 
337 	ASSERT_EQ(vma->vm_start, 0x1000);
338 	ASSERT_EQ(vma->vm_end, 0x2000);
339 	ASSERT_EQ(vma->vm_pgoff, 1);
340 
341 	vm_area_free(vma);
342 	vma_iter_clear(&vmi);
343 
344 	vma = vma_next(&vmi);
345 
346 	ASSERT_EQ(vma->vm_start, 0x2000);
347 	ASSERT_EQ(vma->vm_end, 0x3000);
348 	ASSERT_EQ(vma->vm_pgoff, 2);
349 
350 	vm_area_free(vma);
351 	mtree_destroy(&mm.mm_mt);
352 
353 	return true;
354 }
355 
356 static bool test_simple_expand(void)
357 {
358 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
359 	struct mm_struct mm = {};
360 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
361 	VMA_ITERATOR(vmi, &mm, 0);
362 	struct vma_merge_struct vmg = {
363 		.vmi = &vmi,
364 		.vma = vma,
365 		.start = 0,
366 		.end = 0x3000,
367 		.pgoff = 0,
368 	};
369 
370 	ASSERT_FALSE(vma_link(&mm, vma));
371 
372 	ASSERT_FALSE(expand_existing(&vmg));
373 
374 	ASSERT_EQ(vma->vm_start, 0);
375 	ASSERT_EQ(vma->vm_end, 0x3000);
376 	ASSERT_EQ(vma->vm_pgoff, 0);
377 
378 	vm_area_free(vma);
379 	mtree_destroy(&mm.mm_mt);
380 
381 	return true;
382 }
383 
384 static bool test_simple_shrink(void)
385 {
386 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
387 	struct mm_struct mm = {};
388 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
389 	VMA_ITERATOR(vmi, &mm, 0);
390 
391 	ASSERT_FALSE(vma_link(&mm, vma));
392 
393 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
394 
395 	ASSERT_EQ(vma->vm_start, 0);
396 	ASSERT_EQ(vma->vm_end, 0x1000);
397 	ASSERT_EQ(vma->vm_pgoff, 0);
398 
399 	vm_area_free(vma);
400 	mtree_destroy(&mm.mm_mt);
401 
402 	return true;
403 }
404 
405 static bool test_merge_new(void)
406 {
407 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
408 	struct mm_struct mm = {};
409 	VMA_ITERATOR(vmi, &mm, 0);
410 	struct vma_merge_struct vmg = {
411 		.mm = &mm,
412 		.vmi = &vmi,
413 	};
414 	struct anon_vma_chain dummy_anon_vma_chain_a = {
415 		.anon_vma = &dummy_anon_vma,
416 	};
417 	struct anon_vma_chain dummy_anon_vma_chain_b = {
418 		.anon_vma = &dummy_anon_vma,
419 	};
420 	struct anon_vma_chain dummy_anon_vma_chain_c = {
421 		.anon_vma = &dummy_anon_vma,
422 	};
423 	struct anon_vma_chain dummy_anon_vma_chain_d = {
424 		.anon_vma = &dummy_anon_vma,
425 	};
426 	int count;
427 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
428 	bool merged;
429 
430 	/*
431 	 * 0123456789abc
432 	 * AA B       CC
433 	 */
434 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
435 	ASSERT_NE(vma_a, NULL);
436 	/* We give each VMA a single avc so we can test anon_vma duplication. */
437 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
438 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
439 
440 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
441 	ASSERT_NE(vma_b, NULL);
442 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
443 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
444 
445 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
446 	ASSERT_NE(vma_c, NULL);
447 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
448 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
449 
450 	/*
451 	 * NO merge.
452 	 *
453 	 * 0123456789abc
454 	 * AA B   **  CC
455 	 */
456 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
457 	ASSERT_NE(vma_d, NULL);
458 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
459 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
460 	ASSERT_FALSE(merged);
461 	ASSERT_EQ(mm.map_count, 4);
462 
463 	/*
464 	 * Merge BOTH sides.
465 	 *
466 	 * 0123456789abc
467 	 * AA*B   DD  CC
468 	 */
469 	vma_b->anon_vma = &dummy_anon_vma;
470 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
471 	ASSERT_EQ(vma, vma_a);
472 	/* Merge with A, delete B. */
473 	ASSERT_TRUE(merged);
474 	ASSERT_EQ(vma->vm_start, 0);
475 	ASSERT_EQ(vma->vm_end, 0x4000);
476 	ASSERT_EQ(vma->vm_pgoff, 0);
477 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
478 	ASSERT_TRUE(vma_write_started(vma));
479 	ASSERT_EQ(mm.map_count, 3);
480 
481 	/*
482 	 * Merge to PREVIOUS VMA.
483 	 *
484 	 * 0123456789abc
485 	 * AAAA*  DD  CC
486 	 */
487 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
488 	ASSERT_EQ(vma, vma_a);
489 	/* Extend A. */
490 	ASSERT_TRUE(merged);
491 	ASSERT_EQ(vma->vm_start, 0);
492 	ASSERT_EQ(vma->vm_end, 0x5000);
493 	ASSERT_EQ(vma->vm_pgoff, 0);
494 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
495 	ASSERT_TRUE(vma_write_started(vma));
496 	ASSERT_EQ(mm.map_count, 3);
497 
498 	/*
499 	 * Merge to NEXT VMA.
500 	 *
501 	 * 0123456789abc
502 	 * AAAAA *DD  CC
503 	 */
504 	vma_d->anon_vma = &dummy_anon_vma;
505 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
506 	ASSERT_EQ(vma, vma_d);
507 	/* Prepend. */
508 	ASSERT_TRUE(merged);
509 	ASSERT_EQ(vma->vm_start, 0x6000);
510 	ASSERT_EQ(vma->vm_end, 0x9000);
511 	ASSERT_EQ(vma->vm_pgoff, 6);
512 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
513 	ASSERT_TRUE(vma_write_started(vma));
514 	ASSERT_EQ(mm.map_count, 3);
515 
516 	/*
517 	 * Merge BOTH sides.
518 	 *
519 	 * 0123456789abc
520 	 * AAAAA*DDD  CC
521 	 */
522 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
523 	ASSERT_EQ(vma, vma_a);
524 	/* Merge with A, delete D. */
525 	ASSERT_TRUE(merged);
526 	ASSERT_EQ(vma->vm_start, 0);
527 	ASSERT_EQ(vma->vm_end, 0x9000);
528 	ASSERT_EQ(vma->vm_pgoff, 0);
529 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
530 	ASSERT_TRUE(vma_write_started(vma));
531 	ASSERT_EQ(mm.map_count, 2);
532 
533 	/*
534 	 * Merge to NEXT VMA.
535 	 *
536 	 * 0123456789abc
537 	 * AAAAAAAAA *CC
538 	 */
539 	vma_c->anon_vma = &dummy_anon_vma;
540 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
541 	ASSERT_EQ(vma, vma_c);
542 	/* Prepend C. */
543 	ASSERT_TRUE(merged);
544 	ASSERT_EQ(vma->vm_start, 0xa000);
545 	ASSERT_EQ(vma->vm_end, 0xc000);
546 	ASSERT_EQ(vma->vm_pgoff, 0xa);
547 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
548 	ASSERT_TRUE(vma_write_started(vma));
549 	ASSERT_EQ(mm.map_count, 2);
550 
551 	/*
552 	 * Merge BOTH sides.
553 	 *
554 	 * 0123456789abc
555 	 * AAAAAAAAA*CCC
556 	 */
557 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
558 	ASSERT_EQ(vma, vma_a);
559 	/* Extend A and delete C. */
560 	ASSERT_TRUE(merged);
561 	ASSERT_EQ(vma->vm_start, 0);
562 	ASSERT_EQ(vma->vm_end, 0xc000);
563 	ASSERT_EQ(vma->vm_pgoff, 0);
564 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
565 	ASSERT_TRUE(vma_write_started(vma));
566 	ASSERT_EQ(mm.map_count, 1);
567 
568 	/*
569 	 * Final state.
570 	 *
571 	 * 0123456789abc
572 	 * AAAAAAAAAAAAA
573 	 */
574 
575 	count = 0;
576 	vma_iter_set(&vmi, 0);
577 	for_each_vma(vmi, vma) {
578 		ASSERT_NE(vma, NULL);
579 		ASSERT_EQ(vma->vm_start, 0);
580 		ASSERT_EQ(vma->vm_end, 0xc000);
581 		ASSERT_EQ(vma->vm_pgoff, 0);
582 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
583 
584 		vm_area_free(vma);
585 		count++;
586 	}
587 
588 	/* Should only have one VMA left (though freed) after all is done. */
589 	ASSERT_EQ(count, 1);
590 
591 	mtree_destroy(&mm.mm_mt);
592 	return true;
593 }
594 
595 static bool test_vma_merge_special_flags(void)
596 {
597 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
598 	struct mm_struct mm = {};
599 	VMA_ITERATOR(vmi, &mm, 0);
600 	struct vma_merge_struct vmg = {
601 		.mm = &mm,
602 		.vmi = &vmi,
603 	};
604 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
605 	vm_flags_t all_special_flags = 0;
606 	int i;
607 	struct vm_area_struct *vma_left, *vma;
608 
609 	/* Make sure there aren't new VM_SPECIAL flags. */
610 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
611 		all_special_flags |= special_flags[i];
612 	}
613 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
614 
615 	/*
616 	 * 01234
617 	 * AAA
618 	 */
619 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
620 	ASSERT_NE(vma_left, NULL);
621 
622 	/* 1. Set up new VMA with special flag that would otherwise merge. */
623 
624 	/*
625 	 * 01234
626 	 * AAA*
627 	 *
628 	 * This should merge if not for the VM_SPECIAL flag.
629 	 */
630 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
631 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
632 		vm_flags_t special_flag = special_flags[i];
633 
634 		vma_left->__vm_flags = flags | special_flag;
635 		vmg.flags = flags | special_flag;
636 		vma = merge_new(&vmg);
637 		ASSERT_EQ(vma, NULL);
638 	}
639 
640 	/* 2. Modify VMA with special flag that would otherwise merge. */
641 
642 	/*
643 	 * 01234
644 	 * AAAB
645 	 *
646 	 * Create a VMA to modify.
647 	 */
648 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
649 	ASSERT_NE(vma, NULL);
650 	vmg.vma = vma;
651 
652 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
653 		vm_flags_t special_flag = special_flags[i];
654 
655 		vma_left->__vm_flags = flags | special_flag;
656 		vmg.flags = flags | special_flag;
657 		vma = merge_existing(&vmg);
658 		ASSERT_EQ(vma, NULL);
659 	}
660 
661 	cleanup_mm(&mm, &vmi);
662 	return true;
663 }
664 
665 static bool test_vma_merge_with_close(void)
666 {
667 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
668 	struct mm_struct mm = {};
669 	VMA_ITERATOR(vmi, &mm, 0);
670 	struct vma_merge_struct vmg = {
671 		.mm = &mm,
672 		.vmi = &vmi,
673 	};
674 	const struct vm_operations_struct vm_ops = {
675 		.close = dummy_close,
676 	};
677 	struct vm_area_struct *vma_next =
678 		alloc_and_link_vma(&mm, 0x2000, 0x3000, 2, flags);
679 	struct vm_area_struct *vma;
680 
681 	/*
682 	 * When we merge VMAs we sometimes have to delete others as part of the
683 	 * operation.
684 	 *
685 	 * Considering the two possible adjacent VMAs to which a VMA can be
686 	 * merged:
687 	 *
688 	 * [ prev ][ vma ][ next ]
689 	 *
690 	 * In no case will we need to delete prev. If the operation is
691 	 * mergeable, then prev will be extended with one or both of vma and
692 	 * next deleted.
693 	 *
694 	 * As a result, during initial mergeability checks, only
695 	 * can_vma_merge_before() (which implies the VMA being merged with is
696 	 * 'next' as shown above) bothers to check to see whether the next VMA
697 	 * has a vm_ops->close() callback that will need to be called when
698 	 * removed.
699 	 *
700 	 * If it does, then we cannot merge as the resources that the close()
701 	 * operation potentially clears down are tied only to the existing VMA
702 	 * range and we have no way of extending those to the newly merged one.
703 	 *
704 	 * We must consider two scenarios:
705 	 *
706 	 * A.
707 	 *
708 	 * vm_ops->close:     -       -    !NULL
709 	 *                 [ prev ][ vma ][ next ]
710 	 *
711 	 * Where prev may or may not be present/mergeable.
712 	 *
713 	 * This is picked up by a specific check in can_vma_merge_before().
714 	 *
715 	 * B.
716 	 *
717 	 * vm_ops->close:     -     !NULL
718 	 *                 [ prev ][ vma ]
719 	 *
720 	 * Where prev and vma are present and mergeable.
721 	 *
722 	 * This is picked up by a specific check in the modified VMA merge.
723 	 *
724 	 * IMPORTANT NOTE: We make the assumption that the following case:
725 	 *
726 	 *    -     !NULL   NULL
727 	 * [ prev ][ vma ][ next ]
728 	 *
729 	 * Cannot occur, because vma->vm_ops being the same implies the same
730 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
731 	 * would be set too, and thus scenario A would pick this up.
732 	 */
733 
734 	ASSERT_NE(vma_next, NULL);
735 
736 	/*
737 	 * SCENARIO A
738 	 *
739 	 * 0123
740 	 *  *N
741 	 */
742 
743 	/* Make the next VMA have a close() callback. */
744 	vma_next->vm_ops = &vm_ops;
745 
746 	/* Our proposed VMA has characteristics that would otherwise allow a merge. */
747 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
748 
749 	/* The next VMA having a close() operation should cause the merge to fail. */
750 	ASSERT_EQ(merge_new(&vmg), NULL);
751 
752 	/* Now create the VMA so we can merge via modified flags */
753 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
754 	vma = alloc_and_link_vma(&mm, 0x1000, 0x2000, 1, flags);
755 	vmg.vma = vma;
756 
757 	/*
758 	 * The VMA being modified in a way that would otherwise merge should
759 	 * also fail.
760 	 */
761 	ASSERT_EQ(merge_existing(&vmg), NULL);
762 
763 	/* SCENARIO B
764 	 *
765 	 * 0123
766 	 * P*
767 	 *
768 	 * In order for this scenario to trigger, the VMA currently being
769 	 * modified must also have a .close().
770 	 */
771 
772 	/* Reset VMG state. */
773 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
774 	/*
775 	 * Make next unmergeable, and don't let the scenario A check pick this
776 	 * up, we want to reproduce scenario B only.
777 	 */
778 	vma_next->vm_ops = NULL;
779 	vma_next->__vm_flags &= ~VM_MAYWRITE;
780 	/* Allocate prev. */
781 	vmg.prev = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
782 	/* Assign a vm_ops->close() function to VMA explicitly. */
783 	vma->vm_ops = &vm_ops;
784 	vmg.vma = vma;
785 	/* Make sure merge does not occur. */
786 	ASSERT_EQ(merge_existing(&vmg), NULL);
787 
788 	cleanup_mm(&mm, &vmi);
789 	return true;
790 }
791 
792 static bool test_vma_merge_new_with_close(void)
793 {
794 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
795 	struct mm_struct mm = {};
796 	VMA_ITERATOR(vmi, &mm, 0);
797 	struct vma_merge_struct vmg = {
798 		.mm = &mm,
799 		.vmi = &vmi,
800 	};
801 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
802 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
803 	const struct vm_operations_struct vm_ops = {
804 		.close = dummy_close,
805 	};
806 	struct vm_area_struct *vma;
807 
808 	/*
809 	 * We should allow the partial merge of a proposed new VMA if the
810 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
811 	 * compatible), e.g.:
812 	 *
813 	 *        New VMA
814 	 *    A  v-------v  B
815 	 * |-----|       |-----|
816 	 *  close         close
817 	 *
818 	 * Since the rule is to not DELETE a VMA with a close operation, this
819 	 * should be permitted, but rather than expanding A and deleting B, we
820 	 * should simply expand A and leave B intact, e.g.:
821 	 *
822 	 *        New VMA
823 	 *       A          B
824 	 * |------------||-----|
825 	 *  close         close
826 	 */
827 
828 	/* Have prev and next have a vm_ops->close() hook. */
829 	vma_prev->vm_ops = &vm_ops;
830 	vma_next->vm_ops = &vm_ops;
831 
832 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
833 	vma = merge_new(&vmg);
834 	ASSERT_NE(vma, NULL);
835 	ASSERT_EQ(vma->vm_start, 0);
836 	ASSERT_EQ(vma->vm_end, 0x5000);
837 	ASSERT_EQ(vma->vm_pgoff, 0);
838 	ASSERT_EQ(vma->vm_ops, &vm_ops);
839 	ASSERT_TRUE(vma_write_started(vma));
840 	ASSERT_EQ(mm.map_count, 2);
841 
842 	cleanup_mm(&mm, &vmi);
843 	return true;
844 }
845 
846 static bool test_merge_existing(void)
847 {
848 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
849 	struct mm_struct mm = {};
850 	VMA_ITERATOR(vmi, &mm, 0);
851 	struct vm_area_struct *vma, *vma_prev, *vma_next;
852 	struct vma_merge_struct vmg = {
853 		.mm = &mm,
854 		.vmi = &vmi,
855 	};
856 
857 	/*
858 	 * Merge right case - partial span.
859 	 *
860 	 *    <->
861 	 * 0123456789
862 	 *   VVVVNNN
863 	 *            ->
864 	 * 0123456789
865 	 *   VNNNNNN
866 	 */
867 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
868 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
869 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
870 	vmg.vma = vma;
871 	vmg.prev = vma;
872 	vma->anon_vma = &dummy_anon_vma;
873 	ASSERT_EQ(merge_existing(&vmg), vma_next);
874 	ASSERT_EQ(vma_next->vm_start, 0x3000);
875 	ASSERT_EQ(vma_next->vm_end, 0x9000);
876 	ASSERT_EQ(vma_next->vm_pgoff, 3);
877 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
878 	ASSERT_EQ(vma->vm_start, 0x2000);
879 	ASSERT_EQ(vma->vm_end, 0x3000);
880 	ASSERT_EQ(vma->vm_pgoff, 2);
881 	ASSERT_TRUE(vma_write_started(vma));
882 	ASSERT_TRUE(vma_write_started(vma_next));
883 	ASSERT_EQ(mm.map_count, 2);
884 
885 	/* Clear down and reset. */
886 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
887 
888 	/*
889 	 * Merge right case - full span.
890 	 *
891 	 *   <-->
892 	 * 0123456789
893 	 *   VVVVNNN
894 	 *            ->
895 	 * 0123456789
896 	 *   NNNNNNN
897 	 */
898 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
899 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
900 	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
901 	vmg.vma = vma;
902 	vma->anon_vma = &dummy_anon_vma;
903 	ASSERT_EQ(merge_existing(&vmg), vma_next);
904 	ASSERT_EQ(vma_next->vm_start, 0x2000);
905 	ASSERT_EQ(vma_next->vm_end, 0x9000);
906 	ASSERT_EQ(vma_next->vm_pgoff, 2);
907 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
908 	ASSERT_TRUE(vma_write_started(vma_next));
909 	ASSERT_EQ(mm.map_count, 1);
910 
911 	/* Clear down and reset. We should have deleted vma. */
912 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
913 
914 	/*
915 	 * Merge left case - partial span.
916 	 *
917 	 *    <->
918 	 * 0123456789
919 	 * PPPVVVV
920 	 *            ->
921 	 * 0123456789
922 	 * PPPPPPV
923 	 */
924 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
925 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
926 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
927 	vmg.prev = vma_prev;
928 	vmg.vma = vma;
929 	vma->anon_vma = &dummy_anon_vma;
930 
931 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
932 	ASSERT_EQ(vma_prev->vm_start, 0);
933 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
934 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
935 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
936 	ASSERT_EQ(vma->vm_start, 0x6000);
937 	ASSERT_EQ(vma->vm_end, 0x7000);
938 	ASSERT_EQ(vma->vm_pgoff, 6);
939 	ASSERT_TRUE(vma_write_started(vma_prev));
940 	ASSERT_TRUE(vma_write_started(vma));
941 	ASSERT_EQ(mm.map_count, 2);
942 
943 	/* Clear down and reset. */
944 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
945 
946 	/*
947 	 * Merge left case - full span.
948 	 *
949 	 *    <-->
950 	 * 0123456789
951 	 * PPPVVVV
952 	 *            ->
953 	 * 0123456789
954 	 * PPPPPPP
955 	 */
956 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
957 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
958 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
959 	vmg.prev = vma_prev;
960 	vmg.vma = vma;
961 	vma->anon_vma = &dummy_anon_vma;
962 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
963 	ASSERT_EQ(vma_prev->vm_start, 0);
964 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
965 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
966 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
967 	ASSERT_TRUE(vma_write_started(vma_prev));
968 	ASSERT_EQ(mm.map_count, 1);
969 
970 	/* Clear down and reset. We should have deleted vma. */
971 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
972 
973 	/*
974 	 * Merge both case.
975 	 *
976 	 *    <-->
977 	 * 0123456789
978 	 * PPPVVVVNNN
979 	 *             ->
980 	 * 0123456789
981 	 * PPPPPPPPPP
982 	 */
983 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
984 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
985 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
986 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
987 	vmg.prev = vma_prev;
988 	vmg.vma = vma;
989 	vma->anon_vma = &dummy_anon_vma;
990 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
991 	ASSERT_EQ(vma_prev->vm_start, 0);
992 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
993 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
994 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
995 	ASSERT_TRUE(vma_write_started(vma_prev));
996 	ASSERT_EQ(mm.map_count, 1);
997 
998 	/* Clear down and reset. We should have deleted prev and next. */
999 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1000 
1001 	/*
1002 	 * Non-merge ranges. The modified VMA merge operation assumes that the
1003 	 * caller always specifies ranges within the input VMA, so we need only
1004 	 * examine these cases.
1005 	 *
1006 	 *     -
1007 	 *      -
1008 	 *       -
1009 	 *     <->
1010 	 *     <>
1011 	 *      <>
1012 	 * 0123456789a
1013 	 * PPPVVVVVNNN
1014 	 */
1015 
1016 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1017 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1018 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1019 
1020 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1021 	vmg.prev = vma;
1022 	vmg.vma = vma;
1023 	ASSERT_EQ(merge_existing(&vmg), NULL);
1024 
1025 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1026 	vmg.prev = vma;
1027 	vmg.vma = vma;
1028 	ASSERT_EQ(merge_existing(&vmg), NULL);
1029 
1030 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1031 	vmg.prev = vma;
1032 	vmg.vma = vma;
1033 	ASSERT_EQ(merge_existing(&vmg), NULL);
1034 
1035 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1036 	vmg.prev = vma;
1037 	vmg.vma = vma;
1038 	ASSERT_EQ(merge_existing(&vmg), NULL);
1039 
1040 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1041 	vmg.prev = vma;
1042 	vmg.vma = vma;
1043 	ASSERT_EQ(merge_existing(&vmg), NULL);
1044 
1045 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1046 	vmg.prev = vma;
1047 	vmg.vma = vma;
1048 	ASSERT_EQ(merge_existing(&vmg), NULL);
1049 
1050 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1051 
1052 	return true;
1053 }
1054 
1055 static bool test_anon_vma_non_mergeable(void)
1056 {
1057 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1058 	struct mm_struct mm = {};
1059 	VMA_ITERATOR(vmi, &mm, 0);
1060 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1061 	struct vma_merge_struct vmg = {
1062 		.mm = &mm,
1063 		.vmi = &vmi,
1064 	};
1065 	struct anon_vma_chain dummy_anon_vma_chain1 = {
1066 		.anon_vma = &dummy_anon_vma,
1067 	};
1068 	struct anon_vma_chain dummy_anon_vma_chain2 = {
1069 		.anon_vma = &dummy_anon_vma,
1070 	};
1071 
1072 	/*
1073 	 * In the case of modified VMA merge, merging both left and right VMAs
1074 	 * but where prev and next have incompatible anon_vma objects, we revert
1075 	 * to a merge of prev and VMA:
1076 	 *
1077 	 *    <-->
1078 	 * 0123456789
1079 	 * PPPVVVVNNN
1080 	 *            ->
1081 	 * 0123456789
1082 	 * PPPPPPPNNN
1083 	 */
1084 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1085 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1086 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1087 
1088 	/*
1089 	 * Give both prev and next single anon_vma_chain fields, so they will
1090 	 * merge with the NULL vmg->anon_vma.
1091 	 *
1092 	 * However, when prev is compared to next, the merge should fail.
1093 	 */
1094 
1095 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1096 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1097 	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1098 	vma_prev->anon_vma = &dummy_anon_vma;
1099 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1100 
1101 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1102 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1103 	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1104 	vma_next->anon_vma = (struct anon_vma *)2;
1105 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1106 
1107 	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1108 
1109 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1110 	vmg.prev = vma_prev;
1111 	vmg.vma = vma;
1112 
1113 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1114 	ASSERT_EQ(vma_prev->vm_start, 0);
1115 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1116 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1117 	ASSERT_TRUE(vma_write_started(vma_prev));
1118 	ASSERT_FALSE(vma_write_started(vma_next));
1119 
1120 	/* Clear down and reset. */
1121 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1122 
1123 	/*
1124 	 * Now consider the new VMA case. This is equivalent, only adding a new
1125 	 * VMA in a gap between prev and next.
1126 	 *
1127 	 *    <-->
1128 	 * 0123456789
1129 	 * PPP****NNN
1130 	 *            ->
1131 	 * 0123456789
1132 	 * PPPPPPPNNN
1133 	 */
1134 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1135 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1136 
1137 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1138 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1139 	vma_prev->anon_vma = (struct anon_vma *)1;
1140 
1141 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1142 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1143 	vma_next->anon_vma = (struct anon_vma *)2;
1144 
1145 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1146 	vmg.prev = vma_prev;
1147 
1148 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1149 	ASSERT_EQ(vma_prev->vm_start, 0);
1150 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1151 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1152 	ASSERT_TRUE(vma_write_started(vma_prev));
1153 	ASSERT_FALSE(vma_write_started(vma_next));
1154 
1155 	/* Final cleanup. */
1156 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1157 
1158 	return true;
1159 }
1160 
1161 static bool test_dup_anon_vma(void)
1162 {
1163 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1164 	struct mm_struct mm = {};
1165 	VMA_ITERATOR(vmi, &mm, 0);
1166 	struct vma_merge_struct vmg = {
1167 		.mm = &mm,
1168 		.vmi = &vmi,
1169 	};
1170 	struct anon_vma_chain dummy_anon_vma_chain = {
1171 		.anon_vma = &dummy_anon_vma,
1172 	};
1173 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1174 
1175 	reset_dummy_anon_vma();
1176 
1177 	/*
1178 	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1179 	 * assigns it to the expanded VMA.
1180 	 *
1181 	 * This covers new VMA merging, as these operations amount to a VMA
1182 	 * expand.
1183 	 */
1184 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1185 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1186 	vma_next->anon_vma = &dummy_anon_vma;
1187 
1188 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1189 	vmg.vma = vma_prev;
1190 	vmg.next = vma_next;
1191 
1192 	ASSERT_EQ(expand_existing(&vmg), 0);
1193 
1194 	/* Will have been cloned. */
1195 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1196 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1197 
1198 	/* Cleanup ready for next run. */
1199 	cleanup_mm(&mm, &vmi);
1200 
1201 	/*
1202 	 * next has anon_vma, we assign to prev.
1203 	 *
1204 	 *         |<----->|
1205 	 * |-------*********-------|
1206 	 *   prev     vma     next
1207 	 *  extend   delete  delete
1208 	 */
1209 
1210 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1211 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1212 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1213 
1214 	/* Initialise avc so mergeability check passes. */
1215 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1216 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1217 
1218 	vma_next->anon_vma = &dummy_anon_vma;
1219 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1220 	vmg.prev = vma_prev;
1221 	vmg.vma = vma;
1222 
1223 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1224 
1225 	ASSERT_EQ(vma_prev->vm_start, 0);
1226 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1227 
1228 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1229 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1230 
1231 	cleanup_mm(&mm, &vmi);
1232 
1233 	/*
1234 	 * vma has anon_vma, we assign to prev.
1235 	 *
1236 	 *         |<----->|
1237 	 * |-------*********-------|
1238 	 *   prev     vma     next
1239 	 *  extend   delete  delete
1240 	 */
1241 
1242 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1243 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1244 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1245 
1246 	vma->anon_vma = &dummy_anon_vma;
1247 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1248 	vmg.prev = vma_prev;
1249 	vmg.vma = vma;
1250 
1251 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1252 
1253 	ASSERT_EQ(vma_prev->vm_start, 0);
1254 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1255 
1256 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1257 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1258 
1259 	cleanup_mm(&mm, &vmi);
1260 
1261 	/*
1262 	 * vma has anon_vma, we assign to prev.
1263 	 *
1264 	 *         |<----->|
1265 	 * |-------*************
1266 	 *   prev       vma
1267 	 *  extend shrink/delete
1268 	 */
1269 
1270 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1271 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1272 
1273 	vma->anon_vma = &dummy_anon_vma;
1274 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1275 	vmg.prev = vma_prev;
1276 	vmg.vma = vma;
1277 
1278 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1279 
1280 	ASSERT_EQ(vma_prev->vm_start, 0);
1281 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1282 
1283 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1284 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1285 
1286 	cleanup_mm(&mm, &vmi);
1287 
1288 	/*
1289 	 * vma has anon_vma, we assign to next.
1290 	 *
1291 	 *     |<----->|
1292 	 * *************-------|
1293 	 *      vma       next
1294 	 * shrink/delete extend
1295 	 */
1296 
1297 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1298 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1299 
1300 	vma->anon_vma = &dummy_anon_vma;
1301 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1302 	vmg.prev = vma;
1303 	vmg.vma = vma;
1304 
1305 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1306 
1307 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1308 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1309 
1310 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1311 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1312 
1313 	cleanup_mm(&mm, &vmi);
1314 	return true;
1315 }
1316 
1317 static bool test_vmi_prealloc_fail(void)
1318 {
1319 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1320 	struct mm_struct mm = {};
1321 	VMA_ITERATOR(vmi, &mm, 0);
1322 	struct vma_merge_struct vmg = {
1323 		.mm = &mm,
1324 		.vmi = &vmi,
1325 	};
1326 	struct vm_area_struct *vma_prev, *vma;
1327 
1328 	/*
1329 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1330 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1331 	 * the duplicated anon_vma is unlinked.
1332 	 */
1333 
1334 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1335 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1336 	vma->anon_vma = &dummy_anon_vma;
1337 
1338 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1339 	vmg.prev = vma_prev;
1340 	vmg.vma = vma;
1341 
1342 	fail_prealloc = true;
1343 
1344 	/* This will cause the merge to fail. */
1345 	ASSERT_EQ(merge_existing(&vmg), NULL);
1346 	/* We will already have assigned the anon_vma. */
1347 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1348 	/* And it was both cloned and unlinked. */
1349 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1350 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1351 
1352 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1353 
1354 	/*
1355 	 * We repeat the same operation for expanding a VMA, which is what new
1356 	 * VMA merging ultimately uses too. This asserts that unlinking is
1357 	 * performed in this case as well.
1358 	 */
1359 
1360 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1361 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1362 	vma->anon_vma = &dummy_anon_vma;
1363 
1364 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1365 	vmg.vma = vma_prev;
1366 	vmg.next = vma;
1367 
1368 	fail_prealloc = true;
1369 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1370 
1371 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1372 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1373 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1374 
1375 	cleanup_mm(&mm, &vmi);
1376 	return true;
1377 }
1378 
1379 static bool test_merge_extend(void)
1380 {
1381 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1382 	struct mm_struct mm = {};
1383 	VMA_ITERATOR(vmi, &mm, 0x1000);
1384 	struct vm_area_struct *vma;
1385 
1386 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1387 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1388 
1389 	/*
1390 	 * Extend a VMA into the gap between itself and the following VMA.
1391 	 * This should result in a merge.
1392 	 *
1393 	 * <->
1394 	 * *  *
1395 	 *
1396 	 */
1397 
1398 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1399 	ASSERT_EQ(vma->vm_start, 0);
1400 	ASSERT_EQ(vma->vm_end, 0x4000);
1401 	ASSERT_EQ(vma->vm_pgoff, 0);
1402 	ASSERT_TRUE(vma_write_started(vma));
1403 	ASSERT_EQ(mm.map_count, 1);
1404 
1405 	cleanup_mm(&mm, &vmi);
1406 	return true;
1407 }
1408 
1409 static bool test_copy_vma(void)
1410 {
1411 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1412 	struct mm_struct mm = {};
1413 	bool need_locks = false;
1414 	VMA_ITERATOR(vmi, &mm, 0);
1415 	struct vm_area_struct *vma, *vma_new, *vma_next;
1416 
1417 	/* Move backwards and do not merge. */
1418 
1419 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1420 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1421 
1422 	ASSERT_NE(vma_new, vma);
1423 	ASSERT_EQ(vma_new->vm_start, 0);
1424 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1425 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1426 
1427 	cleanup_mm(&mm, &vmi);
1428 
1429 	/* Move a VMA into position next to another and merge the two. */
1430 
1431 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1432 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1433 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1434 
1435 	ASSERT_EQ(vma_new, vma_next);
1436 
1437 	cleanup_mm(&mm, &vmi);
1438 	return true;
1439 }
1440 
1441 int main(void)
1442 {
1443 	int num_tests = 0, num_fail = 0;
1444 
1445 	maple_tree_init();
1446 
1447 #define TEST(name)							\
1448 	do {								\
1449 		num_tests++;						\
1450 		if (!test_##name()) {					\
1451 			num_fail++;					\
1452 			fprintf(stderr, "Test " #name " FAILED\n");	\
1453 		}							\
1454 	} while (0)
1455 
1456 	/* Very simple tests to kick the tyres. */
1457 	TEST(simple_merge);
1458 	TEST(simple_modify);
1459 	TEST(simple_expand);
1460 	TEST(simple_shrink);
1461 
1462 	TEST(merge_new);
1463 	TEST(vma_merge_special_flags);
1464 	TEST(vma_merge_with_close);
1465 	TEST(vma_merge_new_with_close);
1466 	TEST(merge_existing);
1467 	TEST(anon_vma_non_mergeable);
1468 	TEST(dup_anon_vma);
1469 	TEST(vmi_prealloc_fail);
1470 	TEST(merge_extend);
1471 	TEST(copy_vma);
1472 
1473 #undef TEST
1474 
1475 	printf("%d tests run, %d passed, %d failed.\n",
1476 	       num_tests, num_tests - num_fail, num_fail);
1477 
1478 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1479 }
1480