1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "maple-shared.h"
8 #include "vma_internal.h"
9 
10 /* Include this so the header guard is set. */
11 #include "../../../mm/vma.h"
12 
13 static bool fail_prealloc;
14 
15 /* Then override vma_iter_prealloc() so we can choose to fail it. */
16 #define vma_iter_prealloc(vmi, vma)					\
17 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
18 
19 /*
20  * Directly import the VMA implementation here. Our vma_internal.h wrapper
21  * provides userland-equivalent functionality for everything vma.c uses.
22  */
23 #include "../../../mm/vma.c"
24 
25 const struct vm_operations_struct vma_dummy_vm_ops;
26 static struct anon_vma dummy_anon_vma;
27 
28 #define ASSERT_TRUE(_expr)						\
29 	do {								\
30 		if (!(_expr)) {						\
31 			fprintf(stderr,					\
32 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
33 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
34 			return false;					\
35 		}							\
36 	} while (0)
37 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
38 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
39 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
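
/*
 * Usage sketch: each test returns bool and relies on these macros to bail out
 * with false on the first failed check, e.g.:
 *
 *	static bool test_example(void)
 *	{
 *		ASSERT_EQ(2 + 2, 4);
 *		ASSERT_NE((void *)1, NULL);
 *		return true;
 *	}
 *
 * (test_example() is hypothetical; the real tests below follow the same shape.)
 */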
40 
41 static struct task_struct __current;
42 
43 struct task_struct *get_current(void)
44 {
45 	return &__current;
46 }
47 
48 /* Helper function to simply allocate a VMA. */
49 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
50 					unsigned long start,
51 					unsigned long end,
52 					pgoff_t pgoff,
53 					vm_flags_t flags)
54 {
55 	struct vm_area_struct *ret = vm_area_alloc(mm);
56 
57 	if (ret == NULL)
58 		return NULL;
59 
60 	ret->vm_start = start;
61 	ret->vm_end = end;
62 	ret->vm_pgoff = pgoff;
63 	ret->__vm_flags = flags;
64 
65 	return ret;
66 }
67 
68 /* Helper function to allocate a VMA and link it to the tree. */
69 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
70 						 unsigned long start,
71 						 unsigned long end,
72 						 pgoff_t pgoff,
73 						 vm_flags_t flags)
74 {
75 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
76 
77 	if (vma == NULL)
78 		return NULL;
79 
80 	if (vma_link(mm, vma)) {
81 		vm_area_free(vma);
82 		return NULL;
83 	}
84 
85 	/*
86 	 * Reset this counter which we use to track whether writes have
87 	 * begun. Linking to the tree will have caused this to be incremented,
88 	 * which means we will get a false positive otherwise.
89 	 */
90 	vma->vm_lock_seq = -1;
91 
92 	return vma;
93 }
94 
95 /* Helper function which provides a wrapper around a merge new VMA operation. */
96 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
97 {
98 	/*
99 	 * For convenience, obtain the prev and next VMAs, which the new VMA
100 	 * operation requires.
101 	 */
102 	vmg->next = vma_next(vmg->vmi);
103 	vmg->prev = vma_prev(vmg->vmi);
104 
105 	vma_iter_set(vmg->vmi, vmg->start);
106 	return vma_merge(vmg);
107 }
108 
109 /*
110  * Helper function which provides a wrapper around a merge existing VMA
111  * operation.
112  */
113 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
114 {
115 	return vma_merge(vmg);
116 }
117 
118 /*
119  * Helper function which provides a wrapper around the expansion of an existing
120  * VMA.
121  */
122 static int expand_existing(struct vma_merge_struct *vmg)
123 {
124 	return vma_expand(vmg);
125 }
126 
127 /*
128  * Helper function to reset the merge state and the associated VMA iterator
129  * to a specified new range.
130  */
131 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
132 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
133 {
134 	vma_iter_set(vmg->vmi, start);
135 
136 	vmg->prev = NULL;
137 	vmg->next = NULL;
138 	vmg->vma = NULL;
139 
140 	vmg->start = start;
141 	vmg->end = end;
142 	vmg->pgoff = pgoff;
143 	vmg->flags = flags;
144 }
145 
146 /*
147  * Helper function to try to merge a new VMA.
148  *
149  * Update vmg and its iterator, then try to merge; if no merge is possible,
150  * allocate a new VMA, link it into the maple tree and return it.
151  */
152 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
153 						struct vma_merge_struct *vmg,
154 						unsigned long start, unsigned long end,
155 						pgoff_t pgoff, vm_flags_t flags,
156 						bool *was_merged)
157 {
158 	struct vm_area_struct *merged;
159 
160 	vmg_set_range(vmg, start, end, pgoff, flags);
161 
162 	merged = merge_new(vmg);
163 	if (merged) {
164 		*was_merged = true;
165 		return merged;
166 	}
167 
168 	*was_merged = false;
169 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
170 }
171 
172 /*
173  * Helper function to reset the dummy anon_vma to indicate it has not been
174  * duplicated.
175  */
176 static void reset_dummy_anon_vma(void)
177 {
178 	dummy_anon_vma.was_cloned = false;
179 	dummy_anon_vma.was_unlinked = false;
180 }
181 
182 /*
183  * Helper function to remove all VMAs and destroy the maple tree associated with
184  * a virtual address space. Returns a count of the VMAs that were freed.
185  */
186 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
187 {
188 	struct vm_area_struct *vma;
189 	int count = 0;
190 
191 	fail_prealloc = false;
192 	reset_dummy_anon_vma();
193 
194 	vma_iter_set(vmi, 0);
195 	for_each_vma(*vmi, vma) {
196 		vm_area_free(vma);
197 		count++;
198 	}
199 
200 	mtree_destroy(&mm->mm_mt);
201 	mm->map_count = 0;
202 	return count;
203 }
204 
205 /* Helper function to determine if the VMA has had vma_start_write() performed. */
206 static bool vma_write_started(struct vm_area_struct *vma)
207 {
208 	int seq = vma->vm_lock_seq;
209 
210 	/* We reset after each check. */
211 	vma->vm_lock_seq = -1;
212 
213 	/* The vma_start_write() stub simply increments this value. */
214 	return seq > -1;
215 }
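
/*
 * For reference, a minimal sketch of the kind of vma_start_write() stub this
 * relies on (the real stub lives in the userland vma_internal.h and may
 * differ in detail):
 *
 *	static inline void vma_start_write(struct vm_area_struct *vma)
 *	{
 *		vma->vm_lock_seq++;
 *	}
 */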
216 
217 /* Helper function providing a dummy vm_ops->close() method. */
218 static void dummy_close(struct vm_area_struct *)
219 {
220 }
221 
222 static bool test_simple_merge(void)
223 {
224 	struct vm_area_struct *vma;
225 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
226 	struct mm_struct mm = {};
227 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
228 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
229 	VMA_ITERATOR(vmi, &mm, 0x1000);
230 	struct vma_merge_struct vmg = {
231 		.mm = &mm,
232 		.vmi = &vmi,
233 		.start = 0x1000,
234 		.end = 0x2000,
235 		.flags = flags,
236 		.pgoff = 1,
237 	};
238 
239 	ASSERT_FALSE(vma_link(&mm, vma_left));
240 	ASSERT_FALSE(vma_link(&mm, vma_right));
241 
242 	vma = merge_new(&vmg);
243 	ASSERT_NE(vma, NULL);
244 
245 	ASSERT_EQ(vma->vm_start, 0);
246 	ASSERT_EQ(vma->vm_end, 0x3000);
247 	ASSERT_EQ(vma->vm_pgoff, 0);
248 	ASSERT_EQ(vma->vm_flags, flags);
249 
250 	vm_area_free(vma);
251 	mtree_destroy(&mm.mm_mt);
252 
253 	return true;
254 }
255 
256 static bool test_simple_modify(void)
257 {
258 	struct vm_area_struct *vma;
259 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
260 	struct mm_struct mm = {};
261 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
262 	VMA_ITERATOR(vmi, &mm, 0x1000);
263 
264 	ASSERT_FALSE(vma_link(&mm, init_vma));
265 
266 	/*
267 	 * The flags will not be changed; the vma_modify_flags() function
268 	 * performs the merge/split only.
269 	 */
270 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
271 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
272 	ASSERT_NE(vma, NULL);
273 	/* We modify the provided VMA, and on split allocate new VMAs. */
274 	ASSERT_EQ(vma, init_vma);
275 
276 	ASSERT_EQ(vma->vm_start, 0x1000);
277 	ASSERT_EQ(vma->vm_end, 0x2000);
278 	ASSERT_EQ(vma->vm_pgoff, 1);
279 
280 	/*
281 	 * Now walk through the three split VMAs and make sure they are as
282 	 * expected.
283 	 */
284 
285 	vma_iter_set(&vmi, 0);
286 	vma = vma_iter_load(&vmi);
287 
288 	ASSERT_EQ(vma->vm_start, 0);
289 	ASSERT_EQ(vma->vm_end, 0x1000);
290 	ASSERT_EQ(vma->vm_pgoff, 0);
291 
292 	vm_area_free(vma);
293 	vma_iter_clear(&vmi);
294 
295 	vma = vma_next(&vmi);
296 
297 	ASSERT_EQ(vma->vm_start, 0x1000);
298 	ASSERT_EQ(vma->vm_end, 0x2000);
299 	ASSERT_EQ(vma->vm_pgoff, 1);
300 
301 	vm_area_free(vma);
302 	vma_iter_clear(&vmi);
303 
304 	vma = vma_next(&vmi);
305 
306 	ASSERT_EQ(vma->vm_start, 0x2000);
307 	ASSERT_EQ(vma->vm_end, 0x3000);
308 	ASSERT_EQ(vma->vm_pgoff, 2);
309 
310 	vm_area_free(vma);
311 	mtree_destroy(&mm.mm_mt);
312 
313 	return true;
314 }
315 
316 static bool test_simple_expand(void)
317 {
318 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
319 	struct mm_struct mm = {};
320 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
321 	VMA_ITERATOR(vmi, &mm, 0);
322 	struct vma_merge_struct vmg = {
323 		.vmi = &vmi,
324 		.vma = vma,
325 		.start = 0,
326 		.end = 0x3000,
327 		.pgoff = 0,
328 	};
329 
330 	ASSERT_FALSE(vma_link(&mm, vma));
331 
332 	ASSERT_FALSE(expand_existing(&vmg));
333 
334 	ASSERT_EQ(vma->vm_start, 0);
335 	ASSERT_EQ(vma->vm_end, 0x3000);
336 	ASSERT_EQ(vma->vm_pgoff, 0);
337 
338 	vm_area_free(vma);
339 	mtree_destroy(&mm.mm_mt);
340 
341 	return true;
342 }
343 
344 static bool test_simple_shrink(void)
345 {
346 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
347 	struct mm_struct mm = {};
348 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
349 	VMA_ITERATOR(vmi, &mm, 0);
350 
351 	ASSERT_FALSE(vma_link(&mm, vma));
352 
353 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
354 
355 	ASSERT_EQ(vma->vm_start, 0);
356 	ASSERT_EQ(vma->vm_end, 0x1000);
357 	ASSERT_EQ(vma->vm_pgoff, 0);
358 
359 	vm_area_free(vma);
360 	mtree_destroy(&mm.mm_mt);
361 
362 	return true;
363 }
364 
365 static bool test_merge_new(void)
366 {
367 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
368 	struct mm_struct mm = {};
369 	VMA_ITERATOR(vmi, &mm, 0);
370 	struct vma_merge_struct vmg = {
371 		.mm = &mm,
372 		.vmi = &vmi,
373 	};
374 	struct anon_vma_chain dummy_anon_vma_chain_a = {
375 		.anon_vma = &dummy_anon_vma,
376 	};
377 	struct anon_vma_chain dummy_anon_vma_chain_b = {
378 		.anon_vma = &dummy_anon_vma,
379 	};
380 	struct anon_vma_chain dummy_anon_vma_chain_c = {
381 		.anon_vma = &dummy_anon_vma,
382 	};
383 	struct anon_vma_chain dummy_anon_vma_chain_d = {
384 		.anon_vma = &dummy_anon_vma,
385 	};
386 	int count;
387 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
388 	bool merged;
389 
390 	/*
391 	 * 0123456789abc
392 	 * AA B       CC
393 	 */
394 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
395 	ASSERT_NE(vma_a, NULL);
396 	/* We give each VMA a single avc so we can test anon_vma duplication. */
397 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
398 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
399 
400 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
401 	ASSERT_NE(vma_b, NULL);
402 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
403 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
404 
405 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
406 	ASSERT_NE(vma_c, NULL);
407 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
408 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
409 
410 	/*
411 	 * NO merge.
412 	 *
413 	 * 0123456789abc
414 	 * AA B   **  CC
415 	 */
416 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
417 	ASSERT_NE(vma_d, NULL);
418 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
419 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
420 	ASSERT_FALSE(merged);
421 	ASSERT_EQ(mm.map_count, 4);
422 
423 	/*
424 	 * Merge BOTH sides.
425 	 *
426 	 * 0123456789abc
427 	 * AA*B   DD  CC
428 	 */
429 	vma_b->anon_vma = &dummy_anon_vma;
430 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
431 	ASSERT_EQ(vma, vma_a);
432 	/* Merge with A, delete B. */
433 	ASSERT_TRUE(merged);
434 	ASSERT_EQ(vma->vm_start, 0);
435 	ASSERT_EQ(vma->vm_end, 0x4000);
436 	ASSERT_EQ(vma->vm_pgoff, 0);
437 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
438 	ASSERT_TRUE(vma_write_started(vma));
439 	ASSERT_EQ(mm.map_count, 3);
440 
441 	/*
442 	 * Merge to PREVIOUS VMA.
443 	 *
444 	 * 0123456789abc
445 	 * AAAA*  DD  CC
446 	 */
447 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
448 	ASSERT_EQ(vma, vma_a);
449 	/* Extend A. */
450 	ASSERT_TRUE(merged);
451 	ASSERT_EQ(vma->vm_start, 0);
452 	ASSERT_EQ(vma->vm_end, 0x5000);
453 	ASSERT_EQ(vma->vm_pgoff, 0);
454 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
455 	ASSERT_TRUE(vma_write_started(vma));
456 	ASSERT_EQ(mm.map_count, 3);
457 
458 	/*
459 	 * Merge to NEXT VMA.
460 	 *
461 	 * 0123456789abc
462 	 * AAAAA *DD  CC
463 	 */
464 	vma_d->anon_vma = &dummy_anon_vma;
465 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
466 	ASSERT_EQ(vma, vma_d);
467 	/* Prepend. */
468 	ASSERT_TRUE(merged);
469 	ASSERT_EQ(vma->vm_start, 0x6000);
470 	ASSERT_EQ(vma->vm_end, 0x9000);
471 	ASSERT_EQ(vma->vm_pgoff, 6);
472 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
473 	ASSERT_TRUE(vma_write_started(vma));
474 	ASSERT_EQ(mm.map_count, 3);
475 
476 	/*
477 	 * Merge BOTH sides.
478 	 *
479 	 * 0123456789abc
480 	 * AAAAA*DDD  CC
481 	 */
482 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
483 	ASSERT_EQ(vma, vma_a);
484 	/* Merge with A, delete D. */
485 	ASSERT_TRUE(merged);
486 	ASSERT_EQ(vma->vm_start, 0);
487 	ASSERT_EQ(vma->vm_end, 0x9000);
488 	ASSERT_EQ(vma->vm_pgoff, 0);
489 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
490 	ASSERT_TRUE(vma_write_started(vma));
491 	ASSERT_EQ(mm.map_count, 2);
492 
493 	/*
494 	 * Merge to NEXT VMA.
495 	 *
496 	 * 0123456789abc
497 	 * AAAAAAAAA *CC
498 	 */
499 	vma_c->anon_vma = &dummy_anon_vma;
500 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
501 	ASSERT_EQ(vma, vma_c);
502 	/* Prepend C. */
503 	ASSERT_TRUE(merged);
504 	ASSERT_EQ(vma->vm_start, 0xa000);
505 	ASSERT_EQ(vma->vm_end, 0xc000);
506 	ASSERT_EQ(vma->vm_pgoff, 0xa);
507 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
508 	ASSERT_TRUE(vma_write_started(vma));
509 	ASSERT_EQ(mm.map_count, 2);
510 
511 	/*
512 	 * Merge BOTH sides.
513 	 *
514 	 * 0123456789abc
515 	 * AAAAAAAAA*CCC
516 	 */
517 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
518 	ASSERT_EQ(vma, vma_a);
519 	/* Extend A and delete C. */
520 	ASSERT_TRUE(merged);
521 	ASSERT_EQ(vma->vm_start, 0);
522 	ASSERT_EQ(vma->vm_end, 0xc000);
523 	ASSERT_EQ(vma->vm_pgoff, 0);
524 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
525 	ASSERT_TRUE(vma_write_started(vma));
526 	ASSERT_EQ(mm.map_count, 1);
527 
528 	/*
529 	 * Final state.
530 	 *
531 	 * 0123456789abc
532 	 * AAAAAAAAAAAAA
533 	 */
534 
535 	count = 0;
536 	vma_iter_set(&vmi, 0);
537 	for_each_vma(vmi, vma) {
538 		ASSERT_NE(vma, NULL);
539 		ASSERT_EQ(vma->vm_start, 0);
540 		ASSERT_EQ(vma->vm_end, 0xc000);
541 		ASSERT_EQ(vma->vm_pgoff, 0);
542 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
543 
544 		vm_area_free(vma);
545 		count++;
546 	}
547 
548 	/* Should only have one VMA left (though freed) after all is done. */
549 	ASSERT_EQ(count, 1);
550 
551 	mtree_destroy(&mm.mm_mt);
552 	return true;
553 }
554 
555 static bool test_vma_merge_special_flags(void)
556 {
557 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
558 	struct mm_struct mm = {};
559 	VMA_ITERATOR(vmi, &mm, 0);
560 	struct vma_merge_struct vmg = {
561 		.mm = &mm,
562 		.vmi = &vmi,
563 	};
564 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
565 	vm_flags_t all_special_flags = 0;
566 	int i;
567 	struct vm_area_struct *vma_left, *vma;
568 
569 	/* Make sure there aren't new VM_SPECIAL flags. */
570 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
571 		all_special_flags |= special_flags[i];
572 	}
573 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
574 
575 	/*
576 	 * 01234
577 	 * AAA
578 	 */
579 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
580 	ASSERT_NE(vma_left, NULL);
581 
582 	/* 1. Set up new VMA with special flag that would otherwise merge. */
583 
584 	/*
585 	 * 01234
586 	 * AAA*
587 	 *
588 	 * This should merge if not for the VM_SPECIAL flag.
589 	 */
590 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
591 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
592 		vm_flags_t special_flag = special_flags[i];
593 
594 		vma_left->__vm_flags = flags | special_flag;
595 		vmg.flags = flags | special_flag;
596 		vma = merge_new(&vmg);
597 		ASSERT_EQ(vma, NULL);
598 	}
599 
600 	/* 2. Modify VMA with special flag that would otherwise merge. */
601 
602 	/*
603 	 * 01234
604 	 * AAAB
605 	 *
606 	 * Create a VMA to modify.
607 	 */
608 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
609 	ASSERT_NE(vma, NULL);
610 	vmg.vma = vma;
611 
612 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
613 		vm_flags_t special_flag = special_flags[i];
614 
615 		vma_left->__vm_flags = flags | special_flag;
616 		vmg.flags = flags | special_flag;
617 		vma = merge_existing(&vmg);
618 		ASSERT_EQ(vma, NULL);
619 	}
620 
621 	cleanup_mm(&mm, &vmi);
622 	return true;
623 }
624 
625 static bool test_vma_merge_with_close(void)
626 {
627 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
628 	struct mm_struct mm = {};
629 	VMA_ITERATOR(vmi, &mm, 0);
630 	struct vma_merge_struct vmg = {
631 		.mm = &mm,
632 		.vmi = &vmi,
633 	};
634 	const struct vm_operations_struct vm_ops = {
635 		.close = dummy_close,
636 	};
637 	struct vm_area_struct *vma_next =
638 		alloc_and_link_vma(&mm, 0x2000, 0x3000, 2, flags);
639 	struct vm_area_struct *vma;
640 
641 	/*
642 	 * When we merge VMAs we sometimes have to delete others as part of the
643 	 * operation.
644 	 *
645 	 * Considering the two possible adjacent VMAs to which a VMA can be
646 	 * merged:
647 	 *
648 	 * [ prev ][ vma ][ next ]
649 	 *
650 	 * In no case will we need to delete prev. If the operation is
651 	 * mergeable, then prev will be extended with one or both of vma and
652 	 * next deleted.
653 	 *
654 	 * As a result, during initial mergeability checks, only
655 	 * can_vma_merge_before() (which implies the VMA being merged with is
656 	 * 'next' as shown above) bothers to check whether the next VMA
657 	 * has a vm_ops->close() callback that will need to be called when
658 	 * removed.
659 	 *
660 	 * If it does, then we cannot merge as the resources that the close()
661 	 * operation potentially clears down are tied only to the existing VMA
662 	 * range and we have no way of extending those to the nearly merged one.
663 	 *
664 	 * We must consider two scenarios:
665 	 *
666 	 * A.
667 	 *
668 	 * vm_ops->close:     -       -    !NULL
669 	 *                 [ prev ][ vma ][ next ]
670 	 *
671 	 * Where prev may or may not be present/mergeable.
672 	 *
673 	 * This is picked up by a specific check in can_vma_merge_before().
674 	 *
675 	 * B.
676 	 *
677 	 * vm_ops->close:     -     !NULL
678 	 *                 [ prev ][ vma ]
679 	 *
680 	 * Where prev and vma are present and mergeable.
681 	 *
682 	 * This is picked up by a specific check in the modified VMA merge.
683 	 *
684 	 * IMPORTANT NOTE: We make the assumption that the following case:
685 	 *
686 	 *    -     !NULL   NULL
687 	 * [ prev ][ vma ][ next ]
688 	 *
689 	 * Cannot occur, because vma->vm_ops being the same implies the same
690 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
691 	 * would be set too, and thus scenario A would pick this up.
692 	 */
693 
694 	ASSERT_NE(vma_next, NULL);
695 
696 	/*
697 	 * SCENARIO A
698 	 *
699 	 * 0123
700 	 *  *N
701 	 */
702 
703 	/* Make the next VMA have a close() callback. */
704 	vma_next->vm_ops = &vm_ops;
705 
706 	/* Our proposed VMA has characteristics that would otherwise be merged. */
707 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
708 
709 	/* The next VMA having a close() operation should cause the merge to fail. */
710 	ASSERT_EQ(merge_new(&vmg), NULL);
711 
712 	/* Now create the VMA so we can merge via modified flags. */
713 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
714 	vma = alloc_and_link_vma(&mm, 0x1000, 0x2000, 1, flags);
715 	vmg.vma = vma;
716 
717 	/*
718 	 * The VMA being modified in a way that would otherwise merge should
719 	 * also fail.
720 	 */
721 	ASSERT_EQ(merge_existing(&vmg), NULL);
722 
723 	/* SCENARIO B
724 	 *
725 	 * 0123
726 	 * P*
727 	 *
728 	 * In order for this scenario to trigger, the VMA currently being
729 	 * modified must also have a .close().
730 	 */
731 
732 	/* Reset VMG state. */
733 	vmg_set_range(&vmg, 0x1000, 0x2000, 1, flags);
734 	/*
735 	 * Make next unmergeable, and don't let the scenario A check pick this
736 	 * up, we want to reproduce scenario B only.
737 	 */
738 	vma_next->vm_ops = NULL;
739 	vma_next->__vm_flags &= ~VM_MAYWRITE;
740 	/* Allocate prev. */
741 	vmg.prev = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
742 	/* Assign a vm_ops->close() function to VMA explicitly. */
743 	vma->vm_ops = &vm_ops;
744 	vmg.vma = vma;
745 	/* Make sure merge does not occur. */
746 	ASSERT_EQ(merge_existing(&vmg), NULL);
747 
748 	cleanup_mm(&mm, &vmi);
749 	return true;
750 }
751 
752 static bool test_vma_merge_new_with_close(void)
753 {
754 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
755 	struct mm_struct mm = {};
756 	VMA_ITERATOR(vmi, &mm, 0);
757 	struct vma_merge_struct vmg = {
758 		.mm = &mm,
759 		.vmi = &vmi,
760 	};
761 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
762 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
763 	const struct vm_operations_struct vm_ops = {
764 		.close = dummy_close,
765 	};
766 	struct vm_area_struct *vma;
767 
768 	/*
769 	 * We should allow the partial merge of a proposed new VMA if the
770 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
771 	 * compatible), e.g.:
772 	 *
773 	 *        New VMA
774 	 *    A  v-------v  B
775 	 * |-----|       |-----|
776 	 *  close         close
777 	 *
778 	 * Since the rule is to not DELETE a VMA with a close operation, this
779 	 * should be permitted; however, rather than expanding A and deleting B,
780 	 * we should simply expand A and leave B intact, e.g.:
781 	 *
782 	 *        New VMA
783 	 *       A          B
784 	 * |------------||-----|
785 	 *  close         close
786 	 */
787 
788 	/* Give both prev and next a vm_ops->close() hook. */
789 	vma_prev->vm_ops = &vm_ops;
790 	vma_next->vm_ops = &vm_ops;
791 
792 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
793 	vma = merge_new(&vmg);
794 	ASSERT_NE(vma, NULL);
795 	ASSERT_EQ(vma->vm_start, 0);
796 	ASSERT_EQ(vma->vm_end, 0x5000);
797 	ASSERT_EQ(vma->vm_pgoff, 0);
798 	ASSERT_EQ(vma->vm_ops, &vm_ops);
799 	ASSERT_TRUE(vma_write_started(vma));
800 	ASSERT_EQ(mm.map_count, 2);
801 
802 	cleanup_mm(&mm, &vmi);
803 	return true;
804 }
805 
806 static bool test_merge_existing(void)
807 {
808 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
809 	struct mm_struct mm = {};
810 	VMA_ITERATOR(vmi, &mm, 0);
811 	struct vm_area_struct *vma, *vma_prev, *vma_next;
812 	struct vma_merge_struct vmg = {
813 		.mm = &mm,
814 		.vmi = &vmi,
815 	};
816 
817 	/*
818 	 * Merge right case - partial span.
819 	 *
820 	 *    <->
821 	 * 0123456789
822 	 *   VVVVNNN
823 	 *            ->
824 	 * 0123456789
825 	 *   VNNNNNN
826 	 */
827 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
828 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
829 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
830 	vmg.vma = vma;
831 	vmg.prev = vma;
832 	vma->anon_vma = &dummy_anon_vma;
833 	ASSERT_EQ(merge_existing(&vmg), vma_next);
834 	ASSERT_EQ(vma_next->vm_start, 0x3000);
835 	ASSERT_EQ(vma_next->vm_end, 0x9000);
836 	ASSERT_EQ(vma_next->vm_pgoff, 3);
837 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
838 	ASSERT_EQ(vma->vm_start, 0x2000);
839 	ASSERT_EQ(vma->vm_end, 0x3000);
840 	ASSERT_EQ(vma->vm_pgoff, 2);
841 	ASSERT_TRUE(vma_write_started(vma));
842 	ASSERT_TRUE(vma_write_started(vma_next));
843 	ASSERT_EQ(mm.map_count, 2);
844 
845 	/* Clear down and reset. */
846 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
847 
848 	/*
849 	 * Merge right case - full span.
850 	 *
851 	 *   <-->
852 	 * 0123456789
853 	 *   VVVVNNN
854 	 *            ->
855 	 * 0123456789
856 	 *   NNNNNNN
857 	 */
858 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
859 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
860 	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
861 	vmg.vma = vma;
862 	vma->anon_vma = &dummy_anon_vma;
863 	ASSERT_EQ(merge_existing(&vmg), vma_next);
864 	ASSERT_EQ(vma_next->vm_start, 0x2000);
865 	ASSERT_EQ(vma_next->vm_end, 0x9000);
866 	ASSERT_EQ(vma_next->vm_pgoff, 2);
867 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
868 	ASSERT_TRUE(vma_write_started(vma_next));
869 	ASSERT_EQ(mm.map_count, 1);
870 
871 	/* Clear down and reset. We should have deleted vma. */
872 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
873 
874 	/*
875 	 * Merge left case - partial span.
876 	 *
877 	 *    <->
878 	 * 0123456789
879 	 * PPPVVVV
880 	 *            ->
881 	 * 0123456789
882 	 * PPPPPPV
883 	 */
884 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
885 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
886 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
887 	vmg.prev = vma_prev;
888 	vmg.vma = vma;
889 	vma->anon_vma = &dummy_anon_vma;
890 
891 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
892 	ASSERT_EQ(vma_prev->vm_start, 0);
893 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
894 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
895 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
896 	ASSERT_EQ(vma->vm_start, 0x6000);
897 	ASSERT_EQ(vma->vm_end, 0x7000);
898 	ASSERT_EQ(vma->vm_pgoff, 6);
899 	ASSERT_TRUE(vma_write_started(vma_prev));
900 	ASSERT_TRUE(vma_write_started(vma));
901 	ASSERT_EQ(mm.map_count, 2);
902 
903 	/* Clear down and reset. */
904 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
905 
906 	/*
907 	 * Merge left case - full span.
908 	 *
909 	 *    <-->
910 	 * 0123456789
911 	 * PPPVVVV
912 	 *            ->
913 	 * 0123456789
914 	 * PPPPPPP
915 	 */
916 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
917 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
918 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
919 	vmg.prev = vma_prev;
920 	vmg.vma = vma;
921 	vma->anon_vma = &dummy_anon_vma;
922 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
923 	ASSERT_EQ(vma_prev->vm_start, 0);
924 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
925 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
926 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
927 	ASSERT_TRUE(vma_write_started(vma_prev));
928 	ASSERT_EQ(mm.map_count, 1);
929 
930 	/* Clear down and reset. We should have deleted vma. */
931 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
932 
933 	/*
934 	 * Merge both case.
935 	 *
936 	 *    <-->
937 	 * 0123456789
938 	 * PPPVVVVNNN
939 	 *             ->
940 	 * 0123456789
941 	 * PPPPPPPPPP
942 	 */
943 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
944 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
945 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
946 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
947 	vmg.prev = vma_prev;
948 	vmg.vma = vma;
949 	vma->anon_vma = &dummy_anon_vma;
950 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
951 	ASSERT_EQ(vma_prev->vm_start, 0);
952 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
953 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
954 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
955 	ASSERT_TRUE(vma_write_started(vma_prev));
956 	ASSERT_EQ(mm.map_count, 1);
957 
958 	/* Clear down and reset. We should have deleted prev and next. */
959 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
960 
961 	/*
962 	 * Non-merge ranges. The modified VMA merge operation assumes that the
963 	 * caller always specifies ranges within the input VMA so we need only
964 	 * examine these cases.
965 	 *
966 	 *     -
967 	 *      -
968 	 *       -
969 	 *     <->
970 	 *     <>
971 	 *      <>
972 	 * 0123456789a
973 	 * PPPVVVVVNNN
974 	 */
975 
976 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
977 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
978 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
979 
980 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
981 	vmg.prev = vma;
982 	vmg.vma = vma;
983 	ASSERT_EQ(merge_existing(&vmg), NULL);
984 
985 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
986 	vmg.prev = vma;
987 	vmg.vma = vma;
988 	ASSERT_EQ(merge_existing(&vmg), NULL);
989 
990 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
991 	vmg.prev = vma;
992 	vmg.vma = vma;
993 	ASSERT_EQ(merge_existing(&vmg), NULL);
994 
995 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
996 	vmg.prev = vma;
997 	vmg.vma = vma;
998 	ASSERT_EQ(merge_existing(&vmg), NULL);
999 
1000 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1001 	vmg.prev = vma;
1002 	vmg.vma = vma;
1003 	ASSERT_EQ(merge_existing(&vmg), NULL);
1004 
1005 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1006 	vmg.prev = vma;
1007 	vmg.vma = vma;
1008 	ASSERT_EQ(merge_existing(&vmg), NULL);
1009 
1010 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1011 
1012 	return true;
1013 }
1014 
1015 static bool test_anon_vma_non_mergeable(void)
1016 {
1017 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1018 	struct mm_struct mm = {};
1019 	VMA_ITERATOR(vmi, &mm, 0);
1020 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1021 	struct vma_merge_struct vmg = {
1022 		.mm = &mm,
1023 		.vmi = &vmi,
1024 	};
1025 	struct anon_vma_chain dummy_anon_vma_chain1 = {
1026 		.anon_vma = &dummy_anon_vma,
1027 	};
1028 	struct anon_vma_chain dummy_anon_vma_chain2 = {
1029 		.anon_vma = &dummy_anon_vma,
1030 	};
1031 
1032 	/*
1033 	 * In the case of a modified VMA merge that would merge both the left
1034 	 * and right VMAs, but where prev and next have incompatible anon_vma
1035 	 * objects, we revert to a merge of prev and the VMA:
1036 	 *
1037 	 *    <-->
1038 	 * 0123456789
1039 	 * PPPVVVVNNN
1040 	 *            ->
1041 	 * 0123456789
1042 	 * PPPPPPPNNN
1043 	 */
1044 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1045 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1046 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1047 
1048 	/*
1049 	 * Give both prev and next a single anon_vma_chain entry each, so they
1050 	 * will merge with the NULL vmg->anon_vma.
1051 	 *
1052 	 * However, when prev is compared to next, the merge should fail.
1053 	 */
1054 
1055 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1056 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1057 	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1058 	vma_prev->anon_vma = &dummy_anon_vma;
1059 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1060 
1061 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1062 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1063 	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1064 	vma_next->anon_vma = (struct anon_vma *)2;
1065 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1066 
1067 	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1068 
1069 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1070 	vmg.prev = vma_prev;
1071 	vmg.vma = vma;
1072 
1073 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1074 	ASSERT_EQ(vma_prev->vm_start, 0);
1075 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1076 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1077 	ASSERT_TRUE(vma_write_started(vma_prev));
1078 	ASSERT_FALSE(vma_write_started(vma_next));
1079 
1080 	/* Clear down and reset. */
1081 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1082 
1083 	/*
1084 	 * Now consider the new VMA case. This is equivalent, except that we add
1085 	 * a new VMA in the gap between prev and next.
1086 	 *
1087 	 *    <-->
1088 	 * 0123456789
1089 	 * PPP****NNN
1090 	 *            ->
1091 	 * 0123456789
1092 	 * PPPPPPPNNN
1093 	 */
1094 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1095 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1096 
1097 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1098 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1099 	vma_prev->anon_vma = (struct anon_vma *)1;
1100 
1101 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1102 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1103 	vma_next->anon_vma = (struct anon_vma *)2;
1104 
1105 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1106 	vmg.prev = vma_prev;
1107 
1108 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1109 	ASSERT_EQ(vma_prev->vm_start, 0);
1110 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1111 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1112 	ASSERT_TRUE(vma_write_started(vma_prev));
1113 	ASSERT_FALSE(vma_write_started(vma_next));
1114 
1115 	/* Final cleanup. */
1116 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1117 
1118 	return true;
1119 }
1120 
1121 static bool test_dup_anon_vma(void)
1122 {
1123 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1124 	struct mm_struct mm = {};
1125 	VMA_ITERATOR(vmi, &mm, 0);
1126 	struct vma_merge_struct vmg = {
1127 		.mm = &mm,
1128 		.vmi = &vmi,
1129 	};
1130 	struct anon_vma_chain dummy_anon_vma_chain = {
1131 		.anon_vma = &dummy_anon_vma,
1132 	};
1133 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1134 
1135 	reset_dummy_anon_vma();
1136 
1137 	/*
1138 	 * Expanding a VMA to delete the next one duplicates next's anon_vma
1139 	 * and assigns it to the expanded VMA.
1140 	 *
1141 	 * This covers new VMA merging, as these operations amount to a VMA
1142 	 * expand.
1143 	 */
1144 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1145 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1146 	vma_next->anon_vma = &dummy_anon_vma;
1147 
1148 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1149 	vmg.vma = vma_prev;
1150 	vmg.next = vma_next;
1151 
1152 	ASSERT_EQ(expand_existing(&vmg), 0);
1153 
1154 	/* Will have been cloned. */
1155 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1156 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1157 
1158 	/* Cleanup ready for next run. */
1159 	cleanup_mm(&mm, &vmi);
1160 
1161 	/*
1162 	 * next has anon_vma, we assign to prev.
1163 	 *
1164 	 *         |<----->|
1165 	 * |-------*********-------|
1166 	 *   prev     vma     next
1167 	 *  extend   delete  delete
1168 	 */
1169 
1170 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1171 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1172 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1173 
1174 	/* Initialise avc so mergeability check passes. */
1175 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1176 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1177 
1178 	vma_next->anon_vma = &dummy_anon_vma;
1179 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1180 	vmg.prev = vma_prev;
1181 	vmg.vma = vma;
1182 
1183 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1184 
1185 	ASSERT_EQ(vma_prev->vm_start, 0);
1186 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1187 
1188 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1189 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1190 
1191 	cleanup_mm(&mm, &vmi);
1192 
1193 	/*
1194 	 * vma has anon_vma, we assign to prev.
1195 	 *
1196 	 *         |<----->|
1197 	 * |-------*********-------|
1198 	 *   prev     vma     next
1199 	 *  extend   delete  delete
1200 	 */
1201 
1202 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1203 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1204 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1205 
1206 	vma->anon_vma = &dummy_anon_vma;
1207 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1208 	vmg.prev = vma_prev;
1209 	vmg.vma = vma;
1210 
1211 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1212 
1213 	ASSERT_EQ(vma_prev->vm_start, 0);
1214 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1215 
1216 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1217 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1218 
1219 	cleanup_mm(&mm, &vmi);
1220 
1221 	/*
1222 	 * vma has anon_vma, we assign to prev.
1223 	 *
1224 	 *         |<----->|
1225 	 * |-------*************
1226 	 *   prev       vma
1227 	 *  extend shrink/delete
1228 	 */
1229 
1230 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1231 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1232 
1233 	vma->anon_vma = &dummy_anon_vma;
1234 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1235 	vmg.prev = vma_prev;
1236 	vmg.vma = vma;
1237 
1238 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1239 
1240 	ASSERT_EQ(vma_prev->vm_start, 0);
1241 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1242 
1243 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1244 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1245 
1246 	cleanup_mm(&mm, &vmi);
1247 
1248 	/*
1249 	 * vma has anon_vma, we assign to next.
1250 	 *
1251 	 *     |<----->|
1252 	 * *************-------|
1253 	 *      vma       next
1254 	 * shrink/delete extend
1255 	 */
1256 
1257 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1258 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1259 
1260 	vma->anon_vma = &dummy_anon_vma;
1261 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1262 	vmg.prev = vma;
1263 	vmg.vma = vma;
1264 
1265 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1266 
1267 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1268 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1269 
1270 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1271 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1272 
1273 	cleanup_mm(&mm, &vmi);
1274 	return true;
1275 }
1276 
1277 static bool test_vmi_prealloc_fail(void)
1278 {
1279 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1280 	struct mm_struct mm = {};
1281 	VMA_ITERATOR(vmi, &mm, 0);
1282 	struct vma_merge_struct vmg = {
1283 		.mm = &mm,
1284 		.vmi = &vmi,
1285 	};
1286 	struct vm_area_struct *vma_prev, *vma;
1287 
1288 	/*
1289 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1290 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1291 	 * the duplicated anon_vma is unlinked.
1292 	 */
1293 
1294 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1295 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1296 	vma->anon_vma = &dummy_anon_vma;
1297 
1298 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1299 	vmg.prev = vma_prev;
1300 	vmg.vma = vma;
1301 
1302 	fail_prealloc = true;
1303 
1304 	/* This will cause the merge to fail. */
1305 	ASSERT_EQ(merge_existing(&vmg), NULL);
1306 	/* We will already have assigned the anon_vma. */
1307 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1308 	/* And it was both cloned and unlinked. */
1309 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1310 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1311 
1312 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1313 
1314 	/*
1315 	 * We repeat the same operation for expanding a VMA, which is what new
1316 	 * VMA merging ultimately uses as well. This asserts that unlinking is
1317 	 * performed in this case too.
1318 	 */
1319 
1320 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1321 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1322 	vma->anon_vma = &dummy_anon_vma;
1323 
1324 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1325 	vmg.vma = vma_prev;
1326 	vmg.next = vma;
1327 
1328 	fail_prealloc = true;
1329 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1330 
1331 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1332 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1333 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1334 
1335 	cleanup_mm(&mm, &vmi);
1336 	return true;
1337 }
1338 
1339 static bool test_merge_extend(void)
1340 {
1341 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1342 	struct mm_struct mm = {};
1343 	VMA_ITERATOR(vmi, &mm, 0x1000);
1344 	struct vm_area_struct *vma;
1345 
1346 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1347 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1348 
1349 	/*
1350 	 * Extend a VMA into the gap between itself and the following VMA.
1351 	 * This should result in a merge.
1352 	 *
1353 	 * <->
1354 	 * *  *
1355 	 *
1356 	 */
1357 
1358 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1359 	ASSERT_EQ(vma->vm_start, 0);
1360 	ASSERT_EQ(vma->vm_end, 0x4000);
1361 	ASSERT_EQ(vma->vm_pgoff, 0);
1362 	ASSERT_TRUE(vma_write_started(vma));
1363 	ASSERT_EQ(mm.map_count, 1);
1364 
1365 	cleanup_mm(&mm, &vmi);
1366 	return true;
1367 }
1368 
1369 static bool test_copy_vma(void)
1370 {
1371 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1372 	struct mm_struct mm = {};
1373 	bool need_locks = false;
1374 	VMA_ITERATOR(vmi, &mm, 0);
1375 	struct vm_area_struct *vma, *vma_new, *vma_next;
1376 
1377 	/* Move backwards and do not merge. */
1378 
1379 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1380 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1381 
1382 	ASSERT_NE(vma_new, vma);
1383 	ASSERT_EQ(vma_new->vm_start, 0);
1384 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1385 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1386 
1387 	cleanup_mm(&mm, &vmi);
1388 
1389 	/* Move a VMA into position next to another and merge the two. */
1390 
1391 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1392 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1393 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1394 
1395 	ASSERT_EQ(vma_new, vma_next);
1396 
1397 	cleanup_mm(&mm, &vmi);
1398 	return true;
1399 }
1400 
1401 int main(void)
1402 {
1403 	int num_tests = 0, num_fail = 0;
1404 
1405 	maple_tree_init();
1406 
1407 #define TEST(name)							\
1408 	do {								\
1409 		num_tests++;						\
1410 		if (!test_##name()) {					\
1411 			num_fail++;					\
1412 			fprintf(stderr, "Test " #name " FAILED\n");	\
1413 		}							\
1414 	} while (0)
1415 
1416 	/* Very simple tests to kick the tyres. */
1417 	TEST(simple_merge);
1418 	TEST(simple_modify);
1419 	TEST(simple_expand);
1420 	TEST(simple_shrink);
1421 
1422 	TEST(merge_new);
1423 	TEST(vma_merge_special_flags);
1424 	TEST(vma_merge_with_close);
1425 	TEST(vma_merge_new_with_close);
1426 	TEST(merge_existing);
1427 	TEST(anon_vma_non_mergeable);
1428 	TEST(dup_anon_vma);
1429 	TEST(vmi_prealloc_fail);
1430 	TEST(merge_extend);
1431 	TEST(copy_vma);
1432 
1433 #undef TEST
1434 
1435 	printf("%d tests run, %d passed, %d failed.\n",
1436 	       num_tests, num_tests - num_fail, num_fail);
1437 
1438 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1439 }
1440