xref: /linux/tools/testing/vma/vma.c (revision fe1136b4ccbfac9b8e72d4551d1ce788a67d59cb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "generated/bit-length.h"
8 
9 #include "maple-shared.h"
10 #include "vma_internal.h"
11 
12 /* Include this so the header guard is set. */
13 #include "../../../mm/vma.h"
14 
15 static bool fail_prealloc;
16 
17 /* Then override vma_iter_prealloc() so we can choose to fail it. */
18 #define vma_iter_prealloc(vmi, vma)					\
19 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
20 
21 #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
22 
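/* Globals the kernel proper defines elsewhere; provide them for the userland build. */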
23 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
24 unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
25 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
26 
27 /*
28  * Directly import the VMA implementation here. Our vma_internal.h wrapper
29  * provides userland-equivalent functionality for everything vma.c uses.
30  */
31 #include "../../../mm/vma.c"
32 
33 const struct vm_operations_struct vma_dummy_vm_ops;
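/* Dummy anon_vma whose was_cloned/was_unlinked flags let the tests observe anon_vma handling. */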
34 static struct anon_vma dummy_anon_vma;
35 
36 #define ASSERT_TRUE(_expr)						\
37 	do {								\
38 		if (!(_expr)) {						\
39 			fprintf(stderr,					\
40 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
41 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
42 			return false;					\
43 		}							\
44 	} while (0)
45 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
46 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
47 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
48 
49 static struct task_struct __current;
50 
51 struct task_struct *get_current(void)
52 {
53 	return &__current;
54 }
55 
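/* Stubbed rlimit(): report every resource limit as effectively unlimited. */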
56 unsigned long rlimit(unsigned int limit)
57 {
58 	return (unsigned long)-1;
59 }
60 
61 /* Helper function to simply allocate a VMA. */
62 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
63 					unsigned long start,
64 					unsigned long end,
65 					pgoff_t pgoff,
66 					vm_flags_t flags)
67 {
68 	struct vm_area_struct *ret = vm_area_alloc(mm);
69 
70 	if (ret == NULL)
71 		return NULL;
72 
73 	ret->vm_start = start;
74 	ret->vm_end = end;
75 	ret->vm_pgoff = pgoff;
76 	ret->__vm_flags = flags;
77 
78 	return ret;
79 }
80 
81 /* Helper function to allocate a VMA and link it to the tree. */
82 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
83 						 unsigned long start,
84 						 unsigned long end,
85 						 pgoff_t pgoff,
86 						 vm_flags_t flags)
87 {
88 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
89 
90 	if (vma == NULL)
91 		return NULL;
92 
93 	if (vma_link(mm, vma)) {
94 		vm_area_free(vma);
95 		return NULL;
96 	}
97 
98 	/*
99 	 * Reset this counter which we use to track whether writes have
100 	 * begun. Linking to the tree will have caused this to be incremented,
101 	 * which means we will get a false positive otherwise.
102 	 */
103 	vma->vm_lock_seq = UINT_MAX;
104 
105 	return vma;
106 }
107 
108 /* Helper function which provides a wrapper around a merge new VMA operation. */
109 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
110 {
111 	/*
112 	 * For convenience, obtain the prev and next VMAs, which the new VMA
113 	 * merge operation requires.
114 	 */
115 	vmg->next = vma_next(vmg->vmi);
116 	vmg->prev = vma_prev(vmg->vmi);
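	/* Reposition the iterator at the proposed new range. */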
117 	vma_iter_next_range(vmg->vmi);
118 
119 	return vma_merge_new_range(vmg);
120 }
121 
122 /*
123  * Helper function which provides a wrapper around a merge existing VMA
124  * operation.
125  */
126 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
127 {
128 	return vma_merge_existing_range(vmg);
129 }
130 
131 /*
132  * Helper function which provides a wrapper around the expansion of an existing
133  * VMA.
134  */
135 static int expand_existing(struct vma_merge_struct *vmg)
136 {
137 	return vma_expand(vmg);
138 }
139 
140 /*
141  * Helper function to reset the merge state and the associated VMA iterator
142  * to a specified new range.
143  */
144 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
145 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
146 {
147 	vma_iter_set(vmg->vmi, start);
148 
149 	vmg->prev = NULL;
150 	vmg->middle = NULL;
151 	vmg->next = NULL;
152 	vmg->target = NULL;
153 
154 	vmg->start = start;
155 	vmg->end = end;
156 	vmg->pgoff = pgoff;
157 	vmg->flags = flags;
158 
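	/* Reset merge flags so state from a previous attempt doesn't leak through. */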
159 	vmg->just_expand = false;
160 	vmg->__remove_middle = false;
161 	vmg->__remove_next = false;
162 	vmg->__adjust_middle_start = false;
163 	vmg->__adjust_next_start = false;
164 }
165 
166 /*
167  * Helper function to try to merge a new VMA.
168  *
169  * Update vmg and the iterator for it and try to merge, otherwise allocate a new
170  * VMA, link it to the maple tree and return it.
171  */
172 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
173 						struct vma_merge_struct *vmg,
174 						unsigned long start, unsigned long end,
175 						pgoff_t pgoff, vm_flags_t flags,
176 						bool *was_merged)
177 {
178 	struct vm_area_struct *merged;
179 
180 	vmg_set_range(vmg, start, end, pgoff, flags);
181 
182 	merged = merge_new(vmg);
183 	if (merged) {
184 		*was_merged = true;
185 		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
186 		return merged;
187 	}
188 
189 	*was_merged = false;
190 
191 	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
192 
193 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
194 }
195 
196 /*
197  * Helper function to reset the dummy anon_vma to indicate it has not been
198  * duplicated.
199  */
200 static void reset_dummy_anon_vma(void)
201 {
202 	dummy_anon_vma.was_cloned = false;
203 	dummy_anon_vma.was_unlinked = false;
204 }
205 
206 /*
207  * Helper function to remove all VMAs and destroy the maple tree associated with
208  * a virtual address space. Returns a count of the VMAs that were in the tree.
209  */
210 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
211 {
212 	struct vm_area_struct *vma;
213 	int count = 0;
214 
215 	fail_prealloc = false;
216 	reset_dummy_anon_vma();
217 
218 	vma_iter_set(vmi, 0);
219 	for_each_vma(*vmi, vma) {
220 		vm_area_free(vma);
221 		count++;
222 	}
223 
224 	mtree_destroy(&mm->mm_mt);
225 	mm->map_count = 0;
226 	return count;
227 }
228 
229 /* Helper function to determine if VMA has had vma_start_write() performed. */
230 static bool vma_write_started(struct vm_area_struct *vma)
231 {
232 	int seq = vma->vm_lock_seq;
233 
234 	/* We reset after each check. */
235 	vma->vm_lock_seq = UINT_MAX;
236 
237 	/* The vma_start_write() stub simply increments this value. */
238 	return seq > -1;
239 }
240 
241 /* Helper function providing a dummy vm_ops->close() method. */
242 static void dummy_close(struct vm_area_struct *)
243 {
244 }
245 
246 static bool test_simple_merge(void)
247 {
248 	struct vm_area_struct *vma;
249 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
250 	struct mm_struct mm = {};
251 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
252 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
253 	VMA_ITERATOR(vmi, &mm, 0x1000);
254 	struct vma_merge_struct vmg = {
255 		.mm = &mm,
256 		.vmi = &vmi,
257 		.start = 0x1000,
258 		.end = 0x2000,
259 		.flags = flags,
260 		.pgoff = 1,
261 	};
262 
263 	ASSERT_FALSE(vma_link(&mm, vma_left));
264 	ASSERT_FALSE(vma_link(&mm, vma_right));
265 
266 	vma = merge_new(&vmg);
267 	ASSERT_NE(vma, NULL);
268 
269 	ASSERT_EQ(vma->vm_start, 0);
270 	ASSERT_EQ(vma->vm_end, 0x3000);
271 	ASSERT_EQ(vma->vm_pgoff, 0);
272 	ASSERT_EQ(vma->vm_flags, flags);
273 
274 	vm_area_free(vma);
275 	mtree_destroy(&mm.mm_mt);
276 
277 	return true;
278 }
279 
280 static bool test_simple_modify(void)
281 {
282 	struct vm_area_struct *vma;
283 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
284 	struct mm_struct mm = {};
285 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
286 	VMA_ITERATOR(vmi, &mm, 0x1000);
287 
288 	ASSERT_FALSE(vma_link(&mm, init_vma));
289 
290 	/*
291 	 * The flags will not be changed; the vma_modify_flags() function
292 	 * performs the merge/split only.
293 	 */
294 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
295 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
296 	ASSERT_NE(vma, NULL);
297 	/* We modify the provided VMA, and on split allocate new VMAs. */
298 	ASSERT_EQ(vma, init_vma);
299 
300 	ASSERT_EQ(vma->vm_start, 0x1000);
301 	ASSERT_EQ(vma->vm_end, 0x2000);
302 	ASSERT_EQ(vma->vm_pgoff, 1);
303 
304 	/*
305 	 * Now walk through the three split VMAs and make sure they are as
306 	 * expected.
307 	 */
308 
309 	vma_iter_set(&vmi, 0);
310 	vma = vma_iter_load(&vmi);
311 
312 	ASSERT_EQ(vma->vm_start, 0);
313 	ASSERT_EQ(vma->vm_end, 0x1000);
314 	ASSERT_EQ(vma->vm_pgoff, 0);
315 
316 	vm_area_free(vma);
317 	vma_iter_clear(&vmi);
318 
319 	vma = vma_next(&vmi);
320 
321 	ASSERT_EQ(vma->vm_start, 0x1000);
322 	ASSERT_EQ(vma->vm_end, 0x2000);
323 	ASSERT_EQ(vma->vm_pgoff, 1);
324 
325 	vm_area_free(vma);
326 	vma_iter_clear(&vmi);
327 
328 	vma = vma_next(&vmi);
329 
330 	ASSERT_EQ(vma->vm_start, 0x2000);
331 	ASSERT_EQ(vma->vm_end, 0x3000);
332 	ASSERT_EQ(vma->vm_pgoff, 2);
333 
334 	vm_area_free(vma);
335 	mtree_destroy(&mm.mm_mt);
336 
337 	return true;
338 }
339 
340 static bool test_simple_expand(void)
341 {
342 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
343 	struct mm_struct mm = {};
344 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
345 	VMA_ITERATOR(vmi, &mm, 0);
346 	struct vma_merge_struct vmg = {
347 		.vmi = &vmi,
348 		.middle = vma,
349 		.start = 0,
350 		.end = 0x3000,
351 		.pgoff = 0,
352 	};
353 
354 	ASSERT_FALSE(vma_link(&mm, vma));
355 
356 	ASSERT_FALSE(expand_existing(&vmg));
357 
358 	ASSERT_EQ(vma->vm_start, 0);
359 	ASSERT_EQ(vma->vm_end, 0x3000);
360 	ASSERT_EQ(vma->vm_pgoff, 0);
361 
362 	vm_area_free(vma);
363 	mtree_destroy(&mm.mm_mt);
364 
365 	return true;
366 }
367 
368 static bool test_simple_shrink(void)
369 {
370 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
371 	struct mm_struct mm = {};
372 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
373 	VMA_ITERATOR(vmi, &mm, 0);
374 
375 	ASSERT_FALSE(vma_link(&mm, vma));
376 
377 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
378 
379 	ASSERT_EQ(vma->vm_start, 0);
380 	ASSERT_EQ(vma->vm_end, 0x1000);
381 	ASSERT_EQ(vma->vm_pgoff, 0);
382 
383 	vm_area_free(vma);
384 	mtree_destroy(&mm.mm_mt);
385 
386 	return true;
387 }
388 
389 static bool test_merge_new(void)
390 {
391 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
392 	struct mm_struct mm = {};
393 	VMA_ITERATOR(vmi, &mm, 0);
394 	struct vma_merge_struct vmg = {
395 		.mm = &mm,
396 		.vmi = &vmi,
397 	};
398 	struct anon_vma_chain dummy_anon_vma_chain_a = {
399 		.anon_vma = &dummy_anon_vma,
400 	};
401 	struct anon_vma_chain dummy_anon_vma_chain_b = {
402 		.anon_vma = &dummy_anon_vma,
403 	};
404 	struct anon_vma_chain dummy_anon_vma_chain_c = {
405 		.anon_vma = &dummy_anon_vma,
406 	};
407 	struct anon_vma_chain dummy_anon_vma_chain_d = {
408 		.anon_vma = &dummy_anon_vma,
409 	};
410 	const struct vm_operations_struct vm_ops = {
411 		.close = dummy_close,
412 	};
413 	int count;
414 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
415 	bool merged;
416 
417 	/*
418 	 * 0123456789abc
419 	 * AA B       CC
420 	 */
421 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
422 	ASSERT_NE(vma_a, NULL);
423 	/* We give each VMA a single avc so we can test anon_vma duplication. */
424 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
425 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
426 
427 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
428 	ASSERT_NE(vma_b, NULL);
429 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
430 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
431 
432 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
433 	ASSERT_NE(vma_c, NULL);
434 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
435 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
436 
437 	/*
438 	 * NO merge.
439 	 *
440 	 * 0123456789abc
441 	 * AA B   **  CC
442 	 */
443 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
444 	ASSERT_NE(vma_d, NULL);
445 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
446 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
447 	ASSERT_FALSE(merged);
448 	ASSERT_EQ(mm.map_count, 4);
449 
450 	/*
451 	 * Merge BOTH sides.
452 	 *
453 	 * 0123456789abc
454 	 * AA*B   DD  CC
455 	 */
456 	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
457 	vma_b->anon_vma = &dummy_anon_vma;
458 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
459 	ASSERT_EQ(vma, vma_a);
460 	/* Merge with A, delete B. */
461 	ASSERT_TRUE(merged);
462 	ASSERT_EQ(vma->vm_start, 0);
463 	ASSERT_EQ(vma->vm_end, 0x4000);
464 	ASSERT_EQ(vma->vm_pgoff, 0);
465 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
466 	ASSERT_TRUE(vma_write_started(vma));
467 	ASSERT_EQ(mm.map_count, 3);
468 
469 	/*
470 	 * Merge to PREVIOUS VMA.
471 	 *
472 	 * 0123456789abc
473 	 * AAAA*  DD  CC
474 	 */
475 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
476 	ASSERT_EQ(vma, vma_a);
477 	/* Extend A. */
478 	ASSERT_TRUE(merged);
479 	ASSERT_EQ(vma->vm_start, 0);
480 	ASSERT_EQ(vma->vm_end, 0x5000);
481 	ASSERT_EQ(vma->vm_pgoff, 0);
482 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
483 	ASSERT_TRUE(vma_write_started(vma));
484 	ASSERT_EQ(mm.map_count, 3);
485 
486 	/*
487 	 * Merge to NEXT VMA.
488 	 *
489 	 * 0123456789abc
490 	 * AAAAA *DD  CC
491 	 */
492 	vma_d->anon_vma = &dummy_anon_vma;
493 	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
494 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
495 	ASSERT_EQ(vma, vma_d);
496 	/* Prepend. */
497 	ASSERT_TRUE(merged);
498 	ASSERT_EQ(vma->vm_start, 0x6000);
499 	ASSERT_EQ(vma->vm_end, 0x9000);
500 	ASSERT_EQ(vma->vm_pgoff, 6);
501 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
502 	ASSERT_TRUE(vma_write_started(vma));
503 	ASSERT_EQ(mm.map_count, 3);
504 
505 	/*
506 	 * Merge BOTH sides.
507 	 *
508 	 * 0123456789abc
509 	 * AAAAA*DDD  CC
510 	 */
511 	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
512 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
513 	ASSERT_EQ(vma, vma_a);
514 	/* Merge with A, delete D. */
515 	ASSERT_TRUE(merged);
516 	ASSERT_EQ(vma->vm_start, 0);
517 	ASSERT_EQ(vma->vm_end, 0x9000);
518 	ASSERT_EQ(vma->vm_pgoff, 0);
519 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
520 	ASSERT_TRUE(vma_write_started(vma));
521 	ASSERT_EQ(mm.map_count, 2);
522 
523 	/*
524 	 * Merge to NEXT VMA.
525 	 *
526 	 * 0123456789abc
527 	 * AAAAAAAAA *CC
528 	 */
529 	vma_c->anon_vma = &dummy_anon_vma;
530 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
531 	ASSERT_EQ(vma, vma_c);
532 	/* Prepend C. */
533 	ASSERT_TRUE(merged);
534 	ASSERT_EQ(vma->vm_start, 0xa000);
535 	ASSERT_EQ(vma->vm_end, 0xc000);
536 	ASSERT_EQ(vma->vm_pgoff, 0xa);
537 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
538 	ASSERT_TRUE(vma_write_started(vma));
539 	ASSERT_EQ(mm.map_count, 2);
540 
541 	/*
542 	 * Merge BOTH sides.
543 	 *
544 	 * 0123456789abc
545 	 * AAAAAAAAA*CCC
546 	 */
547 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
548 	ASSERT_EQ(vma, vma_a);
549 	/* Extend A and delete C. */
550 	ASSERT_TRUE(merged);
551 	ASSERT_EQ(vma->vm_start, 0);
552 	ASSERT_EQ(vma->vm_end, 0xc000);
553 	ASSERT_EQ(vma->vm_pgoff, 0);
554 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
555 	ASSERT_TRUE(vma_write_started(vma));
556 	ASSERT_EQ(mm.map_count, 1);
557 
558 	/*
559 	 * Final state.
560 	 *
561 	 * 0123456789abc
562 	 * AAAAAAAAAAAAA
563 	 */
564 
565 	count = 0;
566 	vma_iter_set(&vmi, 0);
567 	for_each_vma(vmi, vma) {
568 		ASSERT_NE(vma, NULL);
569 		ASSERT_EQ(vma->vm_start, 0);
570 		ASSERT_EQ(vma->vm_end, 0xc000);
571 		ASSERT_EQ(vma->vm_pgoff, 0);
572 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
573 
574 		vm_area_free(vma);
575 		count++;
576 	}
577 
578 	/* Should only have one VMA left (though freed) after all is done. */
579 	ASSERT_EQ(count, 1);
580 
581 	mtree_destroy(&mm.mm_mt);
582 	return true;
583 }
584 
585 static bool test_vma_merge_special_flags(void)
586 {
587 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
588 	struct mm_struct mm = {};
589 	VMA_ITERATOR(vmi, &mm, 0);
590 	struct vma_merge_struct vmg = {
591 		.mm = &mm,
592 		.vmi = &vmi,
593 	};
594 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
595 	vm_flags_t all_special_flags = 0;
596 	int i;
597 	struct vm_area_struct *vma_left, *vma;
598 
599 	/* Make sure no new VM_SPECIAL flags have been added that this test doesn't cover. */
600 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
601 		all_special_flags |= special_flags[i];
602 	}
603 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
604 
605 	/*
606 	 * 01234
607 	 * AAA
608 	 */
609 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
610 	ASSERT_NE(vma_left, NULL);
611 
612 	/* 1. Set up new VMA with special flag that would otherwise merge. */
613 
614 	/*
615 	 * 01234
616 	 * AAA*
617 	 *
618 	 * This should merge if not for the VM_SPECIAL flag.
619 	 */
620 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
621 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
622 		vm_flags_t special_flag = special_flags[i];
623 
624 		vma_left->__vm_flags = flags | special_flag;
625 		vmg.flags = flags | special_flag;
626 		vma = merge_new(&vmg);
627 		ASSERT_EQ(vma, NULL);
628 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
629 	}
630 
631 	/* 2. Modify VMA with special flag that would otherwise merge. */
632 
633 	/*
634 	 * 01234
635 	 * AAAB
636 	 *
637 	 * Create a VMA to modify.
638 	 */
639 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
640 	ASSERT_NE(vma, NULL);
641 	vmg.middle = vma;
642 
643 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
644 		vm_flags_t special_flag = special_flags[i];
645 
646 		vma_left->__vm_flags = flags | special_flag;
647 		vmg.flags = flags | special_flag;
648 		vma = merge_existing(&vmg);
649 		ASSERT_EQ(vma, NULL);
650 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
651 	}
652 
653 	cleanup_mm(&mm, &vmi);
654 	return true;
655 }
656 
657 static bool test_vma_merge_with_close(void)
658 {
659 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
660 	struct mm_struct mm = {};
661 	VMA_ITERATOR(vmi, &mm, 0);
662 	struct vma_merge_struct vmg = {
663 		.mm = &mm,
664 		.vmi = &vmi,
665 	};
666 	const struct vm_operations_struct vm_ops = {
667 		.close = dummy_close,
668 	};
669 	struct vm_area_struct *vma_prev, *vma_next, *vma;
670 
671 	/*
672 	 * When merging VMAs we are not permitted to remove any VMA that has a
673 	 * vm_ops->close() hook.
674 	 *
675 	 * Considering the two possible adjacent VMAs to which a VMA can be
676 	 * merged:
677 	 *
678 	 * [ prev ][ vma ][ next ]
679 	 *
680 	 * In no case will we need to delete prev. If the operation is
681 	 * mergeable, then prev will be extended with one or both of vma and
682 	 * next deleted.
683 	 *
684 	 * As a result, during initial mergeability checks, only
685 	 * can_vma_merge_before() (which implies the VMA being merged with is
686 	 * 'next' as shown above) bothers to check to see whether the next VMA
687 	 * has a vm_ops->close() callback that will need to be called when
688 	 * removed.
689 	 *
690 	 * If it does, then we cannot merge as the resources that the close()
691 	 * operation potentially clears down are tied only to the existing VMA
692 	 * range and we have no way of extending those to the newly merged one.
693 	 *
694 	 * We must consider two scenarios:
695 	 *
696 	 * A.
697 	 *
698 	 * vm_ops->close:     -       -    !NULL
699 	 *                 [ prev ][ vma ][ next ]
700 	 *
701 	 * Where prev may or may not be present/mergeable.
702 	 *
703 	 * This is picked up by a specific check in can_vma_merge_before().
704 	 *
705 	 * B.
706 	 *
707 	 * vm_ops->close:     -     !NULL
708 	 *                 [ prev ][ vma ]
709 	 *
710 	 * Where prev and vma are present and mergeable.
711 	 *
712 	 * This is picked up by a specific check in the modified VMA merge.
713 	 *
714 	 * IMPORTANT NOTE: We make the assumption that the following case:
715 	 *
716 	 *    -     !NULL   NULL
717 	 * [ prev ][ vma ][ next ]
718 	 *
719 	 * Cannot occur, because vma->vm_ops being the same implies the same
720 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
721 	 * would be set too, and thus scenario A would pick this up.
722 	 */
723 
724 	/*
725 	 * The only case of a new VMA merge that results in a VMA being deleted
726 	 * is one where both the previous and next VMAs are merged - in this
727 	 * instance the next VMA is deleted, and the previous VMA is extended.
728 	 *
729 	 * If we are unable to do so, we reduce the operation to simply
730 	 * extending the prev VMA and not merging next.
731 	 *
732 	 * 0123456789
733 	 * PPP**NNNN
734 	 *             ->
735 	 * 0123456789
736 	 * PPPPPPNNN
737 	 */
738 
739 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
740 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
741 	vma_next->vm_ops = &vm_ops;
742 
743 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
744 	ASSERT_EQ(merge_new(&vmg), vma_prev);
745 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
746 	ASSERT_EQ(vma_prev->vm_start, 0);
747 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
748 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
749 
750 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
751 
752 	/*
753 	 * When modifying an existing VMA there are further cases where we
754 	 * delete VMAs.
755 	 *
756 	 *    <>
757 	 * 0123456789
758 	 * PPPVV
759 	 *
760 	 * In this instance, if vma has a close hook, the merge simply cannot
761 	 * proceed.
762 	 */
763 
764 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
765 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
766 	vma->vm_ops = &vm_ops;
767 
768 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
769 	vmg.prev = vma_prev;
770 	vmg.middle = vma;
771 
772 	/*
773 	 * The VMA being modified in a way that would otherwise merge should
774 	 * also fail.
775 	 */
776 	ASSERT_EQ(merge_existing(&vmg), NULL);
777 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
778 
779 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
780 
781 	/*
782 	 * This case is mirrored if merging with next.
783 	 *
784 	 *    <>
785 	 * 0123456789
786 	 *    VVNNNN
787 	 *
788 	 * In this instance, if vma has a close hook, the merge simply cannot
789 	 * proceed.
790 	 */
791 
792 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
793 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
794 	vma->vm_ops = &vm_ops;
795 
796 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
797 	vmg.middle = vma;
798 	ASSERT_EQ(merge_existing(&vmg), NULL);
799 	/*
800 	 * Initially this is misapprehended as an out of memory report, as the
801 	 * close() check is handled in the same way as anon_vma duplication
802 	 * failures, however a subsequent patch resolves this.
803 	 * failures; however, a subsequent patch resolves this.
804 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
805 
806 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
807 
808 	/*
809 	 * Finally, we consider two variants of the case where we modify a VMA
810 	 * to merge with both the previous and next VMAs.
811 	 *
812 	 * The first variant is where vma has a close hook. In this instance, no
813 	 * merge can proceed.
814 	 *
815 	 *    <>
816 	 * 0123456789
817 	 * PPPVVNNNN
818 	 */
819 
820 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
821 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
822 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
823 	vma->vm_ops = &vm_ops;
824 
825 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
826 	vmg.prev = vma_prev;
827 	vmg.middle = vma;
828 
829 	ASSERT_EQ(merge_existing(&vmg), NULL);
830 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
831 
832 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
833 
834 	/*
835 	 * The second variant is where next has a close hook. In this instance,
836 	 * we reduce the operation to a merge between prev and vma.
837 	 *
838 	 *    <>
839 	 * 0123456789
840 	 * PPPVVNNNN
841 	 *            ->
842 	 * 0123456789
843 	 * PPPPPNNNN
844 	 */
845 
846 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
847 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
848 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
849 	vma_next->vm_ops = &vm_ops;
850 
851 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
852 	vmg.prev = vma_prev;
853 	vmg.middle = vma;
854 
855 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
856 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
857 	ASSERT_EQ(vma_prev->vm_start, 0);
858 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
859 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
860 
861 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
862 
863 	return true;
864 }
865 
866 static bool test_vma_merge_new_with_close(void)
867 {
868 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
869 	struct mm_struct mm = {};
870 	VMA_ITERATOR(vmi, &mm, 0);
871 	struct vma_merge_struct vmg = {
872 		.mm = &mm,
873 		.vmi = &vmi,
874 	};
875 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
876 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
877 	const struct vm_operations_struct vm_ops = {
878 		.close = dummy_close,
879 	};
880 	struct vm_area_struct *vma;
881 
882 	/*
883 	 * We should allow the partial merge of a proposed new VMA if the
884 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
885 	 * compatible), e.g.:
886 	 *
887 	 *        New VMA
888 	 *    A  v-------v  B
889 	 * |-----|       |-----|
890 	 *  close         close
891 	 *
892 	 * Since the rule is to not DELETE a VMA with a close operation, this
893 	 * should be permitted; but rather than expanding A and deleting B, we
894 	 * should simply expand A and leave B intact, e.g.:
895 	 *
896 	 *        New VMA
897 	 *       A          B
898 	 * |------------||-----|
899 	 *  close         close
900 	 */
901 
902 	/* Have prev and next have a vm_ops->close() hook. */
903 	vma_prev->vm_ops = &vm_ops;
904 	vma_next->vm_ops = &vm_ops;
905 
906 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
907 	vma = merge_new(&vmg);
908 	ASSERT_NE(vma, NULL);
909 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
910 	ASSERT_EQ(vma->vm_start, 0);
911 	ASSERT_EQ(vma->vm_end, 0x5000);
912 	ASSERT_EQ(vma->vm_pgoff, 0);
913 	ASSERT_EQ(vma->vm_ops, &vm_ops);
914 	ASSERT_TRUE(vma_write_started(vma));
915 	ASSERT_EQ(mm.map_count, 2);
916 
917 	cleanup_mm(&mm, &vmi);
918 	return true;
919 }
920 
921 static bool test_merge_existing(void)
922 {
923 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
924 	struct mm_struct mm = {};
925 	VMA_ITERATOR(vmi, &mm, 0);
926 	struct vm_area_struct *vma, *vma_prev, *vma_next;
927 	struct vma_merge_struct vmg = {
928 		.mm = &mm,
929 		.vmi = &vmi,
930 	};
931 	const struct vm_operations_struct vm_ops = {
932 		.close = dummy_close,
933 	};
934 
935 	/*
936 	 * Merge right case - partial span.
937 	 *
938 	 *    <->
939 	 * 0123456789
940 	 *   VVVVNNN
941 	 *            ->
942 	 * 0123456789
943 	 *   VNNNNNN
944 	 */
945 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
946 	vma->vm_ops = &vm_ops; /* This should have no impact. */
947 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
948 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
949 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
950 	vmg.middle = vma;
951 	vmg.prev = vma;
952 	vma->anon_vma = &dummy_anon_vma;
953 	ASSERT_EQ(merge_existing(&vmg), vma_next);
954 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
955 	ASSERT_EQ(vma_next->vm_start, 0x3000);
956 	ASSERT_EQ(vma_next->vm_end, 0x9000);
957 	ASSERT_EQ(vma_next->vm_pgoff, 3);
958 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
959 	ASSERT_EQ(vma->vm_start, 0x2000);
960 	ASSERT_EQ(vma->vm_end, 0x3000);
961 	ASSERT_EQ(vma->vm_pgoff, 2);
962 	ASSERT_TRUE(vma_write_started(vma));
963 	ASSERT_TRUE(vma_write_started(vma_next));
964 	ASSERT_EQ(mm.map_count, 2);
965 
966 	/* Clear down and reset. */
967 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
968 
969 	/*
970 	 * Merge right case - full span.
971 	 *
972 	 *   <-->
973 	 * 0123456789
974 	 *   VVVVNNN
975 	 *            ->
976 	 * 0123456789
977 	 *   NNNNNNN
978 	 */
979 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
980 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
981 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
982 	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
983 	vmg.middle = vma;
984 	vma->anon_vma = &dummy_anon_vma;
985 	ASSERT_EQ(merge_existing(&vmg), vma_next);
986 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
987 	ASSERT_EQ(vma_next->vm_start, 0x2000);
988 	ASSERT_EQ(vma_next->vm_end, 0x9000);
989 	ASSERT_EQ(vma_next->vm_pgoff, 2);
990 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
991 	ASSERT_TRUE(vma_write_started(vma_next));
992 	ASSERT_EQ(mm.map_count, 1);
993 
994 	/* Clear down and reset. We should have deleted vma. */
995 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
996 
997 	/*
998 	 * Merge left case - partial span.
999 	 *
1000 	 *    <->
1001 	 * 0123456789
1002 	 * PPPVVVV
1003 	 *            ->
1004 	 * 0123456789
1005 	 * PPPPPPV
1006 	 */
1007 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1008 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1009 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1010 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1011 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
1012 	vmg.prev = vma_prev;
1013 	vmg.middle = vma;
1014 	vma->anon_vma = &dummy_anon_vma;
1015 
1016 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1017 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1018 	ASSERT_EQ(vma_prev->vm_start, 0);
1019 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
1020 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1021 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1022 	ASSERT_EQ(vma->vm_start, 0x6000);
1023 	ASSERT_EQ(vma->vm_end, 0x7000);
1024 	ASSERT_EQ(vma->vm_pgoff, 6);
1025 	ASSERT_TRUE(vma_write_started(vma_prev));
1026 	ASSERT_TRUE(vma_write_started(vma));
1027 	ASSERT_EQ(mm.map_count, 2);
1028 
1029 	/* Clear down and reset. */
1030 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1031 
1032 	/*
1033 	 * Merge left case - full span.
1034 	 *
1035 	 *    <-->
1036 	 * 0123456789
1037 	 * PPPVVVV
1038 	 *            ->
1039 	 * 0123456789
1040 	 * PPPPPPP
1041 	 */
1042 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1043 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1044 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1045 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1046 	vmg.prev = vma_prev;
1047 	vmg.middle = vma;
1048 	vma->anon_vma = &dummy_anon_vma;
1049 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1050 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1051 	ASSERT_EQ(vma_prev->vm_start, 0);
1052 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1053 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1054 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1055 	ASSERT_TRUE(vma_write_started(vma_prev));
1056 	ASSERT_EQ(mm.map_count, 1);
1057 
1058 	/* Clear down and reset. We should have deleted vma. */
1059 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1060 
1061 	/*
1062 	 * Merge both case.
1063 	 *
1064 	 *    <-->
1065 	 * 0123456789
1066 	 * PPPVVVVNNN
1067 	 *             ->
1068 	 * 0123456789
1069 	 * PPPPPPPPPP
1070 	 */
1071 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1072 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1073 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1074 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1075 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1076 	vmg.prev = vma_prev;
1077 	vmg.middle = vma;
1078 	vma->anon_vma = &dummy_anon_vma;
1079 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1080 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1081 	ASSERT_EQ(vma_prev->vm_start, 0);
1082 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
1083 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1084 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1085 	ASSERT_TRUE(vma_write_started(vma_prev));
1086 	ASSERT_EQ(mm.map_count, 1);
1087 
1088 	/* Clear down and reset. We should have deleted prev and next. */
1089 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1090 
1091 	/*
1092 	 * Non-merge ranges. The modified VMA merge operation assumes that the
1093 	 * caller always specifies ranges within the input VMA so we need only
1094 	 * examine these cases.
1095 	 *
1096 	 *     -
1097 	 *      -
1098 	 *       -
1099 	 *     <->
1100 	 *     <>
1101 	 *      <>
1102 	 * 0123456789a
1103 	 * PPPVVVVVNNN
1104 	 */
1105 
1106 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1107 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1108 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1109 
1110 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1111 	vmg.prev = vma;
1112 	vmg.middle = vma;
1113 	ASSERT_EQ(merge_existing(&vmg), NULL);
1114 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1115 
1116 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1117 	vmg.prev = vma;
1118 	vmg.middle = vma;
1119 	ASSERT_EQ(merge_existing(&vmg), NULL);
1120 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1121 
1122 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1123 	vmg.prev = vma;
1124 	vmg.middle = vma;
1125 	ASSERT_EQ(merge_existing(&vmg), NULL);
1126 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1127 
1128 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1129 	vmg.prev = vma;
1130 	vmg.middle = vma;
1131 	ASSERT_EQ(merge_existing(&vmg), NULL);
1132 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1133 
1134 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1135 	vmg.prev = vma;
1136 	vmg.middle = vma;
1137 	ASSERT_EQ(merge_existing(&vmg), NULL);
1138 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1139 
1140 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1141 	vmg.prev = vma;
1142 	vmg.middle = vma;
1143 	ASSERT_EQ(merge_existing(&vmg), NULL);
1144 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1145 
1146 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1147 
1148 	return true;
1149 }
1150 
1151 static bool test_anon_vma_non_mergeable(void)
1152 {
1153 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1154 	struct mm_struct mm = {};
1155 	VMA_ITERATOR(vmi, &mm, 0);
1156 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1157 	struct vma_merge_struct vmg = {
1158 		.mm = &mm,
1159 		.vmi = &vmi,
1160 	};
1161 	struct anon_vma_chain dummy_anon_vma_chain1 = {
1162 		.anon_vma = &dummy_anon_vma,
1163 	};
1164 	struct anon_vma_chain dummy_anon_vma_chain2 = {
1165 		.anon_vma = &dummy_anon_vma,
1166 	};
1167 
1168 	/*
1169 	 * In the case of a modified VMA merge that would merge both the left and
1170 	 * right VMAs, but where prev and next have incompatible anon_vma objects,
1171 	 * we revert to a merge of prev and vma:
1172 	 *
1173 	 *    <-->
1174 	 * 0123456789
1175 	 * PPPVVVVNNN
1176 	 *            ->
1177 	 * 0123456789
1178 	 * PPPPPPPNNN
1179 	 */
1180 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1181 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1182 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1183 
1184 	/*
1185 	 * Give both prev and next a single-entry anon_vma_chain, so each will
1186 	 * merge with the NULL vmg->anon_vma.
1187 	 *
1188 	 * However, when prev is compared to next, the merge should fail.
1189 	 */
1190 
1191 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1192 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1193 	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1194 	vma_prev->anon_vma = &dummy_anon_vma;
1195 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1196 
1197 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1198 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1199 	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1200 	vma_next->anon_vma = (struct anon_vma *)2;
1201 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1202 
1203 	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1204 
1205 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1206 	vmg.prev = vma_prev;
1207 	vmg.middle = vma;
1208 
1209 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1210 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1211 	ASSERT_EQ(vma_prev->vm_start, 0);
1212 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1213 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1214 	ASSERT_TRUE(vma_write_started(vma_prev));
1215 	ASSERT_FALSE(vma_write_started(vma_next));
1216 
1217 	/* Clear down and reset. */
1218 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1219 
1220 	/*
1221 	 * Now consider the new VMA case. This is equivalent, only adding a new
1222 	 * VMA in a gap between prev and next.
1223 	 *
1224 	 *    <-->
1225 	 * 0123456789
1226 	 * PPP****NNN
1227 	 *            ->
1228 	 * 0123456789
1229 	 * PPPPPPPNNN
1230 	 */
1231 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1232 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1233 
1234 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1235 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1236 	vma_prev->anon_vma = (struct anon_vma *)1;
1237 
1238 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1239 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1240 	vma_next->anon_vma = (struct anon_vma *)2;
1241 
1242 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1243 	vmg.prev = vma_prev;
1244 
1245 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1246 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1247 	ASSERT_EQ(vma_prev->vm_start, 0);
1248 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1249 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1250 	ASSERT_TRUE(vma_write_started(vma_prev));
1251 	ASSERT_FALSE(vma_write_started(vma_next));
1252 
1253 	/* Final cleanup. */
1254 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1255 
1256 	return true;
1257 }
1258 
1259 static bool test_dup_anon_vma(void)
1260 {
1261 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1262 	struct mm_struct mm = {};
1263 	VMA_ITERATOR(vmi, &mm, 0);
1264 	struct vma_merge_struct vmg = {
1265 		.mm = &mm,
1266 		.vmi = &vmi,
1267 	};
1268 	struct anon_vma_chain dummy_anon_vma_chain = {
1269 		.anon_vma = &dummy_anon_vma,
1270 	};
1271 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1272 
1273 	reset_dummy_anon_vma();
1274 
1275 	/*
1276 	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1277 	 * assigns it to the expanded VMA.
1278 	 *
1279 	 * This covers new VMA merging, as these operations amount to a VMA
1280 	 * expand.
1281 	 */
1282 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1283 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1284 	vma_next->anon_vma = &dummy_anon_vma;
1285 
1286 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1287 	vmg.middle = vma_prev;
1288 	vmg.next = vma_next;
1289 
1290 	ASSERT_EQ(expand_existing(&vmg), 0);
1291 
1292 	/* Will have been cloned. */
1293 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1294 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1295 
1296 	/* Cleanup ready for next run. */
1297 	cleanup_mm(&mm, &vmi);
1298 
1299 	/*
1300 	 * next has anon_vma, we assign to prev.
1301 	 *
1302 	 *         |<----->|
1303 	 * |-------*********-------|
1304 	 *   prev     vma     next
1305 	 *  extend   delete  delete
1306 	 */
1307 
1308 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1309 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1310 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1311 
1312 	/* Initialise avc so mergeability check passes. */
1313 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1314 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1315 
1316 	vma_next->anon_vma = &dummy_anon_vma;
1317 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1318 	vmg.prev = vma_prev;
1319 	vmg.middle = vma;
1320 
1321 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1322 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1323 
1324 	ASSERT_EQ(vma_prev->vm_start, 0);
1325 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1326 
1327 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1328 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1329 
1330 	cleanup_mm(&mm, &vmi);
1331 
1332 	/*
1333 	 * vma has anon_vma, we assign to prev.
1334 	 *
1335 	 *         |<----->|
1336 	 * |-------*********-------|
1337 	 *   prev     vma     next
1338 	 *  extend   delete  delete
1339 	 */
1340 
1341 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1342 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1343 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1344 
1345 	vma->anon_vma = &dummy_anon_vma;
1346 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1347 	vmg.prev = vma_prev;
1348 	vmg.middle = vma;
1349 
1350 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1351 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1352 
1353 	ASSERT_EQ(vma_prev->vm_start, 0);
1354 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1355 
1356 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1357 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1358 
1359 	cleanup_mm(&mm, &vmi);
1360 
1361 	/*
1362 	 * vma has anon_vma, we assign to prev.
1363 	 *
1364 	 *         |<----->|
1365 	 * |-------*************
1366 	 *   prev       vma
1367 	 *  extend shrink/delete
1368 	 */
1369 
1370 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1371 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1372 
1373 	vma->anon_vma = &dummy_anon_vma;
1374 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1375 	vmg.prev = vma_prev;
1376 	vmg.middle = vma;
1377 
1378 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1379 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1380 
1381 	ASSERT_EQ(vma_prev->vm_start, 0);
1382 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1383 
1384 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1385 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1386 
1387 	cleanup_mm(&mm, &vmi);
1388 
1389 	/*
1390 	 * vma has anon_vma, we assign to next.
1391 	 *
1392 	 *     |<----->|
1393 	 * *************-------|
1394 	 *      vma       next
1395 	 * shrink/delete extend
1396 	 */
1397 
1398 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1399 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1400 
1401 	vma->anon_vma = &dummy_anon_vma;
1402 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1403 	vmg.prev = vma;
1404 	vmg.middle = vma;
1405 
1406 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1407 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1408 
1409 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1410 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1411 
1412 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1413 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1414 
1415 	cleanup_mm(&mm, &vmi);
1416 	return true;
1417 }
1418 
1419 static bool test_vmi_prealloc_fail(void)
1420 {
1421 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1422 	struct mm_struct mm = {};
1423 	VMA_ITERATOR(vmi, &mm, 0);
1424 	struct vma_merge_struct vmg = {
1425 		.mm = &mm,
1426 		.vmi = &vmi,
1427 	};
1428 	struct vm_area_struct *vma_prev, *vma;
1429 
1430 	/*
1431 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1432 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1433 	 * the duplicated anon_vma is unlinked.
1434 	 */
1435 
1436 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1437 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1438 	vma->anon_vma = &dummy_anon_vma;
1439 
1440 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1441 	vmg.prev = vma_prev;
1442 	vmg.middle = vma;
1443 
1444 	fail_prealloc = true;
1445 
1446 	/* This will cause the merge to fail. */
1447 	ASSERT_EQ(merge_existing(&vmg), NULL);
1448 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1449 	/* We will already have assigned the anon_vma. */
1450 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1451 	/* And it was both cloned and unlinked. */
1452 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1453 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1454 
1455 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1456 
1457 	/*
1458 	 * We repeat the same operation for expanding a VMA, which is what new
1459 	 * VMA merging ultimately uses too. This asserts that unlinking is
1460 	 * performed in this case too.
1461 	 */
1462 
1463 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1464 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1465 	vma->anon_vma = &dummy_anon_vma;
1466 
1467 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1468 	vmg.middle = vma_prev;
1469 	vmg.next = vma;
1470 
1471 	fail_prealloc = true;
1472 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1473 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1474 
1475 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1476 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1477 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1478 
1479 	cleanup_mm(&mm, &vmi);
1480 	return true;
1481 }
1482 
1483 static bool test_merge_extend(void)
1484 {
1485 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1486 	struct mm_struct mm = {};
1487 	VMA_ITERATOR(vmi, &mm, 0x1000);
1488 	struct vm_area_struct *vma;
1489 
1490 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1491 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1492 
1493 	/*
1494 	 * Extend a VMA into the gap between itself and the following VMA.
1495 	 * This should result in a merge.
1496 	 *
1497 	 * <->
1498 	 * *  *
1499 	 *
1500 	 */
1501 
1502 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1503 	ASSERT_EQ(vma->vm_start, 0);
1504 	ASSERT_EQ(vma->vm_end, 0x4000);
1505 	ASSERT_EQ(vma->vm_pgoff, 0);
1506 	ASSERT_TRUE(vma_write_started(vma));
1507 	ASSERT_EQ(mm.map_count, 1);
1508 
1509 	cleanup_mm(&mm, &vmi);
1510 	return true;
1511 }
1512 
1513 static bool test_copy_vma(void)
1514 {
1515 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1516 	struct mm_struct mm = {};
1517 	bool need_locks = false;
1518 	VMA_ITERATOR(vmi, &mm, 0);
1519 	struct vm_area_struct *vma, *vma_new, *vma_next;
1520 
1521 	/* Move backwards and do not merge. */
1522 
1523 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1524 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1525 
1526 	ASSERT_NE(vma_new, vma);
1527 	ASSERT_EQ(vma_new->vm_start, 0);
1528 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1529 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1530 
1531 	cleanup_mm(&mm, &vmi);
1532 
1533 	/* Move a VMA into position next to another and merge the two. */
1534 
1535 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1536 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1537 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1538 
1539 	ASSERT_EQ(vma_new, vma_next);
1540 
1541 	cleanup_mm(&mm, &vmi);
1542 	return true;
1543 }
1544 
1545 static bool test_expand_only_mode(void)
1546 {
1547 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1548 	struct mm_struct mm = {};
1549 	VMA_ITERATOR(vmi, &mm, 0);
1550 	struct vm_area_struct *vma_prev, *vma;
1551 	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
1552 
1553 	/*
1554 	 * Place a VMA prior to the one we're expanding so we assert that we do
1555 	 * not erroneously try to traverse to the previous VMA even though we
1556 	 * have, through the use of the just_expand flag, indicated we do not
1557 	 * need to do so.
1558 	 */
1559 	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1560 
1561 	/*
1562 	 * We will be positioned at the prev VMA, but looking to expand to
1563 	 * 0x9000.
1564 	 */
1565 	vma_iter_set(&vmi, 0x3000);
1566 	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1567 	vmg.prev = vma_prev;
1568 	vmg.just_expand = true;
1569 
1570 	vma = vma_merge_new_range(&vmg);
1571 	ASSERT_NE(vma, NULL);
1572 	ASSERT_EQ(vma, vma_prev);
1573 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1574 	ASSERT_EQ(vma->vm_start, 0x3000);
1575 	ASSERT_EQ(vma->vm_end, 0x9000);
1576 	ASSERT_EQ(vma->vm_pgoff, 3);
1577 	ASSERT_TRUE(vma_write_started(vma));
1578 	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1579 
1580 	cleanup_mm(&mm, &vmi);
1581 	return true;
1582 }
1583 
1584 static bool test_mmap_region_basic(void)
1585 {
1586 	struct mm_struct mm = {};
1587 	unsigned long addr;
1588 	struct vm_area_struct *vma;
1589 	VMA_ITERATOR(vmi, &mm, 0);
1590 
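	/* __mmap_region() operates on current->mm, so point the stubbed task at our test mm. */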
1591 	current->mm = &mm;
1592 
1593 	/* Map at 0x300000, length 0x3000. */
1594 	addr = __mmap_region(NULL, 0x300000, 0x3000,
1595 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1596 			     0x300, NULL);
1597 	ASSERT_EQ(addr, 0x300000);
1598 
1599 	/* Map at 0x250000, length 0x3000. */
1600 	addr = __mmap_region(NULL, 0x250000, 0x3000,
1601 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1602 			     0x250, NULL);
1603 	ASSERT_EQ(addr, 0x250000);
1604 
1605 	/* Map at 0x303000, merging with the 0x300000 mapping for a total length of 0x6000. */
1606 	addr = __mmap_region(NULL, 0x303000, 0x3000,
1607 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1608 			     0x303, NULL);
1609 	ASSERT_EQ(addr, 0x303000);
1610 
1611 	/* Map at 0x24d000, merging with the 0x250000 mapping for a total length of 0x6000. */
1612 	addr = __mmap_region(NULL, 0x24d000, 0x3000,
1613 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1614 			     0x24d, NULL);
1615 	ASSERT_EQ(addr, 0x24d000);
1616 
1617 	ASSERT_EQ(mm.map_count, 2);
1618 
1619 	for_each_vma(vmi, vma) {
1620 		if (vma->vm_start == 0x300000) {
1621 			ASSERT_EQ(vma->vm_end, 0x306000);
1622 			ASSERT_EQ(vma->vm_pgoff, 0x300);
1623 		} else if (vma->vm_start == 0x24d000) {
1624 			ASSERT_EQ(vma->vm_end, 0x253000);
1625 			ASSERT_EQ(vma->vm_pgoff, 0x24d);
1626 		} else {
1627 			ASSERT_FALSE(true);
1628 		}
1629 	}
1630 
1631 	cleanup_mm(&mm, &vmi);
1632 	return true;
1633 }
1634 
1635 int main(void)
1636 {
1637 	int num_tests = 0, num_fail = 0;
1638 
1639 	maple_tree_init();
1640 
1641 #define TEST(name)							\
1642 	do {								\
1643 		num_tests++;						\
1644 		if (!test_##name()) {					\
1645 			num_fail++;					\
1646 			fprintf(stderr, "Test " #name " FAILED\n");	\
1647 		}							\
1648 	} while (0)
1649 
1650 	/* Very simple tests to kick the tyres. */
1651 	TEST(simple_merge);
1652 	TEST(simple_modify);
1653 	TEST(simple_expand);
1654 	TEST(simple_shrink);
1655 
1656 	TEST(merge_new);
1657 	TEST(vma_merge_special_flags);
1658 	TEST(vma_merge_with_close);
1659 	TEST(vma_merge_new_with_close);
1660 	TEST(merge_existing);
1661 	TEST(anon_vma_non_mergeable);
1662 	TEST(dup_anon_vma);
1663 	TEST(vmi_prealloc_fail);
1664 	TEST(merge_extend);
1665 	TEST(copy_vma);
1666 	TEST(expand_only_mode);
1667 
1668 	TEST(mmap_region_basic);
1669 
1670 #undef TEST
1671 
1672 	printf("%d tests run, %d passed, %d failed.\n",
1673 	       num_tests, num_tests - num_fail, num_fail);
1674 
1675 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1676 }
1677