xref: /linux/tools/testing/vma/vma.c (revision 00c010e130e58301db2ea0cec1eadc931e1cb8cf)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "generated/bit-length.h"
8 
9 #include "maple-shared.h"
10 #include "vma_internal.h"
11 
12 /* Include so the header guard is set. */
13 #include "../../../mm/vma.h"
14 
15 static bool fail_prealloc;
16 
17 /* Then override vma_iter_prealloc() so we can choose to fail it. */
18 #define vma_iter_prealloc(vmi, vma)					\
19 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
20 
21 #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
22 
23 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
24 unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
25 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
26 
27 /*
28  * Directly import the VMA implementation here. Our vma_internal.h wrapper
29  * provides userland-equivalent functionality for everything vma.c uses.
30  */
31 #include "../../../mm/vma_init.c"
32 #include "../../../mm/vma_exec.c"
33 #include "../../../mm/vma.c"
34 
35 const struct vm_operations_struct vma_dummy_vm_ops;
36 static struct anon_vma dummy_anon_vma;
37 
38 #define ASSERT_TRUE(_expr)						\
39 	do {								\
40 		if (!(_expr)) {						\
41 			fprintf(stderr,					\
42 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
43 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
44 			return false;					\
45 		}							\
46 	} while (0)
47 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
48 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
49 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
50 
51 static struct task_struct __current;
52 
53 struct task_struct *get_current(void)
54 {
55 	return &__current;
56 }
57 
58 unsigned long rlimit(unsigned int limit)
59 {
60 	return (unsigned long)-1;
61 }
62 
63 /* Helper function to simply allocate a VMA. */
64 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
65 					unsigned long start,
66 					unsigned long end,
67 					pgoff_t pgoff,
68 					vm_flags_t flags)
69 {
70 	struct vm_area_struct *ret = vm_area_alloc(mm);
71 
72 	if (ret == NULL)
73 		return NULL;
74 
75 	ret->vm_start = start;
76 	ret->vm_end = end;
77 	ret->vm_pgoff = pgoff;
78 	ret->__vm_flags = flags;
79 	vma_assert_detached(ret);
80 
81 	return ret;
82 }
83 
84 /* Helper function to link an already-allocated VMA to the tree. */
85 static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
86 {
87 	int res;
88 
89 	res = vma_link(mm, vma);
90 	if (!res)
91 		vma_assert_attached(vma);
92 	return res;
93 }
94 
95 static void detach_free_vma(struct vm_area_struct *vma)
96 {
97 	vma_mark_detached(vma);
98 	vm_area_free(vma);
99 }
100 
101 /* Helper function to allocate a VMA and link it to the tree. */
102 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
103 						 unsigned long start,
104 						 unsigned long end,
105 						 pgoff_t pgoff,
106 						 vm_flags_t flags)
107 {
108 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
109 
110 	if (vma == NULL)
111 		return NULL;
112 
113 	if (attach_vma(mm, vma)) {
114 		detach_free_vma(vma);
115 		return NULL;
116 	}
117 
118 	/*
119 	 * Reset this counter which we use to track whether writes have
120 	 * begun. Linking to the tree will have caused this to be incremented,
121 	 * which means we will get a false positive otherwise.
122 	 */
123 	vma->vm_lock_seq = UINT_MAX;
124 
125 	return vma;
126 }
127 
128 /* Helper function which provides a wrapper around a merge new VMA operation. */
129 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
130 {
131 	struct vm_area_struct *vma;
132 	/*
133 	 * For convenience, get the prev and next VMAs, which the new VMA
134 	 * operation requires.
135 	 */
136 	vmg->next = vma_next(vmg->vmi);
137 	vmg->prev = vma_prev(vmg->vmi);
138 	vma_iter_next_range(vmg->vmi);
139 
140 	vma = vma_merge_new_range(vmg);
141 	if (vma)
142 		vma_assert_attached(vma);
143 
144 	return vma;
145 }
146 
147 /*
148  * Helper function which provides a wrapper around a merge existing VMA
149  * operation.
150  */
151 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
152 {
153 	struct vm_area_struct *vma;
154 
155 	vma = vma_merge_existing_range(vmg);
156 	if (vma)
157 		vma_assert_attached(vma);
158 	return vma;
159 }
160 
161 /*
162  * Helper function which provides a wrapper around the expansion of an existing
163  * VMA.
164  */
165 static int expand_existing(struct vma_merge_struct *vmg)
166 {
167 	return vma_expand(vmg);
168 }
169 
170 /*
171  * Helper function to reset merge state and the associated VMA iterator to a
172  * specified new range.
173  */
174 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
175 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
176 {
177 	vma_iter_set(vmg->vmi, start);
178 
179 	vmg->prev = NULL;
180 	vmg->middle = NULL;
181 	vmg->next = NULL;
182 	vmg->target = NULL;
183 
184 	vmg->start = start;
185 	vmg->end = end;
186 	vmg->pgoff = pgoff;
187 	vmg->flags = flags;
188 
189 	vmg->just_expand = false;
190 	vmg->__remove_middle = false;
191 	vmg->__remove_next = false;
192 	vmg->__adjust_middle_start = false;
193 	vmg->__adjust_next_start = false;
194 }
195 
196 /* Helper function to set both the VMG range and its anon_vma. */
197 static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
198 				   unsigned long end, pgoff_t pgoff, vm_flags_t flags,
199 				   struct anon_vma *anon_vma)
200 {
201 	vmg_set_range(vmg, start, end, pgoff, flags);
202 	vmg->anon_vma = anon_vma;
203 }
204 
205 /*
206  * Helper function to try to merge a new VMA.
207  *
208  * Update vmg and the iterator for it and try to merge, otherwise allocate a new
209  * VMA, link it to the maple tree and return it.
210  */
211 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
212 						struct vma_merge_struct *vmg,
213 						unsigned long start, unsigned long end,
214 						pgoff_t pgoff, vm_flags_t flags,
215 						bool *was_merged)
216 {
217 	struct vm_area_struct *merged;
218 
219 	vmg_set_range(vmg, start, end, pgoff, flags);
220 
221 	merged = merge_new(vmg);
222 	if (merged) {
223 		*was_merged = true;
224 		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
225 		return merged;
226 	}
227 
228 	*was_merged = false;
229 
230 	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
231 
232 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
233 }
234 
235 /*
236  * Helper function to reset the dummy anon_vma to indicate it has not been
237  * duplicated.
238  */
239 static void reset_dummy_anon_vma(void)
240 {
241 	dummy_anon_vma.was_cloned = false;
242 	dummy_anon_vma.was_unlinked = false;
243 }
244 
245 /*
246  * Helper function to remove all VMAs and destroy the maple tree associated with
247  * a virtual address space. Returns a count of VMAs in the tree.
248  */
249 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
250 {
251 	struct vm_area_struct *vma;
252 	int count = 0;
253 
254 	fail_prealloc = false;
255 	reset_dummy_anon_vma();
256 
257 	vma_iter_set(vmi, 0);
258 	for_each_vma(*vmi, vma) {
259 		detach_free_vma(vma);
260 		count++;
261 	}
262 
263 	mtree_destroy(&mm->mm_mt);
264 	mm->map_count = 0;
265 	return count;
266 }
267 
268 /* Helper function to determine if VMA has had vma_start_write() performed. */
269 static bool vma_write_started(struct vm_area_struct *vma)
270 {
271 	int seq = vma->vm_lock_seq;
272 
273 	/* We reset after each check. */
274 	vma->vm_lock_seq = UINT_MAX;
275 
276 	/* The vma_start_write() stub simply increments this value. */
277 	return seq > -1;
278 }
279 
280 /* Helper function providing a dummy vm_ops->close() method. */
281 static void dummy_close(struct vm_area_struct *)
282 {
283 }
284 
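/* Helper to attach an anon_vma and a single anon_vma_chain to a VMA. */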
285 static void __vma_set_dummy_anon_vma(struct vm_area_struct *vma,
286 				     struct anon_vma_chain *avc,
287 				     struct anon_vma *anon_vma)
288 {
289 	vma->anon_vma = anon_vma;
290 	INIT_LIST_HEAD(&vma->anon_vma_chain);
291 	list_add(&avc->same_vma, &vma->anon_vma_chain);
292 	avc->anon_vma = vma->anon_vma;
293 }
294 
295 static void vma_set_dummy_anon_vma(struct vm_area_struct *vma,
296 				   struct anon_vma_chain *avc)
297 {
298 	__vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma);
299 }
300 
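/* Test merging a new VMA into the gap between two compatible VMAs. */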
301 static bool test_simple_merge(void)
302 {
303 	struct vm_area_struct *vma;
304 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
305 	struct mm_struct mm = {};
306 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
307 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
308 	VMA_ITERATOR(vmi, &mm, 0x1000);
309 	struct vma_merge_struct vmg = {
310 		.mm = &mm,
311 		.vmi = &vmi,
312 		.start = 0x1000,
313 		.end = 0x2000,
314 		.flags = flags,
315 		.pgoff = 1,
316 	};
317 
318 	ASSERT_FALSE(attach_vma(&mm, vma_left));
319 	ASSERT_FALSE(attach_vma(&mm, vma_right));
320 
321 	vma = merge_new(&vmg);
322 	ASSERT_NE(vma, NULL);
323 
324 	ASSERT_EQ(vma->vm_start, 0);
325 	ASSERT_EQ(vma->vm_end, 0x3000);
326 	ASSERT_EQ(vma->vm_pgoff, 0);
327 	ASSERT_EQ(vma->vm_flags, flags);
328 
329 	detach_free_vma(vma);
330 	mtree_destroy(&mm.mm_mt);
331 
332 	return true;
333 }
334 
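/* Test that modifying a sub-range of a VMA splits it into three VMAs. */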
335 static bool test_simple_modify(void)
336 {
337 	struct vm_area_struct *vma;
338 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
339 	struct mm_struct mm = {};
340 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
341 	VMA_ITERATOR(vmi, &mm, 0x1000);
342 
343 	ASSERT_FALSE(attach_vma(&mm, init_vma));
344 
345 	/*
346 	 * The flags will not be changed; the vma_modify_flags() function
347 	 * performs the merge/split only.
348 	 */
349 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
350 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
351 	ASSERT_NE(vma, NULL);
352 	/* We modify the provided VMA, and on split allocate new VMAs. */
353 	ASSERT_EQ(vma, init_vma);
354 
355 	ASSERT_EQ(vma->vm_start, 0x1000);
356 	ASSERT_EQ(vma->vm_end, 0x2000);
357 	ASSERT_EQ(vma->vm_pgoff, 1);
358 
359 	/*
360 	 * Now walk through the three split VMAs and make sure they are as
361 	 * expected.
362 	 */
363 
364 	vma_iter_set(&vmi, 0);
365 	vma = vma_iter_load(&vmi);
366 
367 	ASSERT_EQ(vma->vm_start, 0);
368 	ASSERT_EQ(vma->vm_end, 0x1000);
369 	ASSERT_EQ(vma->vm_pgoff, 0);
370 
371 	detach_free_vma(vma);
372 	vma_iter_clear(&vmi);
373 
374 	vma = vma_next(&vmi);
375 
376 	ASSERT_EQ(vma->vm_start, 0x1000);
377 	ASSERT_EQ(vma->vm_end, 0x2000);
378 	ASSERT_EQ(vma->vm_pgoff, 1);
379 
380 	detach_free_vma(vma);
381 	vma_iter_clear(&vmi);
382 
383 	vma = vma_next(&vmi);
384 
385 	ASSERT_EQ(vma->vm_start, 0x2000);
386 	ASSERT_EQ(vma->vm_end, 0x3000);
387 	ASSERT_EQ(vma->vm_pgoff, 2);
388 
389 	detach_free_vma(vma);
390 	mtree_destroy(&mm.mm_mt);
391 
392 	return true;
393 }
394 
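/* Test expanding an existing VMA in place via vma_expand(). */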
395 static bool test_simple_expand(void)
396 {
397 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
398 	struct mm_struct mm = {};
399 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
400 	VMA_ITERATOR(vmi, &mm, 0);
401 	struct vma_merge_struct vmg = {
402 		.vmi = &vmi,
403 		.middle = vma,
404 		.start = 0,
405 		.end = 0x3000,
406 		.pgoff = 0,
407 	};
408 
409 	ASSERT_FALSE(attach_vma(&mm, vma));
410 
411 	ASSERT_FALSE(expand_existing(&vmg));
412 
413 	ASSERT_EQ(vma->vm_start, 0);
414 	ASSERT_EQ(vma->vm_end, 0x3000);
415 	ASSERT_EQ(vma->vm_pgoff, 0);
416 
417 	detach_free_vma(vma);
418 	mtree_destroy(&mm.mm_mt);
419 
420 	return true;
421 }
422 
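/* Test shrinking an existing VMA in place via vma_shrink(). */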
423 static bool test_simple_shrink(void)
424 {
425 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
426 	struct mm_struct mm = {};
427 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
428 	VMA_ITERATOR(vmi, &mm, 0);
429 
430 	ASSERT_FALSE(attach_vma(&mm, vma));
431 
432 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
433 
434 	ASSERT_EQ(vma->vm_start, 0);
435 	ASSERT_EQ(vma->vm_end, 0x1000);
436 	ASSERT_EQ(vma->vm_pgoff, 0);
437 
438 	detach_free_vma(vma);
439 	mtree_destroy(&mm.mm_mt);
440 
441 	return true;
442 }
443 
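/*
 * Test merging of new VMAs against existing prev/next VMAs in a variety of
 * configurations, asserting anon_vma propagation and map_count updates.
 */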
444 static bool test_merge_new(void)
445 {
446 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
447 	struct mm_struct mm = {};
448 	VMA_ITERATOR(vmi, &mm, 0);
449 	struct vma_merge_struct vmg = {
450 		.mm = &mm,
451 		.vmi = &vmi,
452 	};
453 	struct anon_vma_chain dummy_anon_vma_chain_a = {
454 		.anon_vma = &dummy_anon_vma,
455 	};
456 	struct anon_vma_chain dummy_anon_vma_chain_b = {
457 		.anon_vma = &dummy_anon_vma,
458 	};
459 	struct anon_vma_chain dummy_anon_vma_chain_c = {
460 		.anon_vma = &dummy_anon_vma,
461 	};
462 	struct anon_vma_chain dummy_anon_vma_chain_d = {
463 		.anon_vma = &dummy_anon_vma,
464 	};
465 	const struct vm_operations_struct vm_ops = {
466 		.close = dummy_close,
467 	};
468 	int count;
469 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
470 	bool merged;
471 
472 	/*
473 	 * 0123456789abc
474 	 * AA B       CC
475 	 */
476 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
477 	ASSERT_NE(vma_a, NULL);
478 	/* We give each VMA a single avc so we can test anon_vma duplication. */
479 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
480 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
481 
482 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
483 	ASSERT_NE(vma_b, NULL);
484 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
485 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
486 
487 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
488 	ASSERT_NE(vma_c, NULL);
489 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
490 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
491 
492 	/*
493 	 * NO merge.
494 	 *
495 	 * 0123456789abc
496 	 * AA B   **  CC
497 	 */
498 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
499 	ASSERT_NE(vma_d, NULL);
500 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
501 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
502 	ASSERT_FALSE(merged);
503 	ASSERT_EQ(mm.map_count, 4);
504 
505 	/*
506 	 * Merge BOTH sides.
507 	 *
508 	 * 0123456789abc
509 	 * AA*B   DD  CC
510 	 */
511 	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
512 	vma_b->anon_vma = &dummy_anon_vma;
513 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
514 	ASSERT_EQ(vma, vma_a);
515 	/* Merge with A, delete B. */
516 	ASSERT_TRUE(merged);
517 	ASSERT_EQ(vma->vm_start, 0);
518 	ASSERT_EQ(vma->vm_end, 0x4000);
519 	ASSERT_EQ(vma->vm_pgoff, 0);
520 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
521 	ASSERT_TRUE(vma_write_started(vma));
522 	ASSERT_EQ(mm.map_count, 3);
523 
524 	/*
525 	 * Merge to PREVIOUS VMA.
526 	 *
527 	 * 0123456789abc
528 	 * AAAA*  DD  CC
529 	 */
530 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
531 	ASSERT_EQ(vma, vma_a);
532 	/* Extend A. */
533 	ASSERT_TRUE(merged);
534 	ASSERT_EQ(vma->vm_start, 0);
535 	ASSERT_EQ(vma->vm_end, 0x5000);
536 	ASSERT_EQ(vma->vm_pgoff, 0);
537 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
538 	ASSERT_TRUE(vma_write_started(vma));
539 	ASSERT_EQ(mm.map_count, 3);
540 
541 	/*
542 	 * Merge to NEXT VMA.
543 	 *
544 	 * 0123456789abc
545 	 * AAAAA *DD  CC
546 	 */
547 	vma_d->anon_vma = &dummy_anon_vma;
548 	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
549 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
550 	ASSERT_EQ(vma, vma_d);
551 	/* Prepend. */
552 	ASSERT_TRUE(merged);
553 	ASSERT_EQ(vma->vm_start, 0x6000);
554 	ASSERT_EQ(vma->vm_end, 0x9000);
555 	ASSERT_EQ(vma->vm_pgoff, 6);
556 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
557 	ASSERT_TRUE(vma_write_started(vma));
558 	ASSERT_EQ(mm.map_count, 3);
559 
560 	/*
561 	 * Merge BOTH sides.
562 	 *
563 	 * 0123456789abc
564 	 * AAAAA*DDD  CC
565 	 */
566 	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
567 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
568 	ASSERT_EQ(vma, vma_a);
569 	/* Merge with A, delete D. */
570 	ASSERT_TRUE(merged);
571 	ASSERT_EQ(vma->vm_start, 0);
572 	ASSERT_EQ(vma->vm_end, 0x9000);
573 	ASSERT_EQ(vma->vm_pgoff, 0);
574 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
575 	ASSERT_TRUE(vma_write_started(vma));
576 	ASSERT_EQ(mm.map_count, 2);
577 
578 	/*
579 	 * Merge to NEXT VMA.
580 	 *
581 	 * 0123456789abc
582 	 * AAAAAAAAA *CC
583 	 */
584 	vma_c->anon_vma = &dummy_anon_vma;
585 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
586 	ASSERT_EQ(vma, vma_c);
587 	/* Prepend C. */
588 	ASSERT_TRUE(merged);
589 	ASSERT_EQ(vma->vm_start, 0xa000);
590 	ASSERT_EQ(vma->vm_end, 0xc000);
591 	ASSERT_EQ(vma->vm_pgoff, 0xa);
592 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
593 	ASSERT_TRUE(vma_write_started(vma));
594 	ASSERT_EQ(mm.map_count, 2);
595 
596 	/*
597 	 * Merge BOTH sides.
598 	 *
599 	 * 0123456789abc
600 	 * AAAAAAAAA*CCC
601 	 */
602 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
603 	ASSERT_EQ(vma, vma_a);
604 	/* Extend A and delete C. */
605 	ASSERT_TRUE(merged);
606 	ASSERT_EQ(vma->vm_start, 0);
607 	ASSERT_EQ(vma->vm_end, 0xc000);
608 	ASSERT_EQ(vma->vm_pgoff, 0);
609 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
610 	ASSERT_TRUE(vma_write_started(vma));
611 	ASSERT_EQ(mm.map_count, 1);
612 
613 	/*
614 	 * Final state.
615 	 *
616 	 * 0123456789abc
617 	 * AAAAAAAAAAAAA
618 	 */
619 
620 	count = 0;
621 	vma_iter_set(&vmi, 0);
622 	for_each_vma(vmi, vma) {
623 		ASSERT_NE(vma, NULL);
624 		ASSERT_EQ(vma->vm_start, 0);
625 		ASSERT_EQ(vma->vm_end, 0xc000);
626 		ASSERT_EQ(vma->vm_pgoff, 0);
627 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
628 
629 		detach_free_vma(vma);
630 		count++;
631 	}
632 
633 	/* Should only have one VMA left (though freed) after all is done. */
634 	ASSERT_EQ(count, 1);
635 
636 	mtree_destroy(&mm.mm_mt);
637 	return true;
638 }
639 
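/* Test that VM_SPECIAL flags prevent otherwise-valid merges. */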
640 static bool test_vma_merge_special_flags(void)
641 {
642 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
643 	struct mm_struct mm = {};
644 	VMA_ITERATOR(vmi, &mm, 0);
645 	struct vma_merge_struct vmg = {
646 		.mm = &mm,
647 		.vmi = &vmi,
648 	};
649 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
650 	vm_flags_t all_special_flags = 0;
651 	int i;
652 	struct vm_area_struct *vma_left, *vma;
653 
654 	/* Make sure there aren't new VM_SPECIAL flags. */
655 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
656 		all_special_flags |= special_flags[i];
657 	}
658 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
659 
660 	/*
661 	 * 01234
662 	 * AAA
663 	 */
664 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
665 	ASSERT_NE(vma_left, NULL);
666 
667 	/* 1. Set up new VMA with special flag that would otherwise merge. */
668 
669 	/*
670 	 * 01234
671 	 * AAA*
672 	 *
673 	 * This should merge if not for the VM_SPECIAL flag.
674 	 */
675 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
676 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
677 		vm_flags_t special_flag = special_flags[i];
678 
679 		vma_left->__vm_flags = flags | special_flag;
680 		vmg.flags = flags | special_flag;
681 		vma = merge_new(&vmg);
682 		ASSERT_EQ(vma, NULL);
683 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
684 	}
685 
686 	/* 2. Modify VMA with special flag that would otherwise merge. */
687 
688 	/*
689 	 * 01234
690 	 * AAAB
691 	 *
692 	 * Create a VMA to modify.
693 	 */
694 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
695 	ASSERT_NE(vma, NULL);
696 	vmg.middle = vma;
697 
698 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
699 		vm_flags_t special_flag = special_flags[i];
700 
701 		vma_left->__vm_flags = flags | special_flag;
702 		vmg.flags = flags | special_flag;
703 		vma = merge_existing(&vmg);
704 		ASSERT_EQ(vma, NULL);
705 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
706 	}
707 
708 	cleanup_mm(&mm, &vmi);
709 	return true;
710 }
711 
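/* Test merge behaviour when VMAs have a vm_ops->close() hook. */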
712 static bool test_vma_merge_with_close(void)
713 {
714 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
715 	struct mm_struct mm = {};
716 	VMA_ITERATOR(vmi, &mm, 0);
717 	struct vma_merge_struct vmg = {
718 		.mm = &mm,
719 		.vmi = &vmi,
720 	};
721 	const struct vm_operations_struct vm_ops = {
722 		.close = dummy_close,
723 	};
724 	struct vm_area_struct *vma_prev, *vma_next, *vma;
725 
726 	/*
727 	 * When merging VMAs we are not permitted to remove any VMA that has a
728 	 * vm_ops->close() hook.
729 	 *
730 	 * Considering the two possible adjacent VMAs to which a VMA can be
731 	 * merged:
732 	 *
733 	 * [ prev ][ vma ][ next ]
734 	 *
735 	 * In no case will we need to delete prev. If the operation is
736 	 * mergeable, then prev will be extended with one or both of vma and
737 	 * next deleted.
738 	 *
739 	 * As a result, during initial mergeability checks, only
740 	 * can_vma_merge_before() (which implies the VMA being merged with is
741 	 * 'next' as shown above) bothers to check to see whether the next VMA
742 	 * has a vm_ops->close() callback that will need to be called when
743 	 * removed.
744 	 *
745 	 * If it does, then we cannot merge as the resources that the close()
746 	 * operation potentially clears down are tied only to the existing VMA
747 	 * range and we have no way of extending those to the newly merged one.
748 	 *
749 	 * We must consider two scenarios:
750 	 *
751 	 * A.
752 	 *
753 	 * vm_ops->close:     -       -    !NULL
754 	 *                 [ prev ][ vma ][ next ]
755 	 *
756 	 * Where prev may or may not be present/mergeable.
757 	 *
758 	 * This is picked up by a specific check in can_vma_merge_before().
759 	 *
760 	 * B.
761 	 *
762 	 * vm_ops->close:     -     !NULL
763 	 *                 [ prev ][ vma ]
764 	 *
765 	 * Where prev and vma are present and mergeable.
766 	 *
767 	 * This is picked up by a specific check in the modified VMA merge.
768 	 *
769 	 * IMPORTANT NOTE: We make the assumption that the following case:
770 	 *
771 	 *    -     !NULL   NULL
772 	 * [ prev ][ vma ][ next ]
773 	 *
774 	 * Cannot occur, because vma->vm_ops being the same implies the same
775 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
776 	 * would be set too, and thus scenario A would pick this up.
777 	 */
778 
779 	/*
780 	 * The only case of a new VMA merge that results in a VMA being deleted
781 	 * is one where both the previous and next VMAs are merged - in this
782 	 * instance the next VMA is deleted, and the previous VMA is extended.
783 	 *
784 	 * If we are unable to do so, we reduce the operation to simply
785 	 * extending the prev VMA and not merging next.
786 	 *
787 	 * 0123456789
788 	 * PPP**NNNN
789 	 *             ->
790 	 * 0123456789
791 	 * PPPPPPNNN
792 	 */
793 
794 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
795 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
796 	vma_next->vm_ops = &vm_ops;
797 
798 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
799 	ASSERT_EQ(merge_new(&vmg), vma_prev);
800 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
801 	ASSERT_EQ(vma_prev->vm_start, 0);
802 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
803 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
804 
805 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
806 
807 	/*
808 	 * When modifying an existing VMA there are further cases where we
809 	 * delete VMAs.
810 	 *
811 	 *    <>
812 	 * 0123456789
813 	 * PPPVV
814 	 *
815 	 * In this instance, if vma has a close hook, the merge simply cannot
816 	 * proceed.
817 	 */
818 
819 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
820 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
821 	vma->vm_ops = &vm_ops;
822 
823 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
824 	vmg.prev = vma_prev;
825 	vmg.middle = vma;
826 
827 	/*
828 	 * The VMA being modified in a way that would otherwise merge should
829 	 * also fail.
830 	 */
831 	ASSERT_EQ(merge_existing(&vmg), NULL);
832 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
833 
834 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
835 
836 	/*
837 	 * This case is mirrored if merging with next.
838 	 *
839 	 *    <>
840 	 * 0123456789
841 	 *    VVNNNN
842 	 *
843 	 * In this instance, if vma has a close hook, the merge simply cannot
844 	 * proceed.
845 	 */
846 
847 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
848 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
849 	vma->vm_ops = &vm_ops;
850 
851 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
852 	vmg.middle = vma;
853 	ASSERT_EQ(merge_existing(&vmg), NULL);
854 	/*
855 	 * Initially this is misapprehended as an out of memory report, as the
856 	 * close() check is handled in the same way as anon_vma duplication
857 	 * failures; however, a subsequent patch resolves this.
858 	 */
859 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
860 
861 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
862 
863 	/*
864 	 * Finally, we consider two variants of the case where we modify a VMA
865 	 * to merge with both the previous and next VMAs.
866 	 *
867 	 * The first variant is where vma has a close hook. In this instance, no
868 	 * merge can proceed.
869 	 *
870 	 *    <>
871 	 * 0123456789
872 	 * PPPVVNNNN
873 	 */
874 
875 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
876 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
877 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
878 	vma->vm_ops = &vm_ops;
879 
880 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
881 	vmg.prev = vma_prev;
882 	vmg.middle = vma;
883 
884 	ASSERT_EQ(merge_existing(&vmg), NULL);
885 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
886 
887 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
888 
889 	/*
890 	 * The second variant is where next has a close hook. In this instance,
891 	 * we reduce the operation to a merge between prev and vma.
892 	 *
893 	 *    <>
894 	 * 0123456789
895 	 * PPPVVNNNN
896 	 *            ->
897 	 * 0123456789
898 	 * PPPPPNNNN
899 	 */
900 
901 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
902 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
903 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
904 	vma_next->vm_ops = &vm_ops;
905 
906 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
907 	vmg.prev = vma_prev;
908 	vmg.middle = vma;
909 
910 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
911 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
912 	ASSERT_EQ(vma_prev->vm_start, 0);
913 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
914 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
915 
916 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
917 
918 	return true;
919 }
920 
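/*
 * Test that a new VMA may still merge with a close()-hooked prev, leaving a
 * close()-hooked next intact.
 */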
921 static bool test_vma_merge_new_with_close(void)
922 {
923 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
924 	struct mm_struct mm = {};
925 	VMA_ITERATOR(vmi, &mm, 0);
926 	struct vma_merge_struct vmg = {
927 		.mm = &mm,
928 		.vmi = &vmi,
929 	};
930 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
931 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
932 	const struct vm_operations_struct vm_ops = {
933 		.close = dummy_close,
934 	};
935 	struct vm_area_struct *vma;
936 
937 	/*
938 	 * We should allow the partial merge of a proposed new VMA if the
939 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
940 	 * compatible), e.g.:
941 	 *
942 	 *        New VMA
943 	 *    A  v-------v  B
944 	 * |-----|       |-----|
945 	 *  close         close
946 	 *
947 	 * Since the rule is to not DELETE a VMA with a close operation, this
948 	 * should be permitted, only rather than expanding A and deleting B, we
949 	 * should simply expand A and leave B intact, e.g.:
950 	 *
951 	 *        New VMA
952 	 *       A          B
953 	 * |------------||-----|
954 	 *  close         close
955 	 */
956 
957 	/* Have prev and next have a vm_ops->close() hook. */
958 	vma_prev->vm_ops = &vm_ops;
959 	vma_next->vm_ops = &vm_ops;
960 
961 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
962 	vma = merge_new(&vmg);
963 	ASSERT_NE(vma, NULL);
964 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
965 	ASSERT_EQ(vma->vm_start, 0);
966 	ASSERT_EQ(vma->vm_end, 0x5000);
967 	ASSERT_EQ(vma->vm_pgoff, 0);
968 	ASSERT_EQ(vma->vm_ops, &vm_ops);
969 	ASSERT_TRUE(vma_write_started(vma));
970 	ASSERT_EQ(mm.map_count, 2);
971 
972 	cleanup_mm(&mm, &vmi);
973 	return true;
974 }
975 
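/*
 * Test merging an existing VMA with prev and/or next over partial and full
 * spans, as well as ranges which must not merge.
 */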
976 static bool test_merge_existing(void)
977 {
978 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
979 	struct mm_struct mm = {};
980 	VMA_ITERATOR(vmi, &mm, 0);
981 	struct vm_area_struct *vma, *vma_prev, *vma_next;
982 	struct vma_merge_struct vmg = {
983 		.mm = &mm,
984 		.vmi = &vmi,
985 	};
986 	const struct vm_operations_struct vm_ops = {
987 		.close = dummy_close,
988 	};
989 	struct anon_vma_chain avc = {};
990 
991 	/*
992 	 * Merge right case - partial span.
993 	 *
994 	 *    <->
995 	 * 0123456789
996 	 *   VVVVNNN
997 	 *            ->
998 	 * 0123456789
999 	 *   VNNNNNN
1000 	 */
1001 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
1002 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1003 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
1004 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
1005 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, flags, &dummy_anon_vma);
1006 	vmg.middle = vma;
1007 	vmg.prev = vma;
1008 	vma_set_dummy_anon_vma(vma, &avc);
1009 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1010 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1011 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1012 	ASSERT_EQ(vma_next->vm_end, 0x9000);
1013 	ASSERT_EQ(vma_next->vm_pgoff, 3);
1014 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1015 	ASSERT_EQ(vma->vm_start, 0x2000);
1016 	ASSERT_EQ(vma->vm_end, 0x3000);
1017 	ASSERT_EQ(vma->vm_pgoff, 2);
1018 	ASSERT_TRUE(vma_write_started(vma));
1019 	ASSERT_TRUE(vma_write_started(vma_next));
1020 	ASSERT_EQ(mm.map_count, 2);
1021 
1022 	/* Clear down and reset. */
1023 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1024 
1025 	/*
1026 	 * Merge right case - full span.
1027 	 *
1028 	 *   <-->
1029 	 * 0123456789
1030 	 *   VVVVNNN
1031 	 *            ->
1032 	 * 0123456789
1033 	 *   NNNNNNN
1034 	 */
1035 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
1036 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
1037 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
1038 	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, flags, &dummy_anon_vma);
1039 	vmg.middle = vma;
1040 	vma_set_dummy_anon_vma(vma, &avc);
1041 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1042 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1043 	ASSERT_EQ(vma_next->vm_start, 0x2000);
1044 	ASSERT_EQ(vma_next->vm_end, 0x9000);
1045 	ASSERT_EQ(vma_next->vm_pgoff, 2);
1046 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1047 	ASSERT_TRUE(vma_write_started(vma_next));
1048 	ASSERT_EQ(mm.map_count, 1);
1049 
1050 	/* Clear down and reset. We should have deleted vma. */
1051 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1052 
1053 	/*
1054 	 * Merge left case - partial span.
1055 	 *
1056 	 *    <->
1057 	 * 0123456789
1058 	 * PPPVVVV
1059 	 *            ->
1060 	 * 0123456789
1061 	 * PPPPPPV
1062 	 */
1063 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1064 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1065 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1066 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1067 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, flags, &dummy_anon_vma);
1068 	vmg.prev = vma_prev;
1069 	vmg.middle = vma;
1070 	vma_set_dummy_anon_vma(vma, &avc);
1071 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1072 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1073 	ASSERT_EQ(vma_prev->vm_start, 0);
1074 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
1075 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1076 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1077 	ASSERT_EQ(vma->vm_start, 0x6000);
1078 	ASSERT_EQ(vma->vm_end, 0x7000);
1079 	ASSERT_EQ(vma->vm_pgoff, 6);
1080 	ASSERT_TRUE(vma_write_started(vma_prev));
1081 	ASSERT_TRUE(vma_write_started(vma));
1082 	ASSERT_EQ(mm.map_count, 2);
1083 
1084 	/* Clear down and reset. */
1085 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1086 
1087 	/*
1088 	 * Merge left case - full span.
1089 	 *
1090 	 *    <-->
1091 	 * 0123456789
1092 	 * PPPVVVV
1093 	 *            ->
1094 	 * 0123456789
1095 	 * PPPPPPP
1096 	 */
1097 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1098 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1099 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1100 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, &dummy_anon_vma);
1101 	vmg.prev = vma_prev;
1102 	vmg.middle = vma;
1103 	vma_set_dummy_anon_vma(vma, &avc);
1104 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1105 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1106 	ASSERT_EQ(vma_prev->vm_start, 0);
1107 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1108 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1109 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1110 	ASSERT_TRUE(vma_write_started(vma_prev));
1111 	ASSERT_EQ(mm.map_count, 1);
1112 
1113 	/* Clear down and reset. We should have deleted vma. */
1114 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1115 
1116 	/*
1117 	 * Merge both case.
1118 	 *
1119 	 *    <-->
1120 	 * 0123456789
1121 	 * PPPVVVVNNN
1122 	 *             ->
1123 	 * 0123456789
1124 	 * PPPPPPPPPP
1125 	 */
1126 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1127 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1128 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1129 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1130 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, &dummy_anon_vma);
1131 	vmg.prev = vma_prev;
1132 	vmg.middle = vma;
1133 	vma_set_dummy_anon_vma(vma, &avc);
1134 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1135 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1136 	ASSERT_EQ(vma_prev->vm_start, 0);
1137 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
1138 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1139 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1140 	ASSERT_TRUE(vma_write_started(vma_prev));
1141 	ASSERT_EQ(mm.map_count, 1);
1142 
1143 	/* Clear down and reset. We should have deleted prev and next. */
1144 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1145 
1146 	/*
1147 	 * Non-merge ranges. The modified VMA merge operation assumes that the
1148 	 * caller always specifies ranges within the input VMA so we need only
1149 	 * examine these cases.
1150 	 *
1151 	 *     -
1152 	 *      -
1153 	 *       -
1154 	 *     <->
1155 	 *     <>
1156 	 *      <>
1157 	 * 0123456789a
1158 	 * PPPVVVVVNNN
1159 	 */
1160 
1161 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1162 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1163 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1164 
1165 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1166 	vmg.prev = vma;
1167 	vmg.middle = vma;
1168 	ASSERT_EQ(merge_existing(&vmg), NULL);
1169 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1170 
1171 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1172 	vmg.prev = vma;
1173 	vmg.middle = vma;
1174 	ASSERT_EQ(merge_existing(&vmg), NULL);
1175 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1176 
1177 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1178 	vmg.prev = vma;
1179 	vmg.middle = vma;
1180 	ASSERT_EQ(merge_existing(&vmg), NULL);
1181 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1182 
1183 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1184 	vmg.prev = vma;
1185 	vmg.middle = vma;
1186 	ASSERT_EQ(merge_existing(&vmg), NULL);
1187 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1188 
1189 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1190 	vmg.prev = vma;
1191 	vmg.middle = vma;
1192 	ASSERT_EQ(merge_existing(&vmg), NULL);
1193 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1194 
1195 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1196 	vmg.prev = vma;
1197 	vmg.middle = vma;
1198 	ASSERT_EQ(merge_existing(&vmg), NULL);
1199 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1200 
1201 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1202 
1203 	return true;
1204 }
1205 
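/*
 * Test that incompatible prev/next anon_vmas reduce a three-way merge to a
 * merge of prev and the middle range only.
 */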
1206 static bool test_anon_vma_non_mergeable(void)
1207 {
1208 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1209 	struct mm_struct mm = {};
1210 	VMA_ITERATOR(vmi, &mm, 0);
1211 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1212 	struct vma_merge_struct vmg = {
1213 		.mm = &mm,
1214 		.vmi = &vmi,
1215 	};
1216 	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
1217 	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
1218 	struct anon_vma dummy_anon_vma_2;
1219 
1220 	/*
1221 	 * In the case of modified VMA merge, merging both left and right VMAs
1222 	 * but where prev and next have incompatible anon_vma objects, we revert
1223 	 * to a merge of prev and VMA:
1224 	 *
1225 	 *    <-->
1226 	 * 0123456789
1227 	 * PPPVVVVNNN
1228 	 *            ->
1229 	 * 0123456789
1230 	 * PPPPPPPNNN
1231 	 */
1232 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1233 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1234 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1235 
1236 	/*
1237 	 * Give both prev and next single anon_vma_chain fields, so they will
1238 	 * merge with the NULL vmg->anon_vma.
1239 	 *
1240 	 * However, when prev is compared to next, the merge should fail.
1241 	 */
1242 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, NULL);
1243 	vmg.prev = vma_prev;
1244 	vmg.middle = vma;
1245 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1246 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1247 
1248 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1249 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1250 	ASSERT_EQ(vma_prev->vm_start, 0);
1251 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1252 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1253 	ASSERT_TRUE(vma_write_started(vma_prev));
1254 	ASSERT_FALSE(vma_write_started(vma_next));
1255 
1256 	/* Clear down and reset. */
1257 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1258 
1259 	/*
1260 	 * Now consider the new VMA case. This is equivalent, only adding a new
1261 	 * VMA in a gap between prev and next.
1262 	 *
1263 	 *    <-->
1264 	 * 0123456789
1265 	 * PPP****NNN
1266 	 *            ->
1267 	 * 0123456789
1268 	 * PPPPPPPNNN
1269 	 */
1270 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1271 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1272 
1273 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, NULL);
1274 	vmg.prev = vma_prev;
1275 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1276 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1277 
1278 	vmg.anon_vma = NULL;
1279 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1280 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1281 	ASSERT_EQ(vma_prev->vm_start, 0);
1282 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1283 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1284 	ASSERT_TRUE(vma_write_started(vma_prev));
1285 	ASSERT_FALSE(vma_write_started(vma_next));
1286 
1287 	/* Final cleanup. */
1288 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1289 
1290 	return true;
1291 }
1292 
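/* Test that anon_vma objects are duplicated across the merge and expand cases. */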
1293 static bool test_dup_anon_vma(void)
1294 {
1295 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1296 	struct mm_struct mm = {};
1297 	VMA_ITERATOR(vmi, &mm, 0);
1298 	struct vma_merge_struct vmg = {
1299 		.mm = &mm,
1300 		.vmi = &vmi,
1301 	};
1302 	struct anon_vma_chain dummy_anon_vma_chain = {
1303 		.anon_vma = &dummy_anon_vma,
1304 	};
1305 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1306 
1307 	reset_dummy_anon_vma();
1308 
1309 	/*
1310 	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1311 	 * assigns it to the expanded VMA.
1312 	 *
1313 	 * This covers new VMA merging, as these operations amount to a VMA
1314 	 * expand.
1315 	 */
1316 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1317 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1318 	vma_next->anon_vma = &dummy_anon_vma;
1319 
1320 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1321 	vmg.middle = vma_prev;
1322 	vmg.next = vma_next;
1323 
1324 	ASSERT_EQ(expand_existing(&vmg), 0);
1325 
1326 	/* Will have been cloned. */
1327 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1328 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1329 
1330 	/* Cleanup ready for next run. */
1331 	cleanup_mm(&mm, &vmi);
1332 
1333 	/*
1334 	 * next has anon_vma, we assign to prev.
1335 	 *
1336 	 *         |<----->|
1337 	 * |-------*********-------|
1338 	 *   prev     vma     next
1339 	 *  extend   delete  delete
1340 	 */
1341 
1342 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1343 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1344 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1345 
1346 	/* Initialise avc so mergeability check passes. */
1347 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1348 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1349 
1350 	vma_next->anon_vma = &dummy_anon_vma;
1351 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1352 	vmg.prev = vma_prev;
1353 	vmg.middle = vma;
1354 
1355 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1356 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1357 
1358 	ASSERT_EQ(vma_prev->vm_start, 0);
1359 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1360 
1361 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1362 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1363 
1364 	cleanup_mm(&mm, &vmi);
1365 
1366 	/*
1367 	 * vma has anon_vma, we assign to prev.
1368 	 *
1369 	 *         |<----->|
1370 	 * |-------*********-------|
1371 	 *   prev     vma     next
1372 	 *  extend   delete  delete
1373 	 */
1374 
1375 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1376 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1377 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1378 	vmg.anon_vma = &dummy_anon_vma;
1379 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1380 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1381 	vmg.prev = vma_prev;
1382 	vmg.middle = vma;
1383 
1384 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1385 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1386 
1387 	ASSERT_EQ(vma_prev->vm_start, 0);
1388 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1389 
1390 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1391 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1392 
1393 	cleanup_mm(&mm, &vmi);
1394 
1395 	/*
1396 	 * vma has anon_vma, we assign to prev.
1397 	 *
1398 	 *         |<----->|
1399 	 * |-------*************
1400 	 *   prev       vma
1401 	 *  extend shrink/delete
1402 	 */
1403 
1404 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1405 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1406 
1407 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1408 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1409 	vmg.prev = vma_prev;
1410 	vmg.middle = vma;
1411 
1412 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1413 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1414 
1415 	ASSERT_EQ(vma_prev->vm_start, 0);
1416 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1417 
1418 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1419 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1420 
1421 	cleanup_mm(&mm, &vmi);
1422 
1423 	/*
1424 	 * vma has anon_vma, we assign to next.
1425 	 *
1426 	 *     |<----->|
1427 	 * *************-------|
1428 	 *      vma       next
1429 	 * shrink/delete extend
1430 	 */
1431 
1432 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1433 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1434 
1435 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1436 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1437 	vmg.prev = vma;
1438 	vmg.middle = vma;
1439 
1440 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1441 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1442 
1443 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1444 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1445 
1446 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1447 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1448 
1449 	cleanup_mm(&mm, &vmi);
1450 	return true;
1451 }
1452 
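/*
 * Test that a failed VMA iterator preallocation aborts the merge and unlinks
 * any duplicated anon_vma.
 */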
1453 static bool test_vmi_prealloc_fail(void)
1454 {
1455 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1456 	struct mm_struct mm = {};
1457 	VMA_ITERATOR(vmi, &mm, 0);
1458 	struct vma_merge_struct vmg = {
1459 		.mm = &mm,
1460 		.vmi = &vmi,
1461 	};
1462 	struct anon_vma_chain avc = {};
1463 	struct vm_area_struct *vma_prev, *vma;
1464 
1465 	/*
1466 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1467 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1468 	 * the duplicated anon_vma is unlinked.
1469 	 */
1470 
1471 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1472 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1473 	vma->anon_vma = &dummy_anon_vma;
1474 
1475 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, flags, &dummy_anon_vma);
1476 	vmg.prev = vma_prev;
1477 	vmg.middle = vma;
1478 	vma_set_dummy_anon_vma(vma, &avc);
1479 
1480 	fail_prealloc = true;
1481 
1482 	/* This will cause the merge to fail. */
1483 	ASSERT_EQ(merge_existing(&vmg), NULL);
1484 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1485 	/* We will already have assigned the anon_vma. */
1486 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1487 	/* And it was both cloned and unlinked. */
1488 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1489 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1490 
1491 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1492 
1493 	/*
1494 	 * We repeat the same operation for expanding a VMA, which is what new
1495 	 * VMA merging ultimately uses too. This asserts that unlinking is
1496 	 * performed in this case too.
1497 	 */
1498 
1499 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1500 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1501 	vma->anon_vma = &dummy_anon_vma;
1502 
1503 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1504 	vmg.middle = vma_prev;
1505 	vmg.next = vma;
1506 
1507 	fail_prealloc = true;
1508 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1509 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1510 
1511 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1512 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1513 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1514 
1515 	cleanup_mm(&mm, &vmi);
1516 	return true;
1517 }
1518 
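/* Test vma_merge_extend() growing a VMA to merge with the following VMA. */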
1519 static bool test_merge_extend(void)
1520 {
1521 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1522 	struct mm_struct mm = {};
1523 	VMA_ITERATOR(vmi, &mm, 0x1000);
1524 	struct vm_area_struct *vma;
1525 
1526 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1527 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1528 
1529 	/*
1530 	 * Extend a VMA into the gap between itself and the following VMA.
1531 	 * This should result in a merge.
1532 	 *
1533 	 * <->
1534 	 * *  *
1535 	 *
1536 	 */
1537 
1538 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1539 	ASSERT_EQ(vma->vm_start, 0);
1540 	ASSERT_EQ(vma->vm_end, 0x4000);
1541 	ASSERT_EQ(vma->vm_pgoff, 0);
1542 	ASSERT_TRUE(vma_write_started(vma));
1543 	ASSERT_EQ(mm.map_count, 1);
1544 
1545 	cleanup_mm(&mm, &vmi);
1546 	return true;
1547 }
1548 
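/* Test copy_vma() both with and without a merge into an adjacent VMA. */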
1549 static bool test_copy_vma(void)
1550 {
1551 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1552 	struct mm_struct mm = {};
1553 	bool need_locks = false;
1554 	VMA_ITERATOR(vmi, &mm, 0);
1555 	struct vm_area_struct *vma, *vma_new, *vma_next;
1556 
1557 	/* Move backwards and do not merge. */
1558 
1559 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1560 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1561 	ASSERT_NE(vma_new, vma);
1562 	ASSERT_EQ(vma_new->vm_start, 0);
1563 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1564 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1565 	vma_assert_attached(vma_new);
1566 
1567 	cleanup_mm(&mm, &vmi);
1568 
1569 	/* Move a VMA into position next to another and merge the two. */
1570 
1571 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1572 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1573 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1574 	vma_assert_attached(vma_new);
1575 
1576 	ASSERT_EQ(vma_new, vma_next);
1577 
1578 	cleanup_mm(&mm, &vmi);
1579 	return true;
1580 }
1581 
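/* Test the just_expand mode of vma_merge_new_range(), which only expands prev. */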
1582 static bool test_expand_only_mode(void)
1583 {
1584 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1585 	struct mm_struct mm = {};
1586 	VMA_ITERATOR(vmi, &mm, 0);
1587 	struct vm_area_struct *vma_prev, *vma;
1588 	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
1589 
1590 	/*
1591 	 * Place a VMA prior to the one we're expanding so we assert that we do
1592 	 * not erroneously try to traverse to the previous VMA even though we
1593 	 * have, through the use of the just_expand flag, indicated we do not
1594 	 * need to do so.
1595 	 */
1596 	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1597 
1598 	/*
1599 	 * We will be positioned at the prev VMA, but looking to expand to
1600 	 * 0x9000.
1601 	 */
1602 	vma_iter_set(&vmi, 0x3000);
1603 	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1604 	vmg.prev = vma_prev;
1605 	vmg.just_expand = true;
1606 
1607 	vma = vma_merge_new_range(&vmg);
1608 	ASSERT_NE(vma, NULL);
1609 	ASSERT_EQ(vma, vma_prev);
1610 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1611 	ASSERT_EQ(vma->vm_start, 0x3000);
1612 	ASSERT_EQ(vma->vm_end, 0x9000);
1613 	ASSERT_EQ(vma->vm_pgoff, 3);
1614 	ASSERT_TRUE(vma_write_started(vma));
1615 	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1616 	vma_assert_attached(vma);
1617 
1618 	cleanup_mm(&mm, &vmi);
1619 	return true;
1620 }
1621 
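/* Test basic __mmap_region() behaviour, including merging adjacent mappings. */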
1622 static bool test_mmap_region_basic(void)
1623 {
1624 	struct mm_struct mm = {};
1625 	unsigned long addr;
1626 	struct vm_area_struct *vma;
1627 	VMA_ITERATOR(vmi, &mm, 0);
1628 
1629 	current->mm = &mm;
1630 
1631 	/* Map at 0x300000, length 0x3000. */
1632 	addr = __mmap_region(NULL, 0x300000, 0x3000,
1633 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1634 			     0x300, NULL);
1635 	ASSERT_EQ(addr, 0x300000);
1636 
1637 	/* Map at 0x250000, length 0x3000. */
1638 	addr = __mmap_region(NULL, 0x250000, 0x3000,
1639 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1640 			     0x250, NULL);
1641 	ASSERT_EQ(addr, 0x250000);
1642 
1643 	/* Map at 0x303000, merging to 0x300000 of length 0x6000. */
1644 	addr = __mmap_region(NULL, 0x303000, 0x3000,
1645 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1646 			     0x303, NULL);
1647 	ASSERT_EQ(addr, 0x303000);
1648 
1649 	/* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
1650 	addr = __mmap_region(NULL, 0x24d000, 0x3000,
1651 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1652 			     0x24d, NULL);
1653 	ASSERT_EQ(addr, 0x24d000);
1654 
1655 	ASSERT_EQ(mm.map_count, 2);
1656 
1657 	for_each_vma(vmi, vma) {
1658 		if (vma->vm_start == 0x300000) {
1659 			ASSERT_EQ(vma->vm_end, 0x306000);
1660 			ASSERT_EQ(vma->vm_pgoff, 0x300);
1661 		} else if (vma->vm_start == 0x24d000) {
1662 			ASSERT_EQ(vma->vm_end, 0x253000);
1663 			ASSERT_EQ(vma->vm_pgoff, 0x24d);
1664 		} else {
1665 			ASSERT_FALSE(true);
1666 		}
1667 	}
1668 
1669 	cleanup_mm(&mm, &vmi);
1670 	return true;
1671 }
1672 
1673 int main(void)
1674 {
1675 	int num_tests = 0, num_fail = 0;
1676 
1677 	maple_tree_init();
1678 	vma_state_init();
1679 
1680 #define TEST(name)							\
1681 	do {								\
1682 		num_tests++;						\
1683 		if (!test_##name()) {					\
1684 			num_fail++;					\
1685 			fprintf(stderr, "Test " #name " FAILED\n");	\
1686 		}							\
1687 	} while (0)
1688 
1689 	/* Very simple tests to kick the tyres. */
1690 	TEST(simple_merge);
1691 	TEST(simple_modify);
1692 	TEST(simple_expand);
1693 	TEST(simple_shrink);
1694 
1695 	TEST(merge_new);
1696 	TEST(vma_merge_special_flags);
1697 	TEST(vma_merge_with_close);
1698 	TEST(vma_merge_new_with_close);
1699 	TEST(merge_existing);
1700 	TEST(anon_vma_non_mergeable);
1701 	TEST(dup_anon_vma);
1702 	TEST(vmi_prealloc_fail);
1703 	TEST(merge_extend);
1704 	TEST(copy_vma);
1705 	TEST(expand_only_mode);
1706 
1707 	TEST(mmap_region_basic);
1708 
1709 #undef TEST
1710 
1711 	printf("%d tests run, %d passed, %d failed.\n",
1712 	       num_tests, num_tests - num_fail, num_fail);
1713 
1714 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1715 }
1716