1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "generated/bit-length.h"
8 
9 #include "maple-shared.h"
10 #include "vma_internal.h"
11 
12 /* Include so the header guard is set. */
13 #include "../../../mm/vma.h"
14 
15 static bool fail_prealloc;
16 
17 /* Then override vma_iter_prealloc() so we can choose to fail it. */
18 #define vma_iter_prealloc(vmi, vma)					\
19 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
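/*
 * Tests set fail_prealloc immediately before an operation to simulate an
 * allocation failure (see test_vmi_prealloc_fail()), and cleanup_mm() resets
 * it between tests.
 */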
20 
21 #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
22 
23 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
24 unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
25 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
26 
27 /*
28  * Directly import the VMA implementation here. Our vma_internal.h wrapper
29  * provides userland-equivalent functionality for everything vma.c uses.
30  */
31 #include "../../../mm/vma_init.c"
32 #include "../../../mm/vma_exec.c"
33 #include "../../../mm/vma.c"
34 
35 const struct vm_operations_struct vma_dummy_vm_ops;
36 static struct anon_vma dummy_anon_vma;
37 
38 #define ASSERT_TRUE(_expr)						\
39 	do {								\
40 		if (!(_expr)) {						\
41 			fprintf(stderr,					\
42 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
43 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
44 			return false;					\
45 		}							\
46 	} while (0)
47 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
48 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
49 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
50 
51 #define IS_SET(_val, _flags) (((_val) & (_flags)) == (_flags))
52 
53 static struct task_struct __current;
54 
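/* Userland stub: all tests share a single dummy 'current' task. */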
55 struct task_struct *get_current(void)
56 {
57 	return &__current;
58 }
59 
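/* Userland stub: report an effectively unlimited rlimit for every resource. */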
60 unsigned long rlimit(unsigned int limit)
61 {
62 	return (unsigned long)-1;
63 }
64 
65 /* Helper function to simply allocate a VMA. */
66 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
67 					unsigned long start,
68 					unsigned long end,
69 					pgoff_t pgoff,
70 					vm_flags_t vm_flags)
71 {
72 	struct vm_area_struct *vma = vm_area_alloc(mm);
73 
74 	if (vma == NULL)
75 		return NULL;
76 
77 	vma->vm_start = start;
78 	vma->vm_end = end;
79 	vma->vm_pgoff = pgoff;
80 	vm_flags_reset(vma, vm_flags);
81 	vma_assert_detached(vma);
82 
83 	return vma;
84 }
85 
86 /* Helper function to link an already-allocated VMA into the tree. */
87 static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
88 {
89 	int res;
90 
91 	res = vma_link(mm, vma);
92 	if (!res)
93 		vma_assert_attached(vma);
94 	return res;
95 }
96 
97 static void detach_free_vma(struct vm_area_struct *vma)
98 {
99 	vma_mark_detached(vma);
100 	vm_area_free(vma);
101 }
102 
103 /* Helper function to allocate a VMA and link it to the tree. */
104 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
105 						 unsigned long start,
106 						 unsigned long end,
107 						 pgoff_t pgoff,
108 						 vm_flags_t vm_flags)
109 {
110 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);
111 
112 	if (vma == NULL)
113 		return NULL;
114 
115 	if (attach_vma(mm, vma)) {
116 		detach_free_vma(vma);
117 		return NULL;
118 	}
119 
120 	/*
121 	 * Reset this counter which we use to track whether writes have
122 	 * begun. Linking to the tree will have caused this to be incremented,
123 	 * which means we will get a false positive otherwise.
124 	 */
125 	vma->vm_lock_seq = UINT_MAX;
126 
127 	return vma;
128 }
129 
130 /* Helper function which provides a wrapper around a merge new VMA operation. */
131 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
132 {
133 	struct vm_area_struct *vma;
134 	/*
135 	 * For convenience, get the prev and next VMAs, which the new VMA
136 	 * operation requires.
137 	 */
138 	vmg->next = vma_next(vmg->vmi);
139 	vmg->prev = vma_prev(vmg->vmi);
140 	vma_iter_next_range(vmg->vmi);
141 
142 	vma = vma_merge_new_range(vmg);
143 	if (vma)
144 		vma_assert_attached(vma);
145 
146 	return vma;
147 }
148 
149 /*
150  * Helper function which provides a wrapper around a merge existing VMA
151  * operation.
152  */
153 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
154 {
155 	struct vm_area_struct *vma;
156 
157 	vma = vma_merge_existing_range(vmg);
158 	if (vma)
159 		vma_assert_attached(vma);
160 	return vma;
161 }
162 
163 /*
164  * Helper function which provides a wrapper around the expansion of an existing
165  * VMA.
166  */
167 static int expand_existing(struct vma_merge_struct *vmg)
168 {
169 	return vma_expand(vmg);
170 }
171 
172 /*
173  * Helper function to reset merge state and the associated VMA iterator to a
174  * specified new range.
175  */
176 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
177 			  unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
178 {
179 	vma_iter_set(vmg->vmi, start);
180 
181 	vmg->prev = NULL;
182 	vmg->middle = NULL;
183 	vmg->next = NULL;
184 	vmg->target = NULL;
185 
186 	vmg->start = start;
187 	vmg->end = end;
188 	vmg->pgoff = pgoff;
189 	vmg->vm_flags = vm_flags;
190 
191 	vmg->just_expand = false;
192 	vmg->__remove_middle = false;
193 	vmg->__remove_next = false;
194 	vmg->__adjust_middle_start = false;
195 	vmg->__adjust_next_start = false;
196 }
197 
198 /* Helper function to set both the VMG range and its anon_vma. */
199 static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
200 				   unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
201 				   struct anon_vma *anon_vma)
202 {
203 	vmg_set_range(vmg, start, end, pgoff, vm_flags);
204 	vmg->anon_vma = anon_vma;
205 }
206 
207 /*
208  * Helper function to try to merge a new VMA.
209  *
210  * Update vmg and the iterator for it and try to merge; otherwise allocate a new
211  * VMA, link it to the maple tree and return it.
212  */
213 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
214 						struct vma_merge_struct *vmg,
215 						unsigned long start, unsigned long end,
216 						pgoff_t pgoff, vm_flags_t vm_flags,
217 						bool *was_merged)
218 {
219 	struct vm_area_struct *merged;
220 
221 	vmg_set_range(vmg, start, end, pgoff, vm_flags);
222 
223 	merged = merge_new(vmg);
224 	if (merged) {
225 		*was_merged = true;
226 		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
227 		return merged;
228 	}
229 
230 	*was_merged = false;
231 
232 	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
233 
234 	return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
235 }
236 
237 /*
238  * Helper function to reset the dummy anon_vma to indicate it has not been
239  * duplicated.
240  */
241 static void reset_dummy_anon_vma(void)
242 {
243 	dummy_anon_vma.was_cloned = false;
244 	dummy_anon_vma.was_unlinked = false;
245 }
246 
247 /*
248  * Helper function to remove all VMAs and destroy the maple tree associated with
249  * a virtual address space. Returns a count of VMAs in the tree.
250  */
251 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
252 {
253 	struct vm_area_struct *vma;
254 	int count = 0;
255 
256 	fail_prealloc = false;
257 	reset_dummy_anon_vma();
258 
259 	vma_iter_set(vmi, 0);
260 	for_each_vma(*vmi, vma) {
261 		detach_free_vma(vma);
262 		count++;
263 	}
264 
265 	mtree_destroy(&mm->mm_mt);
266 	mm->map_count = 0;
267 	return count;
268 }
269 
270 /* Helper function to determine if VMA has had vma_start_write() performed. */
271 static bool vma_write_started(struct vm_area_struct *vma)
272 {
273 	int seq = vma->vm_lock_seq;
274 
275 	/* We reset after each check. */
276 	vma->vm_lock_seq = UINT_MAX;
277 
278 	/* The vma_start_write() stub simply increments this value. */
279 	return seq > -1;
280 }
281 
282 /* Helper function providing a dummy vm_ops->close() method. */
283 static void dummy_close(struct vm_area_struct *)
284 {
285 }
286 
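/* Attach the given anon_vma to a VMA via the caller-provided anon_vma_chain. */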
287 static void __vma_set_dummy_anon_vma(struct vm_area_struct *vma,
288 				     struct anon_vma_chain *avc,
289 				     struct anon_vma *anon_vma)
290 {
291 	vma->anon_vma = anon_vma;
292 	INIT_LIST_HEAD(&vma->anon_vma_chain);
293 	list_add(&avc->same_vma, &vma->anon_vma_chain);
294 	avc->anon_vma = vma->anon_vma;
295 }
296 
297 static void vma_set_dummy_anon_vma(struct vm_area_struct *vma,
298 				   struct anon_vma_chain *avc)
299 {
300 	__vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma);
301 }
302 
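/* A new range that abuts compatible VMAs on both sides should merge into one. */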
303 static bool test_simple_merge(void)
304 {
305 	struct vm_area_struct *vma;
306 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
307 	struct mm_struct mm = {};
308 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
309 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
310 	VMA_ITERATOR(vmi, &mm, 0x1000);
311 	struct vma_merge_struct vmg = {
312 		.mm = &mm,
313 		.vmi = &vmi,
314 		.start = 0x1000,
315 		.end = 0x2000,
316 		.vm_flags = vm_flags,
317 		.pgoff = 1,
318 	};
319 
320 	ASSERT_FALSE(attach_vma(&mm, vma_left));
321 	ASSERT_FALSE(attach_vma(&mm, vma_right));
322 
323 	vma = merge_new(&vmg);
324 	ASSERT_NE(vma, NULL);
325 
326 	ASSERT_EQ(vma->vm_start, 0);
327 	ASSERT_EQ(vma->vm_end, 0x3000);
328 	ASSERT_EQ(vma->vm_pgoff, 0);
329 	ASSERT_EQ(vma->vm_flags, vm_flags);
330 
331 	detach_free_vma(vma);
332 	mtree_destroy(&mm.mm_mt);
333 
334 	return true;
335 }
336 
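/* Modifying a sub-range of a VMA should split it into three VMAs. */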
337 static bool test_simple_modify(void)
338 {
339 	struct vm_area_struct *vma;
340 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
341 	struct mm_struct mm = {};
342 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
343 	VMA_ITERATOR(vmi, &mm, 0x1000);
344 	vm_flags_t flags = VM_READ | VM_MAYREAD;
345 
346 	ASSERT_FALSE(attach_vma(&mm, init_vma));
347 
348 	/*
349 	 * The flags will not be changed, the vma_modify_flags() function
350 	 * performs the merge/split only.
351 	 */
352 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
353 			       0x1000, 0x2000, &flags);
354 	ASSERT_NE(vma, NULL);
355 	/* We modify the provided VMA, and on split allocate new VMAs. */
356 	ASSERT_EQ(vma, init_vma);
357 
358 	ASSERT_EQ(vma->vm_start, 0x1000);
359 	ASSERT_EQ(vma->vm_end, 0x2000);
360 	ASSERT_EQ(vma->vm_pgoff, 1);
361 
362 	/*
363 	 * Now walk through the three split VMAs and make sure they are as
364 	 * expected.
365 	 */
366 
367 	vma_iter_set(&vmi, 0);
368 	vma = vma_iter_load(&vmi);
369 
370 	ASSERT_EQ(vma->vm_start, 0);
371 	ASSERT_EQ(vma->vm_end, 0x1000);
372 	ASSERT_EQ(vma->vm_pgoff, 0);
373 
374 	detach_free_vma(vma);
375 	vma_iter_clear(&vmi);
376 
377 	vma = vma_next(&vmi);
378 
379 	ASSERT_EQ(vma->vm_start, 0x1000);
380 	ASSERT_EQ(vma->vm_end, 0x2000);
381 	ASSERT_EQ(vma->vm_pgoff, 1);
382 
383 	detach_free_vma(vma);
384 	vma_iter_clear(&vmi);
385 
386 	vma = vma_next(&vmi);
387 
388 	ASSERT_EQ(vma->vm_start, 0x2000);
389 	ASSERT_EQ(vma->vm_end, 0x3000);
390 	ASSERT_EQ(vma->vm_pgoff, 2);
391 
392 	detach_free_vma(vma);
393 	mtree_destroy(&mm.mm_mt);
394 
395 	return true;
396 }
397 
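/* vma_expand() should grow a VMA into the adjacent unmapped range. */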
398 static bool test_simple_expand(void)
399 {
400 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
401 	struct mm_struct mm = {};
402 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
403 	VMA_ITERATOR(vmi, &mm, 0);
404 	struct vma_merge_struct vmg = {
405 		.vmi = &vmi,
406 		.target = vma,
407 		.start = 0,
408 		.end = 0x3000,
409 		.pgoff = 0,
410 	};
411 
412 	ASSERT_FALSE(attach_vma(&mm, vma));
413 
414 	ASSERT_FALSE(expand_existing(&vmg));
415 
416 	ASSERT_EQ(vma->vm_start, 0);
417 	ASSERT_EQ(vma->vm_end, 0x3000);
418 	ASSERT_EQ(vma->vm_pgoff, 0);
419 
420 	detach_free_vma(vma);
421 	mtree_destroy(&mm.mm_mt);
422 
423 	return true;
424 }
425 
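/* vma_shrink() should trim a VMA down to the requested range. */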
426 static bool test_simple_shrink(void)
427 {
428 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
429 	struct mm_struct mm = {};
430 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
431 	VMA_ITERATOR(vmi, &mm, 0);
432 
433 	ASSERT_FALSE(attach_vma(&mm, vma));
434 
435 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
436 
437 	ASSERT_EQ(vma->vm_start, 0);
438 	ASSERT_EQ(vma->vm_end, 0x1000);
439 	ASSERT_EQ(vma->vm_pgoff, 0);
440 
441 	detach_free_vma(vma);
442 	mtree_destroy(&mm.mm_mt);
443 
444 	return true;
445 }
446 
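/*
 * Exercise new-VMA merging against existing VMAs A, B and C in various
 * adjacency configurations, optionally marking each participant VM_STICKY.
 */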
447 static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
448 {
449 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
450 	struct mm_struct mm = {};
451 	VMA_ITERATOR(vmi, &mm, 0);
452 	struct vma_merge_struct vmg = {
453 		.mm = &mm,
454 		.vmi = &vmi,
455 	};
456 	struct anon_vma_chain dummy_anon_vma_chain_a = {
457 		.anon_vma = &dummy_anon_vma,
458 	};
459 	struct anon_vma_chain dummy_anon_vma_chain_b = {
460 		.anon_vma = &dummy_anon_vma,
461 	};
462 	struct anon_vma_chain dummy_anon_vma_chain_c = {
463 		.anon_vma = &dummy_anon_vma,
464 	};
465 	struct anon_vma_chain dummy_anon_vma_chain_d = {
466 		.anon_vma = &dummy_anon_vma,
467 	};
468 	const struct vm_operations_struct vm_ops = {
469 		.close = dummy_close,
470 	};
471 	int count;
472 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
473 	bool merged;
474 
475 	if (is_sticky)
476 		vm_flags |= VM_STICKY;
477 
478 	/*
479 	 * 0123456789abc
480 	 * AA B       CC
481 	 */
482 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
483 	ASSERT_NE(vma_a, NULL);
484 	if (a_is_sticky)
485 		vm_flags_set(vma_a, VM_STICKY);
486 	/* We give each VMA a single avc so we can test anon_vma duplication. */
487 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
488 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
489 
490 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
491 	ASSERT_NE(vma_b, NULL);
492 	if (b_is_sticky)
493 		vm_flags_set(vma_b, VM_STICKY);
494 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
495 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
496 
497 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
498 	ASSERT_NE(vma_c, NULL);
499 	if (c_is_sticky)
500 		vm_flags_set(vma_c, VM_STICKY);
501 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
502 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
503 
504 	/*
505 	 * NO merge.
506 	 *
507 	 * 0123456789abc
508 	 * AA B   **  CC
509 	 */
510 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
511 	ASSERT_NE(vma_d, NULL);
512 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
513 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
514 	ASSERT_FALSE(merged);
515 	ASSERT_EQ(mm.map_count, 4);
516 
517 	/*
518 	 * Merge BOTH sides.
519 	 *
520 	 * 0123456789abc
521 	 * AA*B   DD  CC
522 	 */
523 	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
524 	vma_b->anon_vma = &dummy_anon_vma;
525 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
526 	ASSERT_EQ(vma, vma_a);
527 	/* Merge with A, delete B. */
528 	ASSERT_TRUE(merged);
529 	ASSERT_EQ(vma->vm_start, 0);
530 	ASSERT_EQ(vma->vm_end, 0x4000);
531 	ASSERT_EQ(vma->vm_pgoff, 0);
532 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
533 	ASSERT_TRUE(vma_write_started(vma));
534 	ASSERT_EQ(mm.map_count, 3);
535 	if (is_sticky || a_is_sticky || b_is_sticky)
536 		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
537 
538 	/*
539 	 * Merge to PREVIOUS VMA.
540 	 *
541 	 * 0123456789abc
542 	 * AAAA*  DD  CC
543 	 */
544 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
545 	ASSERT_EQ(vma, vma_a);
546 	/* Extend A. */
547 	ASSERT_TRUE(merged);
548 	ASSERT_EQ(vma->vm_start, 0);
549 	ASSERT_EQ(vma->vm_end, 0x5000);
550 	ASSERT_EQ(vma->vm_pgoff, 0);
551 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
552 	ASSERT_TRUE(vma_write_started(vma));
553 	ASSERT_EQ(mm.map_count, 3);
554 	if (is_sticky || a_is_sticky)
555 		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
556 
557 	/*
558 	 * Merge to NEXT VMA.
559 	 *
560 	 * 0123456789abc
561 	 * AAAAA *DD  CC
562 	 */
563 	vma_d->anon_vma = &dummy_anon_vma;
564 	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
565 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
566 	ASSERT_EQ(vma, vma_d);
567 	/* Prepend. */
568 	ASSERT_TRUE(merged);
569 	ASSERT_EQ(vma->vm_start, 0x6000);
570 	ASSERT_EQ(vma->vm_end, 0x9000);
571 	ASSERT_EQ(vma->vm_pgoff, 6);
572 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
573 	ASSERT_TRUE(vma_write_started(vma));
574 	ASSERT_EQ(mm.map_count, 3);
575 	if (is_sticky) /* D uses is_sticky. */
576 		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
577 
578 	/*
579 	 * Merge BOTH sides.
580 	 *
581 	 * 0123456789abc
582 	 * AAAAA*DDD  CC
583 	 */
584 	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
585 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
586 	ASSERT_EQ(vma, vma_a);
587 	/* Merge with A, delete D. */
588 	ASSERT_TRUE(merged);
589 	ASSERT_EQ(vma->vm_start, 0);
590 	ASSERT_EQ(vma->vm_end, 0x9000);
591 	ASSERT_EQ(vma->vm_pgoff, 0);
592 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
593 	ASSERT_TRUE(vma_write_started(vma));
594 	ASSERT_EQ(mm.map_count, 2);
595 	if (is_sticky || a_is_sticky)
596 		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
597 
598 	/*
599 	 * Merge to NEXT VMA.
600 	 *
601 	 * 0123456789abc
602 	 * AAAAAAAAA *CC
603 	 */
604 	vma_c->anon_vma = &dummy_anon_vma;
605 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
606 	ASSERT_EQ(vma, vma_c);
607 	/* Prepend C. */
608 	ASSERT_TRUE(merged);
609 	ASSERT_EQ(vma->vm_start, 0xa000);
610 	ASSERT_EQ(vma->vm_end, 0xc000);
611 	ASSERT_EQ(vma->vm_pgoff, 0xa);
612 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
613 	ASSERT_TRUE(vma_write_started(vma));
614 	ASSERT_EQ(mm.map_count, 2);
615 	if (is_sticky || c_is_sticky)
616 		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
617 
618 	/*
619 	 * Merge BOTH sides.
620 	 *
621 	 * 0123456789abc
622 	 * AAAAAAAAA*CCC
623 	 */
624 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
625 	ASSERT_EQ(vma, vma_a);
626 	/* Extend A and delete C. */
627 	ASSERT_TRUE(merged);
628 	ASSERT_EQ(vma->vm_start, 0);
629 	ASSERT_EQ(vma->vm_end, 0xc000);
630 	ASSERT_EQ(vma->vm_pgoff, 0);
631 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
632 	ASSERT_TRUE(vma_write_started(vma));
633 	ASSERT_EQ(mm.map_count, 1);
634 	if (is_sticky || a_is_sticky || c_is_sticky)
635 		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
636 
637 	/*
638 	 * Final state.
639 	 *
640 	 * 0123456789abc
641 	 * AAAAAAAAAAAAA
642 	 */
643 
644 	count = 0;
645 	vma_iter_set(&vmi, 0);
646 	for_each_vma(vmi, vma) {
647 		ASSERT_NE(vma, NULL);
648 		ASSERT_EQ(vma->vm_start, 0);
649 		ASSERT_EQ(vma->vm_end, 0xc000);
650 		ASSERT_EQ(vma->vm_pgoff, 0);
651 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
652 
653 		detach_free_vma(vma);
654 		count++;
655 	}
656 
657 	/* Should only have one VMA left (though freed) after all is done. */
658 	ASSERT_EQ(count, 1);
659 
660 	mtree_destroy(&mm.mm_mt);
661 	return true;
662 }
663 
664 static bool test_merge_new(void)
665 {
666 	int i, j, k, l;
667 
668 	/* Generate every possible permutation of sticky flags. */
669 	for (i = 0; i < 2; i++)
670 		for (j = 0; j < 2; j++)
671 			for (k = 0; k < 2; k++)
672 				for (l = 0; l < 2; l++)
673 					ASSERT_TRUE(__test_merge_new(i, j, k, l));
674 
675 	return true;
676 }
677 
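/* VM_SPECIAL flags must prevent a merge that would otherwise succeed. */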
678 static bool test_vma_merge_special_flags(void)
679 {
680 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
681 	struct mm_struct mm = {};
682 	VMA_ITERATOR(vmi, &mm, 0);
683 	struct vma_merge_struct vmg = {
684 		.mm = &mm,
685 		.vmi = &vmi,
686 	};
687 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
688 	vm_flags_t all_special_flags = 0;
689 	int i;
690 	struct vm_area_struct *vma_left, *vma;
691 
692 	/* Make sure there aren't new VM_SPECIAL flags. */
693 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
694 		all_special_flags |= special_flags[i];
695 	}
696 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
697 
698 	/*
699 	 * 01234
700 	 * AAA
701 	 */
702 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
703 	ASSERT_NE(vma_left, NULL);
704 
705 	/* 1. Set up new VMA with special flag that would otherwise merge. */
706 
707 	/*
708 	 * 01234
709 	 * AAA*
710 	 *
711 	 * This should merge if not for the VM_SPECIAL flag.
712 	 */
713 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
714 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
715 		vm_flags_t special_flag = special_flags[i];
716 
717 		vm_flags_reset(vma_left, vm_flags | special_flag);
718 		vmg.vm_flags = vm_flags | special_flag;
719 		vma = merge_new(&vmg);
720 		ASSERT_EQ(vma, NULL);
721 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
722 	}
723 
724 	/* 2. Modify VMA with special flag that would otherwise merge. */
725 
726 	/*
727 	 * 01234
728 	 * AAAB
729 	 *
730 	 * Create a VMA to modify.
731 	 */
732 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
733 	ASSERT_NE(vma, NULL);
734 	vmg.middle = vma;
735 
736 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
737 		vm_flags_t special_flag = special_flags[i];
738 
739 		vm_flags_reset(vma_left, vm_flags | special_flag);
740 		vmg.vm_flags = vm_flags | special_flag;
741 		vma = merge_existing(&vmg);
742 		ASSERT_EQ(vma, NULL);
743 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
744 	}
745 
746 	cleanup_mm(&mm, &vmi);
747 	return true;
748 }
749 
750 static bool test_vma_merge_with_close(void)
751 {
752 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
753 	struct mm_struct mm = {};
754 	VMA_ITERATOR(vmi, &mm, 0);
755 	struct vma_merge_struct vmg = {
756 		.mm = &mm,
757 		.vmi = &vmi,
758 	};
759 	const struct vm_operations_struct vm_ops = {
760 		.close = dummy_close,
761 	};
762 	struct vm_area_struct *vma_prev, *vma_next, *vma;
763 
764 	/*
765 	 * When merging VMAs we are not permitted to remove any VMA that has a
766 	 * vm_ops->close() hook.
767 	 *
768 	 * Considering the two possible adjacent VMAs to which a VMA can be
769 	 * merged:
770 	 *
771 	 * [ prev ][ vma ][ next ]
772 	 *
773 	 * In no case will we need to delete prev. If the operation is
774 	 * mergeable, then prev will be extended with one or both of vma and
775 	 * next deleted.
776 	 *
777 	 * As a result, during initial mergeability checks, only
778 	 * can_vma_merge_before() (which implies the VMA being merged with is
779 	 * 'next' as shown above) bothers to check to see whether the next VMA
780 	 * has a vm_ops->close() callback that will need to be called when
781 	 * removed.
782 	 *
783 	 * If it does, then we cannot merge as the resources that the close()
784 	 * operation potentially clears down are tied only to the existing VMA
785 	 * range and we have no way of extending those to the nearly merged one.
786 	 *
787 	 * We must consider two scenarios:
788 	 *
789 	 * A.
790 	 *
791 	 * vm_ops->close:     -       -    !NULL
792 	 *                 [ prev ][ vma ][ next ]
793 	 *
794 	 * Where prev may or may not be present/mergeable.
795 	 *
796 	 * This is picked up by a specific check in can_vma_merge_before().
797 	 *
798 	 * B.
799 	 *
800 	 * vm_ops->close:     -     !NULL
801 	 *                 [ prev ][ vma ]
802 	 *
803 	 * Where prev and vma are present and mergeable.
804 	 *
805 	 * This is picked up by a specific check in the modified VMA merge.
806 	 *
807 	 * IMPORTANT NOTE: We make the assumption that the following case:
808 	 *
809 	 *    -     !NULL   NULL
810 	 * [ prev ][ vma ][ next ]
811 	 *
812 	 * Cannot occur, because vma->vm_ops being the same implies the same
813 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
814 	 * would be set too, and thus scenario A would pick this up.
815 	 */
816 
817 	/*
818 	 * The only case of a new VMA merge that results in a VMA being deleted
819 	 * is one where both the previous and next VMAs are merged - in this
820 	 * instance the next VMA is deleted, and the previous VMA is extended.
821 	 *
822 	 * If we are unable to do so, we reduce the operation to simply
823 	 * extending the prev VMA and not merging next.
824 	 *
825 	 * 0123456789
826 	 * PPP**NNNN
827 	 *             ->
828 	 * 0123456789
829 	 * PPPPPPNNN
830 	 */
831 
832 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
833 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
834 	vma_next->vm_ops = &vm_ops;
835 
836 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
837 	ASSERT_EQ(merge_new(&vmg), vma_prev);
838 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
839 	ASSERT_EQ(vma_prev->vm_start, 0);
840 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
841 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
842 
843 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
844 
845 	/*
846 	 * When modifying an existing VMA there are further cases where we
847 	 * delete VMAs.
848 	 *
849 	 *    <>
850 	 * 0123456789
851 	 * PPPVV
852 	 *
853 	 * In this instance, if vma has a close hook, the merge simply cannot
854 	 * proceed.
855 	 */
856 
857 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
858 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
859 	vma->vm_ops = &vm_ops;
860 
861 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
862 	vmg.prev = vma_prev;
863 	vmg.middle = vma;
864 
865 	/*
866 	 * The VMA being modified in a way that would otherwise merge should
867 	 * also fail.
868 	 */
869 	ASSERT_EQ(merge_existing(&vmg), NULL);
870 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
871 
872 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
873 
874 	/*
875 	 * This case is mirrored if merging with next.
876 	 *
877 	 *    <>
878 	 * 0123456789
879 	 *    VVNNNN
880 	 *
881 	 * In this instance, if vma has a close hook, the merge simply cannot
882 	 * proceed.
883 	 */
884 
885 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
886 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
887 	vma->vm_ops = &vm_ops;
888 
889 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
890 	vmg.middle = vma;
891 	ASSERT_EQ(merge_existing(&vmg), NULL);
892 	/*
893 	 * Initially this is misapprehended as an out of memory report, as the
894 	 * close() check is handled in the same way as anon_vma duplication
895 	 * failures; however, a subsequent patch resolves this.
896 	 */
897 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
898 
899 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
900 
901 	/*
902 	 * Finally, we consider two variants of the case where we modify a VMA
903 	 * to merge with both the previous and next VMAs.
904 	 *
905 	 * The first variant is where vma has a close hook. In this instance, no
906 	 * merge can proceed.
907 	 *
908 	 *    <>
909 	 * 0123456789
910 	 * PPPVVNNNN
911 	 */
912 
913 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
914 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
915 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
916 	vma->vm_ops = &vm_ops;
917 
918 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
919 	vmg.prev = vma_prev;
920 	vmg.middle = vma;
921 
922 	ASSERT_EQ(merge_existing(&vmg), NULL);
923 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
924 
925 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
926 
927 	/*
928 	 * The second variant is where next has a close hook. In this instance,
929 	 * we reduce the operation to a merge between prev and vma.
930 	 *
931 	 *    <>
932 	 * 0123456789
933 	 * PPPVVNNNN
934 	 *            ->
935 	 * 0123456789
936 	 * PPPPPNNNN
937 	 */
938 
939 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
940 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
941 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
942 	vma_next->vm_ops = &vm_ops;
943 
944 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
945 	vmg.prev = vma_prev;
946 	vmg.middle = vma;
947 
948 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
949 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
950 	ASSERT_EQ(vma_prev->vm_start, 0);
951 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
952 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
953 
954 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
955 
956 	return true;
957 }
958 
959 static bool test_vma_merge_new_with_close(void)
960 {
961 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
962 	struct mm_struct mm = {};
963 	VMA_ITERATOR(vmi, &mm, 0);
964 	struct vma_merge_struct vmg = {
965 		.mm = &mm,
966 		.vmi = &vmi,
967 	};
968 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
969 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
970 	const struct vm_operations_struct vm_ops = {
971 		.close = dummy_close,
972 	};
973 	struct vm_area_struct *vma;
974 
975 	/*
976 	 * We should allow the partial merge of a proposed new VMA if the
977 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
978 	 * compatible), e.g.:
979 	 *
980 	 *        New VMA
981 	 *    A  v-------v  B
982 	 * |-----|       |-----|
983 	 *  close         close
984 	 *
985 	 * Since the rule is to not DELETE a VMA with a close operation, this
986 	 * should be permitted, only rather than expanding A and deleting B, we
987 	 * should simply expand A and leave B intact, e.g.:
988 	 *
989 	 *        New VMA
990 	 *       A          B
991 	 * |------------||-----|
992 	 *  close         close
993 	 */
994 
995 	/* Have prev and next have a vm_ops->close() hook. */
996 	vma_prev->vm_ops = &vm_ops;
997 	vma_next->vm_ops = &vm_ops;
998 
999 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
1000 	vma = merge_new(&vmg);
1001 	ASSERT_NE(vma, NULL);
1002 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1003 	ASSERT_EQ(vma->vm_start, 0);
1004 	ASSERT_EQ(vma->vm_end, 0x5000);
1005 	ASSERT_EQ(vma->vm_pgoff, 0);
1006 	ASSERT_EQ(vma->vm_ops, &vm_ops);
1007 	ASSERT_TRUE(vma_write_started(vma));
1008 	ASSERT_EQ(mm.map_count, 2);
1009 
1010 	cleanup_mm(&mm, &vmi);
1011 	return true;
1012 }
1013 
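/*
 * Exercise merging an existing VMA's range to the left, right and both sides,
 * including the non-mergeable sub-range cases, with optional VM_STICKY flags.
 */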
1014 static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
1015 {
1016 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1017 	vm_flags_t prev_flags = vm_flags;
1018 	vm_flags_t next_flags = vm_flags;
1019 	struct mm_struct mm = {};
1020 	VMA_ITERATOR(vmi, &mm, 0);
1021 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1022 	struct vma_merge_struct vmg = {
1023 		.mm = &mm,
1024 		.vmi = &vmi,
1025 	};
1026 	const struct vm_operations_struct vm_ops = {
1027 		.close = dummy_close,
1028 	};
1029 	struct anon_vma_chain avc = {};
1030 
1031 	if (prev_is_sticky)
1032 		prev_flags |= VM_STICKY;
1033 	if (middle_is_sticky)
1034 		vm_flags |= VM_STICKY;
1035 	if (next_is_sticky)
1036 		next_flags |= VM_STICKY;
1037 
1038 	/*
1039 	 * Merge right case - partial span.
1040 	 *
1041 	 *    <->
1042 	 * 0123456789
1043 	 *   VVVVNNN
1044 	 *            ->
1045 	 * 0123456789
1046 	 *   VNNNNNN
1047 	 */
1048 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
1049 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1050 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
1051 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
1052 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
1053 	vmg.middle = vma;
1054 	vmg.prev = vma;
1055 	vma_set_dummy_anon_vma(vma, &avc);
1056 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1057 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1058 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1059 	ASSERT_EQ(vma_next->vm_end, 0x9000);
1060 	ASSERT_EQ(vma_next->vm_pgoff, 3);
1061 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1062 	ASSERT_EQ(vma->vm_start, 0x2000);
1063 	ASSERT_EQ(vma->vm_end, 0x3000);
1064 	ASSERT_EQ(vma->vm_pgoff, 2);
1065 	ASSERT_TRUE(vma_write_started(vma));
1066 	ASSERT_TRUE(vma_write_started(vma_next));
1067 	ASSERT_EQ(mm.map_count, 2);
1068 	if (middle_is_sticky || next_is_sticky)
1069 		ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
1070 
1071 	/* Clear down and reset. */
1072 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1073 
1074 	/*
1075 	 * Merge right case - full span.
1076 	 *
1077 	 *   <-->
1078 	 * 0123456789
1079 	 *   VVVVNNN
1080 	 *            ->
1081 	 * 0123456789
1082 	 *   NNNNNNN
1083 	 */
1084 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
1085 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
1086 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
1087 	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
1088 	vmg.middle = vma;
1089 	vma_set_dummy_anon_vma(vma, &avc);
1090 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1091 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1092 	ASSERT_EQ(vma_next->vm_start, 0x2000);
1093 	ASSERT_EQ(vma_next->vm_end, 0x9000);
1094 	ASSERT_EQ(vma_next->vm_pgoff, 2);
1095 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1096 	ASSERT_TRUE(vma_write_started(vma_next));
1097 	ASSERT_EQ(mm.map_count, 1);
1098 	if (middle_is_sticky || next_is_sticky)
1099 		ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
1100 
1101 	/* Clear down and reset. We should have deleted vma. */
1102 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1103 
1104 	/*
1105 	 * Merge left case - partial span.
1106 	 *
1107 	 *    <->
1108 	 * 0123456789
1109 	 * PPPVVVV
1110 	 *            ->
1111 	 * 0123456789
1112 	 * PPPPPPV
1113 	 */
1114 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
1115 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1116 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
1117 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1118 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
1119 	vmg.prev = vma_prev;
1120 	vmg.middle = vma;
1121 	vma_set_dummy_anon_vma(vma, &avc);
1122 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1123 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1124 	ASSERT_EQ(vma_prev->vm_start, 0);
1125 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
1126 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1127 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1128 	ASSERT_EQ(vma->vm_start, 0x6000);
1129 	ASSERT_EQ(vma->vm_end, 0x7000);
1130 	ASSERT_EQ(vma->vm_pgoff, 6);
1131 	ASSERT_TRUE(vma_write_started(vma_prev));
1132 	ASSERT_TRUE(vma_write_started(vma));
1133 	ASSERT_EQ(mm.map_count, 2);
1134 	if (prev_is_sticky || middle_is_sticky)
1135 		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
1136 
1137 	/* Clear down and reset. */
1138 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1139 
1140 	/*
1141 	 * Merge left case - full span.
1142 	 *
1143 	 *    <-->
1144 	 * 0123456789
1145 	 * PPPVVVV
1146 	 *            ->
1147 	 * 0123456789
1148 	 * PPPPPPP
1149 	 */
1150 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
1151 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1152 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
1153 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
1154 	vmg.prev = vma_prev;
1155 	vmg.middle = vma;
1156 	vma_set_dummy_anon_vma(vma, &avc);
1157 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1158 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1159 	ASSERT_EQ(vma_prev->vm_start, 0);
1160 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1161 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1162 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1163 	ASSERT_TRUE(vma_write_started(vma_prev));
1164 	ASSERT_EQ(mm.map_count, 1);
1165 	if (prev_is_sticky || middle_is_sticky)
1166 		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
1167 
1168 	/* Clear down and reset. We should have deleted vma. */
1169 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1170 
1171 	/*
1172 	 * Merge both case.
1173 	 *
1174 	 *    <-->
1175 	 * 0123456789
1176 	 * PPPVVVVNNN
1177 	 *             ->
1178 	 * 0123456789
1179 	 * PPPPPPPPPP
1180 	 */
1181 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
1182 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1183 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
1184 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
1185 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
1186 	vmg.prev = vma_prev;
1187 	vmg.middle = vma;
1188 	vma_set_dummy_anon_vma(vma, &avc);
1189 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1190 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1191 	ASSERT_EQ(vma_prev->vm_start, 0);
1192 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
1193 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1194 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1195 	ASSERT_TRUE(vma_write_started(vma_prev));
1196 	ASSERT_EQ(mm.map_count, 1);
1197 	if (prev_is_sticky || middle_is_sticky || next_is_sticky)
1198 		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
1199 
1200 	/* Clear down and reset. We should have deleted prev and next. */
1201 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1202 
1203 	/*
1204 	 * Non-merge ranges. The modified VMA merge operation assumes that the
1205 	 * caller always specifies ranges within the input VMA, so we need only
1206 	 * examine these cases.
1207 	 *
1208 	 *     -
1209 	 *      -
1210 	 *       -
1211 	 *     <->
1212 	 *     <>
1213 	 *      <>
1214 	 * 0123456789a
1215 	 * PPPVVVVVNNN
1216 	 */
1217 
1218 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
1219 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
1220 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);
1221 
1222 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
1223 	vmg.prev = vma;
1224 	vmg.middle = vma;
1225 	ASSERT_EQ(merge_existing(&vmg), NULL);
1226 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1227 
1228 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
1229 	vmg.prev = vma;
1230 	vmg.middle = vma;
1231 	ASSERT_EQ(merge_existing(&vmg), NULL);
1232 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1233 
1234 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
1235 	vmg.prev = vma;
1236 	vmg.middle = vma;
1237 	ASSERT_EQ(merge_existing(&vmg), NULL);
1238 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1239 
1240 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
1241 	vmg.prev = vma;
1242 	vmg.middle = vma;
1243 	ASSERT_EQ(merge_existing(&vmg), NULL);
1244 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1245 
1246 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
1247 	vmg.prev = vma;
1248 	vmg.middle = vma;
1249 	ASSERT_EQ(merge_existing(&vmg), NULL);
1250 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1251 
1252 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
1253 	vmg.prev = vma;
1254 	vmg.middle = vma;
1255 	ASSERT_EQ(merge_existing(&vmg), NULL);
1256 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1257 
1258 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1259 
1260 	return true;
1261 }
1262 
1263 static bool test_merge_existing(void)
1264 {
1265 	int i, j, k;
1266 
1267 	/* Generate every possible permutation of sticky flags. */
1268 	for (i = 0; i < 2; i++)
1269 		for (j = 0; j < 2; j++)
1270 			for (k = 0; k < 2; k++)
1271 				ASSERT_TRUE(__test_merge_existing(i, j, k));
1272 
1273 	return true;
1274 }
1275 
1276 static bool test_anon_vma_non_mergeable(void)
1277 {
1278 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1279 	struct mm_struct mm = {};
1280 	VMA_ITERATOR(vmi, &mm, 0);
1281 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1282 	struct vma_merge_struct vmg = {
1283 		.mm = &mm,
1284 		.vmi = &vmi,
1285 	};
1286 	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
1287 	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
1288 	struct anon_vma dummy_anon_vma_2;
1289 
1290 	/*
1291 	 * In the case of modified VMA merge, merging both left and right VMAs
1292 	 * but where prev and next have incompatible anon_vma objects, we revert
1293 	 * to a merge of prev and VMA:
1294 	 *
1295 	 *    <-->
1296 	 * 0123456789
1297 	 * PPPVVVVNNN
1298 	 *            ->
1299 	 * 0123456789
1300 	 * PPPPPPPNNN
1301 	 */
1302 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1303 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
1304 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
1305 
1306 	/*
1307 	 * Give both prev and next single anon_vma_chain fields, so they will
1308 	 * merge with the NULL vmg->anon_vma.
1309 	 *
1310 	 * However, when prev is compared to next, the merge should fail.
1311 	 */
1312 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
1313 	vmg.prev = vma_prev;
1314 	vmg.middle = vma;
1315 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1316 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1317 
1318 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1319 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1320 	ASSERT_EQ(vma_prev->vm_start, 0);
1321 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1322 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1323 	ASSERT_TRUE(vma_write_started(vma_prev));
1324 	ASSERT_FALSE(vma_write_started(vma_next));
1325 
1326 	/* Clear down and reset. */
1327 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1328 
1329 	/*
1330 	 * Now consider the new VMA case. This is equivalent, only adding a new
1331 	 * VMA in a gap between prev and next.
1332 	 *
1333 	 *    <-->
1334 	 * 0123456789
1335 	 * PPP****NNN
1336 	 *            ->
1337 	 * 0123456789
1338 	 * PPPPPPPNNN
1339 	 */
1340 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1341 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
1342 
1343 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
1344 	vmg.prev = vma_prev;
1345 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1346 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1347 
1348 	vmg.anon_vma = NULL;
1349 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1350 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1351 	ASSERT_EQ(vma_prev->vm_start, 0);
1352 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1353 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1354 	ASSERT_TRUE(vma_write_started(vma_prev));
1355 	ASSERT_FALSE(vma_write_started(vma_next));
1356 
1357 	/* Final cleanup. */
1358 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1359 
1360 	return true;
1361 }
1362 
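/* Verify anon_vma duplication across the various merge and expand cases. */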
1363 static bool test_dup_anon_vma(void)
1364 {
1365 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1366 	struct mm_struct mm = {};
1367 	VMA_ITERATOR(vmi, &mm, 0);
1368 	struct vma_merge_struct vmg = {
1369 		.mm = &mm,
1370 		.vmi = &vmi,
1371 	};
1372 	struct anon_vma_chain dummy_anon_vma_chain = {
1373 		.anon_vma = &dummy_anon_vma,
1374 	};
1375 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1376 
1377 	reset_dummy_anon_vma();
1378 
1379 	/*
1380 	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1381 	 * assigns it to the expanded VMA.
1382 	 *
1383 	 * This covers new VMA merging, as these operations amount to a VMA
1384 	 * expand.
1385 	 */
1386 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1387 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
1388 	vma_next->anon_vma = &dummy_anon_vma;
1389 
1390 	vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
1391 	vmg.target = vma_prev;
1392 	vmg.next = vma_next;
1393 
1394 	ASSERT_EQ(expand_existing(&vmg), 0);
1395 
1396 	/* Will have been cloned. */
1397 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1398 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1399 
1400 	/* Cleanup ready for next run. */
1401 	cleanup_mm(&mm, &vmi);
1402 
1403 	/*
1404 	 * next has anon_vma, we assign to prev.
1405 	 *
1406 	 *         |<----->|
1407 	 * |-------*********-------|
1408 	 *   prev     vma     next
1409 	 *  extend   delete  delete
1410 	 */
1411 
1412 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1413 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
1414 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
1415 
1416 	/* Initialise avc so mergeability check passes. */
1417 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1418 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1419 
1420 	vma_next->anon_vma = &dummy_anon_vma;
1421 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
1422 	vmg.prev = vma_prev;
1423 	vmg.middle = vma;
1424 
1425 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1426 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1427 
1428 	ASSERT_EQ(vma_prev->vm_start, 0);
1429 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1430 
1431 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1432 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1433 
1434 	cleanup_mm(&mm, &vmi);
1435 
1436 	/*
1437 	 * vma has anon_vma, we assign to prev.
1438 	 *
1439 	 *         |<----->|
1440 	 * |-------*********-------|
1441 	 *   prev     vma     next
1442 	 *  extend   delete  delete
1443 	 */
1444 
1445 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1446 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
1447 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
1448 	vmg.anon_vma = &dummy_anon_vma;
1449 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1450 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
1451 	vmg.prev = vma_prev;
1452 	vmg.middle = vma;
1453 
1454 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1455 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1456 
1457 	ASSERT_EQ(vma_prev->vm_start, 0);
1458 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1459 
1460 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1461 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1462 
1463 	cleanup_mm(&mm, &vmi);
1464 
1465 	/*
1466 	 * vma has anon_vma, we assign to prev.
1467 	 *
1468 	 *         |<----->|
1469 	 * |-------*************
1470 	 *   prev       vma
1471 	 *  extend shrink/delete
1472 	 */
1473 
1474 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1475 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
1476 
1477 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1478 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
1479 	vmg.prev = vma_prev;
1480 	vmg.middle = vma;
1481 
1482 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1483 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1484 
1485 	ASSERT_EQ(vma_prev->vm_start, 0);
1486 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1487 
1488 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1489 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1490 
1491 	cleanup_mm(&mm, &vmi);
1492 
1493 	/*
1494 	 * vma has anon_vma, we assign to next.
1495 	 *
1496 	 *     |<----->|
1497 	 * *************-------|
1498 	 *      vma       next
1499 	 * shrink/delete extend
1500 	 */
1501 
1502 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags);
1503 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
1504 
1505 	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
1506 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
1507 	vmg.prev = vma;
1508 	vmg.middle = vma;
1509 
1510 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1511 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1512 
1513 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1514 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1515 
1516 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1517 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1518 
1519 	cleanup_mm(&mm, &vmi);
1520 	return true;
1521 }
1522 
1523 static bool test_vmi_prealloc_fail(void)
1524 {
1525 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1526 	struct mm_struct mm = {};
1527 	VMA_ITERATOR(vmi, &mm, 0);
1528 	struct vma_merge_struct vmg = {
1529 		.mm = &mm,
1530 		.vmi = &vmi,
1531 	};
1532 	struct anon_vma_chain avc = {};
1533 	struct vm_area_struct *vma_prev, *vma;
1534 
1535 	/*
1536 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1537 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1538 	 * the duplicated anon_vma is unlinked.
1539 	 */
1540 
1541 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1542 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
1543 	vma->anon_vma = &dummy_anon_vma;
1544 
1545 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma);
1546 	vmg.prev = vma_prev;
1547 	vmg.middle = vma;
1548 	vma_set_dummy_anon_vma(vma, &avc);
1549 
1550 	fail_prealloc = true;
1551 
1552 	/* This will cause the merge to fail. */
1553 	ASSERT_EQ(merge_existing(&vmg), NULL);
1554 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1555 	/* We will already have assigned the anon_vma. */
1556 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1557 	/* And it was both cloned and unlinked. */
1558 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1559 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1560 
1561 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1562 
1563 	/*
1564 	 * We repeat the same operation for expanding a VMA, which is what new
1565 	 * VMA merging ultimately uses too. This asserts that unlinking is
1566 	 * performed in this case too.
1567 	 */
1568 
1569 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
1570 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
1571 	vma->anon_vma = &dummy_anon_vma;
1572 
1573 	vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags);
1574 	vmg.target = vma_prev;
1575 	vmg.next = vma;
1576 
1577 	fail_prealloc = true;
1578 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1579 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1580 
1581 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1582 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1583 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1584 
1585 	cleanup_mm(&mm, &vmi);
1586 	return true;
1587 }
1588 
1589 static bool test_merge_extend(void)
1590 {
1591 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1592 	struct mm_struct mm = {};
1593 	VMA_ITERATOR(vmi, &mm, 0x1000);
1594 	struct vm_area_struct *vma;
1595 
1596 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags);
1597 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
1598 
1599 	/*
1600 	 * Extend a VMA into the gap between itself and the following VMA.
1601 	 * This should result in a merge.
1602 	 *
1603 	 * <->
1604 	 * *  *
1605 	 *
1606 	 */
1607 
1608 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1609 	ASSERT_EQ(vma->vm_start, 0);
1610 	ASSERT_EQ(vma->vm_end, 0x4000);
1611 	ASSERT_EQ(vma->vm_pgoff, 0);
1612 	ASSERT_TRUE(vma_write_started(vma));
1613 	ASSERT_EQ(mm.map_count, 1);
1614 
1615 	cleanup_mm(&mm, &vmi);
1616 	return true;
1617 }
1618 
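/* copy_vma() should relocate a VMA, merging with an adjacent VMA if possible. */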
1619 static bool test_copy_vma(void)
1620 {
1621 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1622 	struct mm_struct mm = {};
1623 	bool need_locks = false;
1624 	VMA_ITERATOR(vmi, &mm, 0);
1625 	struct vm_area_struct *vma, *vma_new, *vma_next;
1626 
1627 	/* Move backwards and do not merge. */
1628 
1629 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
1630 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1631 	ASSERT_NE(vma_new, vma);
1632 	ASSERT_EQ(vma_new->vm_start, 0);
1633 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1634 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1635 	vma_assert_attached(vma_new);
1636 
1637 	cleanup_mm(&mm, &vmi);
1638 
1639 	/* Move a VMA into position next to another and merge the two. */
1640 
1641 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
1642 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags);
1643 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1644 	vma_assert_attached(vma_new);
1645 
1646 	ASSERT_EQ(vma_new, vma_next);
1647 
1648 	cleanup_mm(&mm, &vmi);
1649 	return true;
1650 }
1651 
1652 static bool test_expand_only_mode(void)
1653 {
1654 	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1655 	struct mm_struct mm = {};
1656 	VMA_ITERATOR(vmi, &mm, 0);
1657 	struct vm_area_struct *vma_prev, *vma;
1658 	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5);
1659 
1660 	/*
1661 	 * Place a VMA prior to the one we're expanding so we assert that we do
1662 	 * not erroneously try to traverse to the previous VMA even though we
1663 	 * have, through the use of the just_expand flag, indicated we do not
1664 	 * need to do so.
1665 	 */
1666 	alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
1667 
1668 	/*
1669 	 * We will be positioned at the prev VMA, but looking to expand to
1670 	 * 0x9000.
1671 	 */
1672 	vma_iter_set(&vmi, 0x3000);
1673 	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
1674 	vmg.prev = vma_prev;
1675 	vmg.just_expand = true;
1676 
1677 	vma = vma_merge_new_range(&vmg);
1678 	ASSERT_NE(vma, NULL);
1679 	ASSERT_EQ(vma, vma_prev);
1680 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1681 	ASSERT_EQ(vma->vm_start, 0x3000);
1682 	ASSERT_EQ(vma->vm_end, 0x9000);
1683 	ASSERT_EQ(vma->vm_pgoff, 3);
1684 	ASSERT_TRUE(vma_write_started(vma));
1685 	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1686 	vma_assert_attached(vma);
1687 
1688 	cleanup_mm(&mm, &vmi);
1689 	return true;
1690 }
1691 
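/* __mmap_region() should map at the requested address and merge adjacent mappings. */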
1692 static bool test_mmap_region_basic(void)
1693 {
1694 	struct mm_struct mm = {};
1695 	unsigned long addr;
1696 	struct vm_area_struct *vma;
1697 	VMA_ITERATOR(vmi, &mm, 0);
1698 
1699 	current->mm = &mm;
1700 
1701 	/* Map at 0x300000, length 0x3000. */
1702 	addr = __mmap_region(NULL, 0x300000, 0x3000,
1703 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1704 			     0x300, NULL);
1705 	ASSERT_EQ(addr, 0x300000);
1706 
1707 	/* Map at 0x250000, length 0x3000. */
1708 	addr = __mmap_region(NULL, 0x250000, 0x3000,
1709 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1710 			     0x250, NULL);
1711 	ASSERT_EQ(addr, 0x250000);
1712 
1713 	/* Map at 0x303000, merging to 0x300000 of length 0x6000. */
1714 	addr = __mmap_region(NULL, 0x303000, 0x3000,
1715 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1716 			     0x303, NULL);
1717 	ASSERT_EQ(addr, 0x303000);
1718 
1719 	/* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
1720 	addr = __mmap_region(NULL, 0x24d000, 0x3000,
1721 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1722 			     0x24d, NULL);
1723 	ASSERT_EQ(addr, 0x24d000);
1724 
1725 	ASSERT_EQ(mm.map_count, 2);
1726 
1727 	for_each_vma(vmi, vma) {
1728 		if (vma->vm_start == 0x300000) {
1729 			ASSERT_EQ(vma->vm_end, 0x306000);
1730 			ASSERT_EQ(vma->vm_pgoff, 0x300);
1731 		} else if (vma->vm_start == 0x24d000) {
1732 			ASSERT_EQ(vma->vm_end, 0x253000);
1733 			ASSERT_EQ(vma->vm_pgoff, 0x24d);
1734 		} else {
1735 			ASSERT_FALSE(true);
1736 		}
1737 	}
1738 
1739 	cleanup_mm(&mm, &vmi);
1740 	return true;
1741 }
1742 
1743 int main(void)
1744 {
1745 	int num_tests = 0, num_fail = 0;
1746 
1747 	maple_tree_init();
1748 	vma_state_init();
1749 
1750 #define TEST(name)							\
1751 	do {								\
1752 		num_tests++;						\
1753 		if (!test_##name()) {					\
1754 			num_fail++;					\
1755 			fprintf(stderr, "Test " #name " FAILED\n");	\
1756 		}							\
1757 	} while (0)
1758 
1759 	/* Very simple tests to kick the tyres. */
1760 	TEST(simple_merge);
1761 	TEST(simple_modify);
1762 	TEST(simple_expand);
1763 	TEST(simple_shrink);
1764 
1765 	TEST(merge_new);
1766 	TEST(vma_merge_special_flags);
1767 	TEST(vma_merge_with_close);
1768 	TEST(vma_merge_new_with_close);
1769 	TEST(merge_existing);
1770 	TEST(anon_vma_non_mergeable);
1771 	TEST(dup_anon_vma);
1772 	TEST(vmi_prealloc_fail);
1773 	TEST(merge_extend);
1774 	TEST(copy_vma);
1775 	TEST(expand_only_mode);
1776 
1777 	TEST(mmap_region_basic);
1778 
1779 #undef TEST
1780 
1781 	printf("%d tests run, %d passed, %d failed.\n",
1782 	       num_tests, num_tests - num_fail, num_fail);
1783 
1784 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1785 }
1786