xref: /linux/tools/testing/vma/tests/merge.c (revision 3a6455d56bd7c4cfb1ea35ddae052943065e338e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /* Helper function which provides a wrapper around a merge new VMA operation. */
4 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
5 {
6 	struct vm_area_struct *vma;
7 	/*
8 	 * For convenience, get prev and next VMAs. Which the new VMA operation
9 	 * requires.
10 	 */
11 	vmg->next = vma_next(vmg->vmi);
12 	vmg->prev = vma_prev(vmg->vmi);
13 	vma_iter_next_range(vmg->vmi);
14 
15 	vma = vma_merge_new_range(vmg);
16 	if (vma)
17 		vma_assert_attached(vma);
18 
19 	return vma;
20 }
21 
22 /*
23  * Helper function which provides a wrapper around the expansion of an existing
24  * VMA.
25  */
26 static int expand_existing(struct vma_merge_struct *vmg)
27 {
28 	return vma_expand(vmg);
29 }
30 
31 /*
32  * Helper function to reset merge state the associated VMA iterator to a
33  * specified new range.
34  */
35 void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
36 		   unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags)
37 {
38 	vma_iter_set(vmg->vmi, start);
39 
40 	vmg->prev = NULL;
41 	vmg->middle = NULL;
42 	vmg->next = NULL;
43 	vmg->target = NULL;
44 
45 	vmg->start = start;
46 	vmg->end = end;
47 	vmg->pgoff = pgoff;
48 	vmg->vma_flags = vma_flags;
49 
50 	vmg->just_expand = false;
51 	vmg->__remove_middle = false;
52 	vmg->__remove_next = false;
53 	vmg->__adjust_middle_start = false;
54 	vmg->__adjust_next_start = false;
55 }
56 
57 /* Helper function to set both the VMG range and its anon_vma. */
58 static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
59 		unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
60 		struct anon_vma *anon_vma)
61 {
62 	vmg_set_range(vmg, start, end, pgoff, vma_flags);
63 	vmg->anon_vma = anon_vma;
64 }
65 
66 /*
67  * Helper function to try to merge a new VMA.
68  *
69  * Update vmg and the iterator for it and try to merge, otherwise allocate a new
70  * VMA, link it to the maple tree and return it.
71  */
72 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
73 		struct vma_merge_struct *vmg, unsigned long start,
74 		unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
75 		bool *was_merged)
76 {
77 	struct vm_area_struct *merged;
78 
79 	vmg_set_range(vmg, start, end, pgoff, vma_flags);
80 
81 	merged = merge_new(vmg);
82 	if (merged) {
83 		*was_merged = true;
84 		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
85 		return merged;
86 	}
87 
88 	*was_merged = false;
89 
90 	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
91 
92 	return alloc_and_link_vma(mm, start, end, pgoff, vma_flags);
93 }
94 
95 static bool test_simple_merge(void)
96 {
97 	struct vm_area_struct *vma;
98 	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
99 					     VMA_MAYWRITE_BIT);
100 	struct mm_struct mm = {};
101 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
102 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vma_flags);
103 	VMA_ITERATOR(vmi, &mm, 0x1000);
104 	struct vma_merge_struct vmg = {
105 		.mm = &mm,
106 		.vmi = &vmi,
107 		.start = 0x1000,
108 		.end = 0x2000,
109 		.vma_flags = vma_flags,
110 		.pgoff = 1,
111 	};
112 
113 	ASSERT_FALSE(attach_vma(&mm, vma_left));
114 	ASSERT_FALSE(attach_vma(&mm, vma_right));
115 
116 	vma = merge_new(&vmg);
117 	ASSERT_NE(vma, NULL);
118 
119 	ASSERT_EQ(vma->vm_start, 0);
120 	ASSERT_EQ(vma->vm_end, 0x3000);
121 	ASSERT_EQ(vma->vm_pgoff, 0);
122 	ASSERT_FLAGS_SAME_MASK(&vma->flags, vma_flags);
123 
124 	detach_free_vma(vma);
125 	mtree_destroy(&mm.mm_mt);
126 
127 	return true;
128 }
129 
/*
 * Modifying flags over a strict sub-range of a VMA should split it into
 * three VMAs, with the middle one returned to the caller.
 */
static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
					     VMA_MAYWRITE_BIT);
	vm_flags_t legacy_flags = VM_READ | VM_WRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);

	ASSERT_FALSE(attach_vma(&mm, init_vma));

	/*
	 * The flags will not be changed, the vma_modify_flags() function
	 * performs the merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, &legacy_flags);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	/* The returned VMA covers exactly the modified sub-range. */
	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
	 */

	/* First split: [0, 0x1000). */
	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	/* Second (modified) split: [0x1000, 0x2000). */
	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	/* Third split: [0x2000, 0x3000). */
	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
191 
192 static bool test_simple_expand(void)
193 {
194 	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
195 					     VMA_MAYWRITE_BIT);
196 	struct mm_struct mm = {};
197 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
198 	VMA_ITERATOR(vmi, &mm, 0);
199 	struct vma_merge_struct vmg = {
200 		.vmi = &vmi,
201 		.target = vma,
202 		.start = 0,
203 		.end = 0x3000,
204 		.pgoff = 0,
205 	};
206 
207 	ASSERT_FALSE(attach_vma(&mm, vma));
208 
209 	ASSERT_FALSE(expand_existing(&vmg));
210 
211 	ASSERT_EQ(vma->vm_start, 0);
212 	ASSERT_EQ(vma->vm_end, 0x3000);
213 	ASSERT_EQ(vma->vm_pgoff, 0);
214 
215 	detach_free_vma(vma);
216 	mtree_destroy(&mm.mm_mt);
217 
218 	return true;
219 }
220 
221 static bool test_simple_shrink(void)
222 {
223 	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
224 					     VMA_MAYWRITE_BIT);
225 	struct mm_struct mm = {};
226 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
227 	VMA_ITERATOR(vmi, &mm, 0);
228 
229 	ASSERT_FALSE(attach_vma(&mm, vma));
230 
231 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
232 
233 	ASSERT_EQ(vma->vm_start, 0);
234 	ASSERT_EQ(vma->vm_end, 0x1000);
235 	ASSERT_EQ(vma->vm_pgoff, 0);
236 
237 	detach_free_vma(vma);
238 	mtree_destroy(&mm.mm_mt);
239 
240 	return true;
241 }
242 
/*
 * Exercise new-VMA merging against existing neighbours in every direction
 * (prev only, next only, both), progressively coalescing VMAs A, B, C and D
 * into a single VMA. The sticky-flag parameters apply VMA_STICKY_FLAGS to the
 * new VMAs and/or A, B, C respectively, and each merge asserts sticky flags
 * survive on the merged result whenever any participant carried them.
 */
static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	/* One avc per VMA so anon_vma duplication paths get exercised. */
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	int count;
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;

	if (is_sticky)
		vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);

	/*
	 * 0123456789abc
	 * AA B       CC
	 */
	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
	ASSERT_NE(vma_a, NULL);
	if (a_is_sticky)
		vma_flags_set_mask(&vma_a->flags, VMA_STICKY_FLAGS);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
	ASSERT_NE(vma_b, NULL);
	if (b_is_sticky)
		vma_flags_set_mask(&vma_b->flags, VMA_STICKY_FLAGS);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vma_flags);
	ASSERT_NE(vma_c, NULL);
	if (c_is_sticky)
		vma_flags_set_mask(&vma_c->flags, VMA_STICKY_FLAGS);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	/*
	 * NO merge.
	 *
	 * 0123456789abc
	 * AA B   **  CC
	 */
	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vma_flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AA*B   DD  CC
	 */
	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky || b_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to PREVIOUS VMA.
	 *
	 * 0123456789abc
	 * AAAA*  DD  CC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAA *DD  CC
	 */
	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vma_flags, &merged);
	ASSERT_EQ(vma, vma_d);
	/* Prepend. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky) /* D uses is_sticky. */
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAA*DDD  CC
	 */
	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAAAAAA *CC
	 */
	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vma_flags, &merged);
	ASSERT_EQ(vma, vma_c);
	/* Prepend C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || c_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAAAAAA*CCC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);
	if (is_sticky || a_is_sticky || c_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Final state.
	 *
	 * 0123456789abc
	 * AAAAAAAAAAAAA
	 */

	count = 0;
	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		detach_free_vma(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done. */
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}
460 
461 static bool test_merge_new(void)
462 {
463 	int i, j, k, l;
464 
465 	/* Generate every possible permutation of sticky flags. */
466 	for (i = 0; i < 2; i++)
467 		for (j = 0; j < 2; j++)
468 			for (k = 0; k < 2; k++)
469 				for (l = 0; l < 2; l++)
470 					ASSERT_TRUE(__test_merge_new(i, j, k, l));
471 
472 	return true;
473 }
474 
/*
 * Ensure that VMAs carrying any VM_SPECIAL flag (IO, DONTEXPAND, PFNMAP,
 * MIXEDMAP) are never merged, for both new-VMA and existing-VMA merge paths.
 */
static bool test_vma_merge_special_flags(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vma_flag_t special_flags[] = { VMA_IO_BIT, VMA_DONTEXPAND_BIT,
		VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT };
	vma_flags_t all_special_flags = EMPTY_VMA_FLAGS;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++)
		vma_flags_set(&all_special_flags, special_flags[i]);
	ASSERT_FLAGS_SAME_MASK(&all_special_flags, VMA_SPECIAL_FLAGS);

	/*
	 * 01234
	 * AAA
	 */
	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAA*
	 *
	 * This should merge if not for the VM_SPECIAL flag.
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, vma_flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vma_flag_t special_flag = special_flags[i];
		vma_flags_t flags = vma_flags;

		/* Apply the same special flag to both sides - still no merge. */
		vma_flags_set(&flags, special_flag);
		vma_left->flags = flags;
		vmg.vma_flags = flags;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAAB
	 *
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
	ASSERT_NE(vma, NULL);
	vmg.middle = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vma_flag_t special_flag = special_flags[i];
		vma_flags_t flags = vma_flags;

		/*
		 * NOTE(review): this loop sets the flags on vma_left (the
		 * merge target), not on the middle VMA - presumably
		 * intentional, as the special flag on either side must
		 * prevent the merge. Confirm against mm/vma.c semantics.
		 */
		vma_flags_set(&flags, special_flag);
		vma_left->flags = flags;
		vmg.vma_flags = flags;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}
551 
/*
 * Verify that a VMA with a vm_ops->close() hook is never deleted by a merge,
 * and that merges which would require such a deletion are either downgraded
 * (merge with prev only) or refused outright.
 */
static bool test_vma_merge_with_close(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the nearly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:     -       -    !NULL
	 *                 [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:     -     !NULL
	 *                 [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 *    -     !NULL   NULL
	 * [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 *
	 * 0123456789
	 * PPP**NNNN
	 *             ->
	 * 0123456789
	 * PPPPPPNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	/* Merge proceeds but is downgraded: prev extended, next left intact. */
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 *    <>
	 * 0123456789
	 * PPPVV
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * also fail.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 *    <>
	 * 0123456789
	 *    VVNNNN
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures, however a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 *            ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
761 
/*
 * Verify that a new VMA may still partially merge when its neighbours have
 * vm_ops->close() hooks: prev is extended, while next (which would otherwise
 * be deleted) is left intact.
 */
static bool test_vma_merge_new_with_close(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vma_flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible), e.g.:
	 *
	 *        New VMA
	 *    A  v-------v  B
	 * |-----|       |-----|
	 *  close         close
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 *        New VMA
	 *       A          B
	 * |------------||-----|
	 *  close         close
	 */

	/* Have prev and next have a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, vma_flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev was expanded over the new range; next remains untouched. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}
817 
/*
 * Exercise merging of an existing (middle) VMA with its prev/next neighbours:
 * partial and full spans merging right, left, and both ways, followed by a
 * set of ranges that must not merge. The sticky-flag parameters apply
 * VMA_STICKY_FLAGS to prev, middle and next respectively, and each merge
 * asserts sticky flags survive on the merged result when any participant
 * carried them.
 */
static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	vma_flags_t prev_flags = vma_flags;
	vma_flags_t next_flags = vma_flags;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct anon_vma_chain avc = {};

	if (prev_is_sticky)
		vma_flags_set_mask(&prev_flags, VMA_STICKY_FLAGS);
	if (middle_is_sticky)
		vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
	if (next_is_sticky)
		vma_flags_set_mask(&next_flags, VMA_STICKY_FLAGS);

	/*
	 * Merge right case - partial span.
	 *
	 *    <->
	 * 0123456789
	 *   VVVVNNN
	 *            ->
	 * 0123456789
	 *   VNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vmg.prev = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* next absorbed [0x3000, 0x6000); vma shrank to its first page. */
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 *
	 *   <-->
	 * 0123456789
	 *   VVVVNNN
	 *            ->
	 * 0123456789
	 *   NNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vma_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* next absorbed the whole of vma, which is deleted. */
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 *            ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev absorbed [0x3000, 0x6000); vma shrank to its last page. */
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 *            ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev absorbed the whole of vma, which is deleted. */
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 *             ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev absorbed both vma and next, leaving a single VMA. */
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted prev and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
	 *
	 *     -
	 *      -
	 *       -
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);

	/* Interior sub-ranges of vma touch neither neighbour - no merge. */
	vmg_set_range(&vmg, 0x4000, 0x5000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}
1067 
static bool test_merge_existing(void)
{
	int perm;

	/*
	 * Exercise all eight combinations of sticky flag placement across
	 * prev/middle/next. Bit 2 = prev, bit 1 = middle, bit 0 = next, so
	 * the permutations are visited in the same order as three nested
	 * 0..1 loops would produce.
	 */
	for (perm = 0; perm < 8; perm++) {
		ASSERT_TRUE(__test_merge_existing((perm >> 2) & 1,
						  (perm >> 1) & 1,
						  perm & 1));
	}

	return true;
}
1080 
1081 static bool test_anon_vma_non_mergeable(void)
1082 {
1083 	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
1084 					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
1085 	struct mm_struct mm = {};
1086 	VMA_ITERATOR(vmi, &mm, 0);
1087 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1088 	struct vma_merge_struct vmg = {
1089 		.mm = &mm,
1090 		.vmi = &vmi,
1091 	};
1092 	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
1093 	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
1094 	struct anon_vma dummy_anon_vma_2;
1095 
1096 	/*
1097 	 * In the case of modified VMA merge, merging both left and right VMAs
1098 	 * but where prev and next have incompatible anon_vma objects, we revert
1099 	 * to a merge of prev and VMA:
1100 	 *
1101 	 *    <-->
1102 	 * 0123456789
1103 	 * PPPVVVVNNN
1104 	 *            ->
1105 	 * 0123456789
1106 	 * PPPPPPPNNN
1107 	 */
1108 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
1109 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
1110 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
1111 
1112 	/*
1113 	 * Give both prev and next single anon_vma_chain fields, so they will
1114 	 * merge with the NULL vmg->anon_vma.
1115 	 *
1116 	 * However, when prev is compared to next, the merge should fail.
1117 	 */
1118 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
1119 	vmg.prev = vma_prev;
1120 	vmg.middle = vma;
1121 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1122 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1123 
1124 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1125 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1126 	ASSERT_EQ(vma_prev->vm_start, 0);
1127 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1128 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1129 	ASSERT_TRUE(vma_write_started(vma_prev));
1130 	ASSERT_FALSE(vma_write_started(vma_next));
1131 
1132 	/* Clear down and reset. */
1133 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1134 
1135 	/*
1136 	 * Now consider the new VMA case. This is equivalent, only adding a new
1137 	 * VMA in a gap between prev and next.
1138 	 *
1139 	 *    <-->
1140 	 * 0123456789
1141 	 * PPP****NNN
1142 	 *            ->
1143 	 * 0123456789
1144 	 * PPPPPPPNNN
1145 	 */
1146 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
1147 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
1148 
1149 	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
1150 	vmg.prev = vma_prev;
1151 	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
1152 	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
1153 
1154 	vmg.anon_vma = NULL;
1155 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1156 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1157 	ASSERT_EQ(vma_prev->vm_start, 0);
1158 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1159 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1160 	ASSERT_TRUE(vma_write_started(vma_prev));
1161 	ASSERT_FALSE(vma_write_started(vma_next));
1162 
1163 	/* Final cleanup. */
1164 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1165 
1166 	return true;
1167 }
1168 
/*
 * Assert that merge operations which delete a VMA carrying an anon_vma
 * duplicate (clone) that anon_vma into the surviving, expanded VMA.
 */
static bool test_dup_anon_vma(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/* Clear global dummy anon_vma state left over from prior tests. */
	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA to delete the next one duplicates next's anon_vma
	 * and assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, vma_flags);
	vmg.target = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	/* next's anon_vma was cloned into the surviving prev. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
	vmg.anon_vma = &dummy_anon_vma;
	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	/* This time the deleted middle VMA's anon_vma is duplicated. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend shrink/delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *     |<----->|
	 * *************-------|
	 *      vma       next
	 * shrink/delete extend
	 */

	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	/* The shrunk VMA's anon_vma propagates to the extended next. */
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}
1329 
/*
 * Assert that when VMA iterator preallocation fails mid-merge, an anon_vma
 * already duplicated for the merge is unlinked again, for both the
 * merge-existing and expand paths.
 */
static bool test_vmi_prealloc_fail(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain avc = {};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);

	/* Force the next iterator preallocation to report failure. */
	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->anon_vma = &dummy_anon_vma;

	/*
	 * NOTE(review): pgoff 3 for a range starting at 0 looks inconsistent;
	 * presumably the expand path ignores vmg->pgoff here - confirm.
	 */
	vmg_set_range(&vmg, 0, 0x5000, 3, vma_flags);
	vmg.target = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}
1396 
1397 static bool test_merge_extend(void)
1398 {
1399 	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
1400 					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
1401 	struct mm_struct mm = {};
1402 	VMA_ITERATOR(vmi, &mm, 0x1000);
1403 	struct vm_area_struct *vma;
1404 
1405 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vma_flags);
1406 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
1407 
1408 	/*
1409 	 * Extend a VMA into the gap between itself and the following VMA.
1410 	 * This should result in a merge.
1411 	 *
1412 	 * <->
1413 	 * *  *
1414 	 *
1415 	 */
1416 
1417 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1418 	ASSERT_EQ(vma->vm_start, 0);
1419 	ASSERT_EQ(vma->vm_end, 0x4000);
1420 	ASSERT_EQ(vma->vm_pgoff, 0);
1421 	ASSERT_TRUE(vma_write_started(vma));
1422 	ASSERT_EQ(mm.map_count, 1);
1423 
1424 	cleanup_mm(&mm, &vmi);
1425 	return true;
1426 }
1427 
/*
 * Assert that the just_expand flag makes vma_merge_new_range() expand prev
 * in place without traversing back to any earlier VMA.
 */
static bool test_expand_only_mode(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
					     VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vma_flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of the just_expand flag, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.just_expand = true;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* Prev was expanded in place to cover [0x3000, 0x9000). */
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	/* The iterator must not have moved off prev's start address. */
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
	vma_assert_attached(vma);

	cleanup_mm(&mm, &vmi);
	return true;
}
1468 
/*
 * Run every merge test in sequence. The TEST() macro presumably records
 * results into *num_tests/*num_fail - see its definition.
 */
static void run_merge_tests(int *num_tests, int *num_fail)
{
	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	/* Full merge scenario coverage. */
	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(expand_only_mode);
}
1488