xref: /linux/tools/testing/selftests/mm/merge.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #define _GNU_SOURCE
4 #include "kselftest_harness.h"
5 #include <linux/prctl.h>
6 #include <fcntl.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/prctl.h>
12 #include <sys/syscall.h>
13 #include <sys/wait.h>
14 #include <linux/perf_event.h>
15 #include "vm_util.h"
16 #include <linux/mman.h>
17 
/* Per-test state shared by all VMA merge tests. */
FIXTURE(merge)
{
	unsigned int page_size;		/* System page size, from psize(). */
	char *carveout;			/* 30-page PROT_NONE region tests MAP_FIXED over. */
	struct procmap_fd procmap;	/* PROCMAP_QUERY handle on /proc/self. */
};
24 
25 static char *map_carveout(unsigned int page_size)
26 {
27 	return mmap(NULL, 30 * page_size, PROT_NONE,
28 		    MAP_ANON | MAP_PRIVATE, -1, 0);
29 }
30 
/*
 * Fork the current process. The parent waits for the child to exit and
 * returns the child's pid; the child re-opens the PROCMAP_QUERY fd for
 * itself and returns 0. Returns -1 on error.
 */
static pid_t do_fork(struct procmap_fd *procmap)
{
	const pid_t pid = fork();

	switch (pid) {
	case -1:
		return -1;
	case 0:
		/* Child: fall through to reopen procmap below. */
		break;
	default:
		/* Parent: wait for the child to complete before returning. */
		wait(NULL);
		return pid;
	}

	/* Reopen for child. */
	if (close_procmap(procmap))
		return -1;
	if (open_self_procmap(procmap))
		return -1;

	return 0;
}
50 
#ifdef __NR_mseal
/* Thin wrapper for the mseal() syscall, which libc may not yet expose. */
static int sys_mseal(void *ptr, size_t len, unsigned long flags)
{
	return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
}
#else
/*
 * Stub for headers lacking __NR_mseal: always fail with ENOSYS so callers
 * can detect and skip mseal()-dependent behaviour.
 */
static int sys_mseal(void *ptr, size_t len, unsigned long flags)
{
	/* Explicitly unused in the stub; silences -Wunused-parameter. */
	(void)ptr;
	(void)len;
	(void)flags;

	errno = ENOSYS;
	return -1;
}
#endif
63 
/* Establish carveout region and PROCMAP_QUERY fd before each test. */
FIXTURE_SETUP(merge)
{
	self->page_size = psize();
	/* Carve out PROT_NONE region to map over. */
	self->carveout = map_carveout(self->page_size);
	ASSERT_NE(self->carveout, MAP_FAILED);
	/* Setup PROCMAP_QUERY interface. */
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);
}
73 
/* Release carveout and procmap fd, and reset global KSM merge state. */
FIXTURE_TEARDOWN(merge)
{
	ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
	/* May fail for parent of forked process. */
	close_procmap(&self->procmap);
	/*
	 * Clear unconditionally, as some tests set this. It is no issue if this
	 * fails (KSM may be disabled for instance).
	 */
	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
85 
/* Same state as the merge fixture, for tests parameterised over fork(). */
FIXTURE(merge_with_fork)
{
	unsigned int page_size;		/* System page size, from psize(). */
	char *carveout;			/* 30-page PROT_NONE region tests MAP_FIXED over. */
	struct procmap_fd procmap;	/* PROCMAP_QUERY handle on /proc/self. */
};
92 
/*
 * Variant switch: presumably selects whether the scenario runs in a forked
 * child (no user visible in this chunk - confirm against the tests using it).
 */
FIXTURE_VARIANT(merge_with_fork)
{
	bool forked;
};

/* Run the scenario with forking. */
FIXTURE_VARIANT_ADD(merge_with_fork, forked)
{
	.forked = true,
};

/* Run the scenario without forking. */
FIXTURE_VARIANT_ADD(merge_with_fork, unforked)
{
	.forked = false,
};
107 
/* Establish carveout region and PROCMAP_QUERY fd before each test. */
FIXTURE_SETUP(merge_with_fork)
{
	self->page_size = psize();
	/* Carve out PROT_NONE region to map over. */
	self->carveout = map_carveout(self->page_size);
	ASSERT_NE(self->carveout, MAP_FAILED);
	/* Setup PROCMAP_QUERY interface. */
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);
}
115 
/*
 * Release resources. Unlike the merge fixture's teardown, closing the
 * procmap fd is asserted to succeed here.
 */
FIXTURE_TEARDOWN(merge_with_fork)
{
	ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
	ASSERT_EQ(close_procmap(&self->procmap), 0);
	/* See above. */
	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
123 
/*
 * Assert that mprotect()'ing a faulted VMA to match its unfaulted
 * predecessor results in a merge.
 */
TEST_F(merge, mprotect_unfaulted_left)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * Map 10 pages of R/W memory within. MAP_NORESERVE so we don't hit
	 * merge failure due to lack of VM_ACCOUNT flag by mistake.
	 *
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first 5 pages read-only, splitting the VMA:
	 *
	 *      RO          RW
	 * |-----------|-----------|
	 * | unfaulted | unfaulted |
	 * |-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the last 5 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO          RW
	 * |-----------|-----------|
	 * | unfaulted |  faulted  |
	 * |-----------|-----------|
	 */
	ptr[5 * page_size] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge (though for
	 * ~15 years we did not! :):
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}
177 
/*
 * Assert that mprotect()'ing a faulted VMA to match its unfaulted
 * successor results in a merge.
 */
TEST_F(merge, mprotect_unfaulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the last 5 pages read-only, splitting the VMA:
	 *
	 *      RW          RO
	 * |-----------|-----------|
	 * | unfaulted | unfaulted |
	 * |-----------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the first 5 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RW          RO
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 */
	ptr[0] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}
227 
/*
 * Assert that mprotect()'ing a faulted VMA to match unfaulted VMAs on BOTH
 * sides results in a three-way merge.
 */
TEST_F(merge, mprotect_unfaulted_both)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first and last 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted | unfaulted | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the middle 3 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted |  faulted  | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ptr[3 * page_size] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}
278 
/*
 * Assert that a merge occurs when the preceding VMA is faulted and the
 * following VMA is unfaulted.
 */
TEST_F(merge, mprotect_faulted_left_unfaulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the last 3 pages read-only, splitting the VMA:
	 *
	 *             RW               RO
	 * |-----------------------|-----------|
	 * |       unfaulted       | unfaulted |
	 * |-----------------------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the first 6 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *             RW               RO
	 * |-----------------------|-----------|
	 * |        faulted        | unfaulted |
	 * |-----------------------|-----------|
	 */
	ptr[0] = 'x';
	/*
	 * Now make the first 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}
337 
/*
 * Assert that a merge occurs when the preceding VMA is unfaulted and the
 * following VMA is faulted.
 */
TEST_F(merge, mprotect_unfaulted_left_faulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first 3 pages read-only, splitting the VMA:
	 *
	 *      RO                RW
	 * |-----------|-----------------------|
	 * | unfaulted |       unfaulted       |
	 * |-----------|-----------------------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the last 6 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO                RW
	 * |-----------|-----------------------|
	 * | unfaulted |        faulted        |
	 * |-----------|-----------------------|
	 */
	ptr[3 * page_size] = 'x';
	/*
	 * Now make the last 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}
396 
/*
 * Assert that no merge occurs when mapping adjacent to a VMA which was
 * duplicated by fork() (merge target side), even after unCOWing its pages.
 */
TEST_F(merge, forked_target_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;
	pid_t pid;
	int i;

	/*
	 * |-----------|
	 * | unfaulted |
	 * |-----------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|
	 * |  faulted  |
	 * |-----------|
	 */
	ptr[0] = 'x';

	pid = do_fork(&self->procmap);
	ASSERT_NE(pid, -1);
	/* Remainder of the test runs in the child only. */
	if (pid != 0)
		return;

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child.
	 *
	 *     forked
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */
	ptr2 = mmap(&ptr[5 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 5 * page_size);
}
451 
/*
 * Assert that no merge occurs when a VMA duplicated by fork() would be the
 * SOURCE of the merge (i.e. expanded into an adjacent unfaulted VMA).
 */
TEST_F(merge, forked_source_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;
	pid_t pid;
	int i;

	/*
	 * |-----------|------------|
	 * | unfaulted | <unmapped> |
	 * |-----------|------------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|------------|
	 * |  faulted  | <unmapped> |
	 * |-----------|------------|
	 */
	ptr[0] = 'x';

	pid = do_fork(&self->procmap);
	ASSERT_NE(pid, -1);
	/* Remainder of the test runs in the child only. */
	if (pid != 0)
		return;

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child, ptr2 after ptr, but incompatible.
	 *
	 *   forked RW      RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr        ptr2
	 */
	ptr2 = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);

	/*
	 * Now mprotect forked region to RWX so it becomes the source for the
	 * merge to unfaulted region:
	 *
	 *  forked RWX      RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * This should NOT result in a merge, as ptr was forked.
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC), 0);
	/* Again, make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);
}
524 
/*
 * Install a uprobe on a file-backed mapping, then mremap() pieces of that
 * mapping around, exercising merge handling of VMAs carrying a uprobe.
 * NOTE(review): precise regression being guarded is not visible here -
 * confirm against the commit introducing this test.
 */
TEST_F(merge, handle_uprobe_upon_merged_vma)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	unsigned int page_size = self->page_size;
	const char *probe_file = "./foo";
	char *carveout = self->carveout;
	struct perf_event_attr attr;
	unsigned long type;
	void *ptr1, *ptr2;
	int fd;

	/* Create the file upon which the uprobe will be installed. */
	fd = open(probe_file, O_RDWR|O_CREAT, 0600);
	ASSERT_GE(fd, 0);

	ASSERT_EQ(ftruncate(fd, page_size), 0);
	/* Look up the dynamic PMU type for uprobes; skip if unavailable. */
	if (read_sysfs("/sys/bus/event_source/devices/uprobe/type", &type) != 0) {
		SKIP(goto out, "Failed to read uprobe sysfs file, skipping");
	}

	/* config1 = probe file path, config2 = offset within that file. */
	memset(&attr, 0, attr_sz);
	attr.size = attr_sz;
	attr.type = type;
	attr.config1 = (__u64)(long)probe_file;
	attr.config2 = 0x0;

	ASSERT_GE(syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0), 0);

	/* Map the probed file executable over part of the carveout. */
	ptr1 = mmap(&carveout[page_size], 10 * page_size, PROT_EXEC,
		    MAP_PRIVATE | MAP_FIXED, fd, 0);
	ASSERT_NE(ptr1, MAP_FAILED);

	/* Move (and grow) the first pages away from the mapping... */
	ptr2 = mremap(ptr1, page_size, 2 * page_size,
		      MREMAP_MAYMOVE | MREMAP_FIXED, ptr1 + 5 * page_size);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* ...then move them back into place, which should not fail. */
	ASSERT_NE(mremap(ptr2, page_size, page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr1), MAP_FAILED);

out:
	close(fd);
	remove(probe_file);
}
567 
/*
 * Assert that VMA merging still occurs once VM_MERGEABLE has been set via
 * the PR_SET_MEMORY_MERGE (KSM) prctl.
 */
TEST_F(merge, ksm_merge)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;
	int err;

	/*
	 * Map two R/W immediately adjacent to one another, they should
	 * trivially merge:
	 *
	 * |-----------|-----------|
	 * |    R/W    |    R/W    |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */

	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[2 * page_size], page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr))
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);

	/* Unmap the second half of this merged VMA. */
	ASSERT_EQ(munmap(ptr2, page_size), 0);

	/* OK, now enable global KSM merge. We clear this on test teardown. */
	err = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
	if (err == -1) {
		int errnum = errno;

		/* Only non-failure case... */
		ASSERT_EQ(errnum, EINVAL);
		/* ...but indicates we should skip. */
		SKIP(return, "KSM memory merging not supported, skipping.");
	}

	/*
	 * Now map a VMA adjacent to the existing that was just made
	 * VM_MERGEABLE, this should merge as well.
	 */
	ptr2 = mmap(&carveout[2 * page_size], page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);

	/* Now unmap this VMA altogether. */
	ASSERT_EQ(munmap(ptr, 2 * page_size), 0);

	/* Try the same operation as before, asserting this also merges fine. */
	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[2 * page_size], page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
}
638 
/*
 * Assert that mremap()'ing an unfaulted VMA to be immediately after a
 * faulted one results in a merge.
 */
TEST_F(merge, mremap_unfaulted_to_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map two distinct areas:
	 *
	 * |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|
	 *      ptr            ptr2
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                \
	 * |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |
	 * |-----------|  /  |-----------|
	 *      ptr       \       ptr2
	 */
	ptr[0] = 'x';

	/*
	 * Now move ptr2 adjacent to ptr:
	 *
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * It should merge:
	 *
	 * |----------------------|
	 * |       faulted        |
	 * |----------------------|
	 *            ptr
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}
699 
/*
 * Assert that mremap()'ing an unfaulted VMA to be immediately before a
 * faulted one results in a merge.
 */
TEST_F(merge, mremap_unfaulted_behind_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map two distinct areas:
	 *
	 * |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|
	 *      ptr            ptr2
	 */
	ptr = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                \
	 * |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |
	 * |-----------|  /  |-----------|
	 *      ptr       \       ptr2
	 */
	ptr[0] = 'x';

	/*
	 * Now move ptr2 adjacent, but behind, ptr:
	 *
	 * |-----------|-----------|
	 * | unfaulted |  faulted  |
	 * |-----------|-----------|
	 *      ptr2        ptr
	 *
	 * It should merge:
	 *
	 * |----------------------|
	 * |       faulted        |
	 * |----------------------|
	 *            ptr2
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);
}
760 
/*
 * Assert that mremap()'ing an unfaulted VMA between two faulted ones merges
 * only with the preceding VMA (the two faulted VMAs have distinct anon_vmas
 * so cannot all merge).
 */
TEST_F(merge, mremap_unfaulted_between_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map three distinct areas:
	 *
	 * |-----------|  |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|  |-----------|
	 *      ptr            ptr2           ptr3
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr3 further away. */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr, ptr3:
	 *                \                 \
	 * |-----------|  /  |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |  \  |  faulted  |
	 * |-----------|  /  |-----------|  /  |-----------|
	 *      ptr       \       ptr2      \       ptr3
	 */
	ptr[0] = 'x';
	ptr3[0] = 'x';

	/*
	 * Move ptr3 back into place, leaving a place for ptr2:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           |  faulted  |  \  | unfaulted |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                     ptr3      \       ptr2
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr        ptr2         ptr3
	 *
	 * It should merge, but only ptr, ptr2:
	 *
	 * |-----------------------|-----------|
	 * |        faulted        | unfaulted |
	 * |-----------------------|-----------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* ptr and ptr2 merged into one 10-page VMA... */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	/* ...while ptr3 remains a separate 5-page VMA. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr3));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr3);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr3 + 5 * page_size);
}
845 
/*
 * Assert that mremap()'ing an unfaulted VMA between a faulted and an
 * unfaulted VMA results in all three merging.
 */
TEST_F(merge, mremap_unfaulted_between_faulted_unfaulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map three distinct areas:
	 *
	 * |-----------|  |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|  |-----------|
	 *      ptr            ptr2           ptr3
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr3 further away. */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);


	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                \                 \
	 * |-----------|  /  |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |  \  | unfaulted |
	 * |-----------|  /  |-----------|  /  |-----------|
	 *      ptr       \       ptr2      \       ptr3
	 */
	ptr[0] = 'x';

	/*
	 * Move ptr3 back into place, leaving a place for ptr2:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           | unfaulted |  \  | unfaulted |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                     ptr3      \       ptr2
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted | unfaulted |
	 * |-----------|-----------|-----------|
	 *      ptr        ptr2         ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Assert that the three-way merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
926 
/*
 * Assert that mremap()'ing an unfaulted VMA into a hole between two faulted
 * VMAs which share an anon_vma and have matching pgoffs results in all three
 * merging.
 */
TEST_F(merge, mremap_unfaulted_between_correctly_placed_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map one larger area:
	 *
	 * |-----------------------------------|
	 * |            unfaulted              |
	 * |-----------------------------------|
	 */
	ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr[0] = 'x';

	/*
	 * Unmap middle:
	 *
	 * |-----------|           |-----------|
	 * |  faulted  |           |  faulted  |
	 * |-----------|           |-----------|
	 *
	 * Now the faulted areas are compatible with each other (anon_vma the
	 * same, vma->vm_pgoff equal to virtual page offset).
	 */
	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);

	/*
	 * Map a new area, ptr2:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           |  faulted  |  \  | unfaulted |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                               \       ptr2
	 */
	ptr2 = mmap(&carveout[20 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr        ptr2
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Assert that the three-way merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
1000 
/*
 * Test that repeatedly mremap()'ing faulted VMAs back to positions consistent
 * with their anon_vma and vm_pgoff results in a merge each time, regardless of
 * whether the moved VMA merges with a preceding, following, or both VMAs.
 */
TEST_F(merge, mremap_correct_placed_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map one larger area:
	 *
	 * |-----------------------------------|
	 * |            unfaulted              |
	 * |-----------------------------------|
	 */
	ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr[0] = 'x';

	/*
	 * Offset the final and middle 5 pages further away:
	 *                \                 \
	 * |-----------|  /  |-----------|  /  |-----------|
	 * |  faulted  |  \  |  faulted  |  \  |  faulted  |
	 * |-----------|  /  |-----------|  /  |-----------|
	 *      ptr       \       ptr2      \       ptr3
	 */
	ptr3 = &ptr[10 * page_size];
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);
	ptr2 = &ptr[5 * page_size];
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Move ptr2 into its correct place:
	 *                            \
	 * |-----------|-----------|  /  |-----------|
	 * |  faulted  |  faulted  |  \  |  faulted  |
	 * |-----------|-----------|  /  |-----------|
	 *      ptr         ptr2      \       ptr3
	 *
	 * It should merge:
	 *                            \
	 * |-----------------------|  /  |-----------|
	 * |        faulted        |  \  |  faulted  |
	 * |-----------------------|  /  |-----------|
	 *            ptr             \       ptr3
	 */

	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	/*
	 * Now move ptr out of place:
	 *                            \                 \
	 *             |-----------|  /  |-----------|  /  |-----------|
	 *             |  faulted  |  \  |  faulted  |  \  |  faulted  |
	 *             |-----------|  /  |-----------|  /  |-----------|
	 *                  ptr2      \       ptr       \       ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Now move ptr back into place:
	 *                            \
	 * |-----------|-----------|  /  |-----------|
	 * |  faulted  |  faulted  |  \  |  faulted  |
	 * |-----------|-----------|  /  |-----------|
	 *      ptr         ptr2      \       ptr3
	 *
	 * It should merge:
	 *                            \
	 * |-----------------------|  /  |-----------|
	 * |        faulted        |  \  |  faulted  |
	 * |-----------------------|  /  |-----------|
	 *            ptr             \       ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	/*
	 * Now move ptr out of place again:
	 *                            \                 \
	 *             |-----------|  /  |-----------|  /  |-----------|
	 *             |  faulted  |  \  |  faulted  |  \  |  faulted  |
	 *             |-----------|  /  |-----------|  /  |-----------|
	 *                  ptr2      \       ptr       \       ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Now move ptr3 back into place:
	 *                                        \
	 *             |-----------|-----------|  /  |-----------|
	 *             |  faulted  |  faulted  |  \  |  faulted  |
	 *             |-----------|-----------|  /  |-----------|
	 *                  ptr2        ptr3      \       ptr
	 *
	 * It should merge:
	 *                                        \
	 *             |-----------------------|  /  |-----------|
	 *             |        faulted        |  \  |  faulted  |
	 *             |-----------------------|  /  |-----------|
	 *                        ptr2            \       ptr
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr2[5 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);

	/*
	 * Now move ptr back into place:
	 *
	 * |-----------|-----------------------|
	 * |  faulted  |        faulted        |
	 * |-----------|-----------------------|
	 *      ptr               ptr2
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *                  ptr
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);

	/*
	 * Now move ptr2 out of the way:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           |  faulted  |  \  |  faulted  |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                     ptr3      \       ptr2
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Now move it back:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *                  ptr
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);

	/*
	 * Move ptr3 out of place:
	 *                                        \
	 * |-----------------------|              /  |-----------|
	 * |        faulted        |              \  |  faulted  |
	 * |-----------------------|              /  |-----------|
	 *            ptr                         \       ptr3
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 1000);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Now move it back:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *                  ptr
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
1232 
1233 TEST_F(merge, merge_vmas_with_mseal)
1234 {
1235 	unsigned int page_size = self->page_size;
1236 	struct procmap_fd *procmap = &self->procmap;
1237 	char *ptr, *ptr2, *ptr3;
1238 	/* We need our own as cannot munmap() once sealed. */
1239 	char *carveout;
1240 
1241 	/* Invalid mseal() call to see if implemented. */
1242 	ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
1243 	if (errno == ENOSYS)
1244 		SKIP(return, "mseal not supported, skipping.");
1245 
1246 	/* Map carveout. */
1247 	carveout = mmap(NULL, 5 * page_size, PROT_NONE,
1248 			MAP_PRIVATE | MAP_ANON, -1, 0);
1249 	ASSERT_NE(carveout, MAP_FAILED);
1250 
1251 	/*
1252 	 * Map 3 separate VMAs:
1253 	 *
1254 	 * |-----------|-----------|-----------|
1255 	 * |    RW     |    RWE    |    RO     |
1256 	 * |-----------|-----------|-----------|
1257 	 *      ptr         ptr2        ptr3
1258 	 */
1259 	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
1260 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
1261 	ASSERT_NE(ptr, MAP_FAILED);
1262 	ptr2 = mmap(&carveout[2 * page_size], page_size,
1263 		    PROT_READ | PROT_WRITE | PROT_EXEC,
1264 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
1265 	ASSERT_NE(ptr2, MAP_FAILED);
1266 	ptr3 = mmap(&carveout[3 * page_size], page_size, PROT_READ,
1267 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
1268 	ASSERT_NE(ptr3, MAP_FAILED);
1269 
1270 	/*
1271 	 * mseal the second VMA:
1272 	 *
1273 	 * |-----------|-----------|-----------|
1274 	 * |    RW     |    RWES   |    RO     |
1275 	 * |-----------|-----------|-----------|
1276 	 *      ptr         ptr2        ptr3
1277 	 */
1278 	ASSERT_EQ(sys_mseal(ptr2, page_size, 0), 0);
1279 
1280 	/* Make first VMA mergeable upon mseal. */
1281 	ASSERT_EQ(mprotect(ptr, page_size,
1282 			   PROT_READ | PROT_WRITE | PROT_EXEC), 0);
1283 	/*
1284 	 * At this point we have:
1285 	 *
1286 	 * |-----------|-----------|-----------|
1287 	 * |    RWE    |    RWES   |    RO     |
1288 	 * |-----------|-----------|-----------|
1289 	 *      ptr         ptr2        ptr3
1290 	 *
1291 	 * Now mseal all of the VMAs.
1292 	 */
1293 	ASSERT_EQ(sys_mseal(ptr, 3 * page_size, 0), 0);
1294 
1295 	/*
1296 	 * We should end up with:
1297 	 *
1298 	 * |-----------------------|-----------|
1299 	 * |          RWES         |    ROS    |
1300 	 * |-----------------------|-----------|
1301 	 *            ptr               ptr3
1302 	 */
1303 	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
1304 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
1305 	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
1306 }
1307 
1308 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
1309 {
1310 	struct procmap_fd *procmap = &self->procmap;
1311 	unsigned int page_size = self->page_size;
1312 	unsigned long offset;
1313 	char *ptr_a, *ptr_b;
1314 
1315 	/*
1316 	 * mremap() such that A and B merge:
1317 	 *
1318 	 *                             |------------|
1319 	 *                             |    \       |
1320 	 *           |-----------|     |    /  |---------|
1321 	 *           | unfaulted |     v    \  | faulted |
1322 	 *           |-----------|          /  |---------|
1323 	 *                 B                \       A
1324 	 */
1325 
1326 	/* Map VMA A into place. */
1327 	ptr_a = mmap(&self->carveout[page_size + 3 * page_size],
1328 		     3 * page_size,
1329 		     PROT_READ | PROT_WRITE,
1330 		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1331 	ASSERT_NE(ptr_a, MAP_FAILED);
1332 	/* Fault it in. */
1333 	ptr_a[0] = 'x';
1334 
1335 	if (variant->forked) {
1336 		pid_t pid = do_fork(&self->procmap);
1337 
1338 		ASSERT_NE(pid, -1);
1339 		if (pid != 0)
1340 			return;
1341 	}
1342 
1343 	/*
1344 	 * Now move it out of the way so we can place VMA B in position,
1345 	 * unfaulted.
1346 	 */
1347 	ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1348 		       MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
1349 	ASSERT_NE(ptr_a, MAP_FAILED);
1350 
1351 	/* Map VMA B into place. */
1352 	ptr_b = mmap(&self->carveout[page_size], 3 * page_size,
1353 		     PROT_READ | PROT_WRITE,
1354 		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1355 	ASSERT_NE(ptr_b, MAP_FAILED);
1356 
1357 	/*
1358 	 * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
1359 	 * anon_vma propagation.
1360 	 */
1361 	ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1362 		       MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
1363 		       &self->carveout[page_size + 3 * page_size]);
1364 	ASSERT_NE(ptr_a, MAP_FAILED);
1365 
1366 	/* The VMAs should have merged, if not forked. */
1367 	ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
1368 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
1369 
1370 	offset = variant->forked ? 3 * page_size : 6 * page_size;
1371 	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + offset);
1372 }
1373 
1374 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_next)
1375 {
1376 	struct procmap_fd *procmap = &self->procmap;
1377 	unsigned int page_size = self->page_size;
1378 	unsigned long offset;
1379 	char *ptr_a, *ptr_b;
1380 
1381 	/*
1382 	 * mremap() such that A and B merge:
1383 	 *
1384 	 *      |---------------------------|
1385 	 *      |                   \       |
1386 	 *      |    |-----------|  /  |---------|
1387 	 *      v    | unfaulted |  \  | faulted |
1388 	 *           |-----------|  /  |---------|
1389 	 *                 B        \       A
1390 	 *
1391 	 * Then unmap VMA A to trigger the bug.
1392 	 */
1393 
1394 	/* Map VMA A into place. */
1395 	ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
1396 		     PROT_READ | PROT_WRITE,
1397 		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1398 	ASSERT_NE(ptr_a, MAP_FAILED);
1399 	/* Fault it in. */
1400 	ptr_a[0] = 'x';
1401 
1402 	if (variant->forked) {
1403 		pid_t pid = do_fork(&self->procmap);
1404 
1405 		ASSERT_NE(pid, -1);
1406 		if (pid != 0)
1407 			return;
1408 	}
1409 
1410 	/*
1411 	 * Now move it out of the way so we can place VMA B in position,
1412 	 * unfaulted.
1413 	 */
1414 	ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1415 		       MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
1416 	ASSERT_NE(ptr_a, MAP_FAILED);
1417 
1418 	/* Map VMA B into place. */
1419 	ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
1420 		     PROT_READ | PROT_WRITE,
1421 		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1422 	ASSERT_NE(ptr_b, MAP_FAILED);
1423 
1424 	/*
1425 	 * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
1426 	 * anon_vma propagation.
1427 	 */
1428 	ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1429 		       MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
1430 		       &self->carveout[page_size]);
1431 	ASSERT_NE(ptr_a, MAP_FAILED);
1432 
1433 	/* The VMAs should have merged, if not forked. */
1434 	ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
1435 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
1436 	offset = variant->forked ? 3 * page_size : 6 * page_size;
1437 	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);
1438 }
1439 
/*
 * Check that a faulted VMA mremap()'d in between two unfaulted VMAs merges
 * with both, except when a fork() means its anon_vma must not be propagated
 * to the unfaulted neighbours (in which case all three remain separate).
 */
TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_unfaulted_next)
{
	struct procmap_fd *procmap = &self->procmap;
	unsigned int page_size = self->page_size;
	unsigned long offset;
	char *ptr_a, *ptr_b, *ptr_c;

	/*
	 * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
	 *
	 *                  |---------------------------|
	 *                  |                   \       |
	 * |-----------|    |    |-----------|  /  |---------|
	 * | unfaulted |    v    | unfaulted |  \  | faulted |
	 * |-----------|         |-----------|  /  |---------|
	 *       A                     C        \        B
	 */

	/* Map VMA B into place. */
	ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
		     PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr_b, MAP_FAILED);
	/* Fault it in. */
	ptr_b[0] = 'x';

	if (variant->forked) {
		pid_t pid = do_fork(&self->procmap);

		ASSERT_NE(pid, -1);
		/* Parent is done; the child carries on with the test. */
		if (pid != 0)
			return;
	}

	/*
	 * Now move it out of the way so we can place VMAs A, C in position,
	 * unfaulted.
	 */
	ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
		       MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
	ASSERT_NE(ptr_b, MAP_FAILED);

	/* Map VMA A into place. */

	ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
		     PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr_a, MAP_FAILED);

	/* Map VMA C into place. */
	ptr_c = mmap(&self->carveout[page_size + 3 * page_size + 3 * page_size],
		     3 * page_size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr_c, MAP_FAILED);

	/*
	 * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
	 * anon_vma propagation.
	 */
	ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
		       MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
		       &self->carveout[page_size + 3 * page_size]);
	ASSERT_NE(ptr_b, MAP_FAILED);

	/* The VMAs should have merged, if not forked. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
	/* Forked: A alone (3 pages); unforked: A+B+C merged (9 pages). */
	offset = variant->forked ? 3 * page_size : 9 * page_size;
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);

	/* If forked, B and C should also not have merged. */
	if (variant->forked) {
		ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
		ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
		ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 3 * page_size);
	}
}
1517 
/*
 * Check that a faulted VMA mremap()'d in between an unfaulted VMA and a
 * compatible faulted VMA merges with both, except when a fork() means the
 * anon_vma must not be propagated to the unfaulted predecessor (in which case
 * only the two faulted VMAs merge).
 */
TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_faulted_next)
{
	struct procmap_fd *procmap = &self->procmap;
	unsigned int page_size = self->page_size;
	char *ptr_a, *ptr_b, *ptr_bc;

	/*
	 * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
	 *
	 *                  |---------------------------|
	 *                  |                   \       |
	 * |-----------|    |    |-----------|  /  |---------|
	 * | unfaulted |    v    |  faulted  |  \  | faulted |
	 * |-----------|         |-----------|  /  |---------|
	 *       A                     C        \       B
	 */

	/*
	 * Map VMA B and C into place. We have to map them together so their
	 * anon_vma is the same and the vma->vm_pgoff's are correctly aligned.
	 */
	ptr_bc = mmap(&self->carveout[page_size + 3 * page_size],
		      3 * page_size + 3 * page_size,
		     PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr_bc, MAP_FAILED);

	/* Fault it in. */
	ptr_bc[0] = 'x';

	if (variant->forked) {
		pid_t pid = do_fork(&self->procmap);

		ASSERT_NE(pid, -1);
		/* Parent is done; the child carries on with the test. */
		if (pid != 0)
			return;
	}

	/*
	 * Now move VMA B out the way (splitting VMA BC) so we can place VMA A
	 * in position, unfaulted, and leave the remainder of the VMA we just
	 * moved in place, faulted, as VMA C.
	 */
	ptr_b = mremap(ptr_bc, 3 * page_size, 3 * page_size,
		       MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
	ASSERT_NE(ptr_b, MAP_FAILED);

	/* Map VMA A into place. */
	ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
		     PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr_a, MAP_FAILED);

	/*
	 * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
	 * anon_vma propagation.
	 */
	ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
		       MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
		       &self->carveout[page_size + 3 * page_size]);
	ASSERT_NE(ptr_b, MAP_FAILED);

	/* The VMAs should have merged. A,B,C if unforked, B, C if forked. */
	if (variant->forked) {
		ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
		ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
		ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 6 * page_size);
	} else {
		ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
		ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
		ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + 9 * page_size);
	}
}
1591 
/* kselftest harness entry point: runs all tests registered above. */
TEST_HARNESS_MAIN
1593