1 // SPDX-License-Identifier: GPL-2.0-or-later
2
#define _GNU_SOURCE
#include "kselftest_harness.h"
#include <linux/prctl.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/perf_event.h>
#include "vm_util.h"
#include <linux/mman.h>
17
/*
 * Shared state for VMA merge tests: a PROT_NONE carveout region that tests
 * map over with MAP_FIXED, and a PROCMAP_QUERY fd used to assert whether
 * merges occurred.
 */
FIXTURE(merge)
{
	unsigned int page_size;		/* System page size from psize(). */
	char *carveout;			/* 30-page PROT_NONE reservation. */
	struct procmap_fd procmap;	/* PROCMAP_QUERY handle for self. */
};
24
map_carveout(unsigned int page_size)25 static char *map_carveout(unsigned int page_size)
26 {
27 return mmap(NULL, 30 * page_size, PROT_NONE,
28 MAP_ANON | MAP_PRIVATE, -1, 0);
29 }
30
/*
 * fork(), with the parent blocking until the child exits, and the child
 * reopening the PROCMAP_QUERY fd (the inherited fd presumably still refers to
 * the parent's procmap state — reopen so the child queries its own mappings).
 *
 * Returns the child pid in the parent, 0 in the child, or -1 on failure.
 */
static pid_t do_fork(struct procmap_fd *procmap)
{
	pid_t pid = fork();

	if (pid == -1)
		return -1;
	if (pid != 0) {
		/* Parent: wait for the child to finish before returning. */
		wait(NULL);
		return pid;
	}

	/* Reopen for child. */
	if (close_procmap(procmap))
		return -1;
	if (open_self_procmap(procmap))
		return -1;

	return 0;
}
50
/* Establish the carveout region and PROCMAP_QUERY fd for each merge test. */
FIXTURE_SETUP(merge)
{
	self->page_size = psize();
	/* Carve out PROT_NONE region to map over. */
	self->carveout = map_carveout(self->page_size);
	ASSERT_NE(self->carveout, MAP_FAILED);
	/* Setup PROCMAP_QUERY interface. */
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);
}
60
/* Release the carveout, procmap fd and any KSM merge state set by a test. */
FIXTURE_TEARDOWN(merge)
{
	ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
	/* May fail for parent of forked process. */
	close_procmap(&self->procmap);
	/*
	 * Clear unconditionally, as some tests set this. It is no issue if this
	 * fails (KSM may be disabled for instance).
	 */
	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
72
/*
 * Same state as the merge fixture, for tests parameterised on whether they
 * run in a forked child (see the forked/unforked variants below).
 */
FIXTURE(merge_with_fork)
{
	unsigned int page_size;		/* System page size from psize(). */
	char *carveout;			/* 30-page PROT_NONE reservation. */
	struct procmap_fd procmap;	/* PROCMAP_QUERY handle for self. */
};
79
/*
 * Variant knob: run the test body with or without forking first.
 * NOTE(review): no test in this chunk references the variant — presumably
 * used by tests outside the visible range.
 */
FIXTURE_VARIANT(merge_with_fork)
{
	bool forked;	/* true = exercise the forked-process path. */
};

FIXTURE_VARIANT_ADD(merge_with_fork, forked)
{
	.forked = true,
};

FIXTURE_VARIANT_ADD(merge_with_fork, unforked)
{
	.forked = false,
};
94
/* Identical setup to the merge fixture: carveout plus PROCMAP_QUERY fd. */
FIXTURE_SETUP(merge_with_fork)
{
	self->page_size = psize();
	self->carveout = map_carveout(self->page_size);
	ASSERT_NE(self->carveout, MAP_FAILED);
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);
}
102
/*
 * Teardown. Unlike the merge fixture, close_procmap() failure is fatal here
 * (these tests are expected to hold a valid fd at teardown).
 */
FIXTURE_TEARDOWN(merge_with_fork)
{
	ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
	ASSERT_EQ(close_procmap(&self->procmap), 0);
	/* See above. */
	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
110
/*
 * An mprotect() that makes a faulted VMA compatible with an unfaulted VMA to
 * its LEFT should merge the two.
 */
TEST_F(merge, mprotect_unfaulted_left)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * Map 10 pages of R/W memory within. MAP_NORESERVE so we don't hit
	 * merge failure due to lack of VM_ACCOUNT flag by mistake.
	 *
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first 5 pages read-only, splitting the VMA:
	 *
	 *      RO          RW
	 * |-----------|-----------|
	 * | unfaulted | unfaulted |
	 * |-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the last 5 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO          RW
	 * |-----------|-----------|
	 * | unfaulted |  faulted  |
	 * |-----------|-----------|
	 */
	ptr[5 * page_size] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge (though for
	 * ~15 years we did not! :):
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}
164
/*
 * An mprotect() that makes a faulted VMA compatible with an unfaulted VMA to
 * its RIGHT should merge the two.
 */
TEST_F(merge, mprotect_unfaulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the last 5 pages read-only, splitting the VMA:
	 *
	 *      RW          RO
	 * |-----------|-----------|
	 * | unfaulted | unfaulted |
	 * |-----------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the first 5 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RW          RO
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 */
	ptr[0] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}
214
/*
 * An mprotect() that makes a faulted VMA compatible with unfaulted VMAs on
 * BOTH sides should merge all three.
 */
TEST_F(merge, mprotect_unfaulted_both)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first and last 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted | unfaulted | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the middle 3 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted |  faulted  | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ptr[3 * page_size] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}
265
/*
 * An mprotect() of a faulted middle VMA with a faulted VMA on the left and an
 * unfaulted VMA on the right should merge all three.
 */
TEST_F(merge, mprotect_faulted_left_unfaulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the last 3 pages read-only, splitting the VMA:
	 *
	 *            RW               RO
	 * |-----------------------|-----------|
	 * |       unfaulted       | unfaulted |
	 * |-----------------------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the first 6 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *            RW               RO
	 * |-----------------------|-----------|
	 * |        faulted        | unfaulted |
	 * |-----------------------|-----------|
	 */
	ptr[0] = 'x';
	/*
	 * Now make the first 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}
324
/*
 * An mprotect() of a faulted middle VMA with an unfaulted VMA on the left and
 * a faulted VMA on the right should merge all three.
 */
TEST_F(merge, mprotect_unfaulted_left_faulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first 3 pages read-only, splitting the VMA:
	 *
	 *      RO               RW
	 * |-----------|-----------------------|
	 * | unfaulted |       unfaulted       |
	 * |-----------|-----------------------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the last 6 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO               RW
	 * |-----------|-----------------------|
	 * | unfaulted |        faulted        |
	 * |-----------|-----------------------|
	 */
	ptr[3 * page_size] = 'x';
	/*
	 * Now make the last 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}
383
/*
 * A VMA inherited across fork() (the merge TARGET) must not merge with a
 * newly mapped adjacent unfaulted VMA in the child.
 */
TEST_F(merge, forked_target_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;
	pid_t pid;
	int i;

	/*
	 * |-----------|
	 * | unfaulted |
	 * |-----------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|
	 * |  faulted  |
	 * |-----------|
	 */
	ptr[0] = 'x';

	pid = do_fork(&self->procmap);
	ASSERT_NE(pid, -1);
	if (pid != 0)
		return;	/* Parent: remainder of the test runs in the child. */

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child.
	 *
	 *     forked
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */
	ptr2 = mmap(&ptr[5 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 5 * page_size);
}
438
/*
 * A VMA inherited across fork() must not act as the merge SOURCE: making it
 * compatible with an adjacent unfaulted VMA must still not merge the two.
 */
TEST_F(merge, forked_source_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;
	pid_t pid;
	int i;

	/*
	 * |-----------|------------|
	 * | unfaulted | <unmapped> |
	 * |-----------|------------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|------------|
	 * |  faulted  | <unmapped> |
	 * |-----------|------------|
	 */
	ptr[0] = 'x';

	pid = do_fork(&self->procmap);
	ASSERT_NE(pid, -1);
	if (pid != 0)
		return;	/* Parent: remainder of the test runs in the child. */

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child, ptr2 after ptr, but incompatible.
	 *
	 *  forked RW        RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */
	ptr2 = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);

	/*
	 * Now mprotect forked region to RWX so it becomes the source for the
	 * merge to unfaulted region:
	 *
	 *  forked RWX       RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * This should NOT result in a merge, as ptr was forked.
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC), 0);
	/* Again, make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);
}
511
/*
 * Regression test: mremap() moves of portions of a file-backed VMA that has a
 * uprobe registered on it must not fail when the moved pieces are merged back.
 * Skips if the uprobe perf event source is unavailable.
 */
TEST_F(merge, handle_uprobe_upon_merged_vma)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	unsigned int page_size = self->page_size;
	const char *probe_file = "./foo";	/* Temporary file carrying the uprobe. */
	char *carveout = self->carveout;
	struct perf_event_attr attr;
	unsigned long type;
	void *ptr1, *ptr2;
	int fd;

	fd = open(probe_file, O_RDWR|O_CREAT, 0600);
	ASSERT_GE(fd, 0);

	ASSERT_EQ(ftruncate(fd, page_size), 0);
	if (read_sysfs("/sys/bus/event_source/devices/uprobe/type", &type) != 0) {
		SKIP(goto out, "Failed to read uprobe sysfs file, skipping");
	}

	memset(&attr, 0, attr_sz);
	attr.size = attr_sz;
	attr.type = type;
	attr.config1 = (__u64)(long)probe_file;	/* Path of the probed file. */
	attr.config2 = 0x0;			/* Probe at offset 0. */

	/*
	 * NOTE(review): the perf event fd is never closed — presumably
	 * intentional, so the uprobe stays registered for the mremap()s below;
	 * it is released on process exit. Confirm.
	 */
	ASSERT_GE(syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0), 0);

	ptr1 = mmap(&carveout[page_size], 10 * page_size, PROT_EXEC,
		    MAP_PRIVATE | MAP_FIXED, fd, 0);
	ASSERT_NE(ptr1, MAP_FAILED);

	/* Move the first page into the middle of the mapping... */
	ptr2 = mremap(ptr1, page_size, 2 * page_size,
		      MREMAP_MAYMOVE | MREMAP_FIXED, ptr1 + 5 * page_size);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* ...and back again, which must succeed despite the uprobe. */
	ASSERT_NE(mremap(ptr2, page_size, page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr1), MAP_FAILED);

out:
	close(fd);
	remove(probe_file);
}
554
/*
 * VMAs marked VM_MERGEABLE via PR_SET_MEMORY_MERGE (KSM) must still undergo
 * ordinary VMA merging with compatible neighbours.
 */
TEST_F(merge, ksm_merge)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;
	int err;

	/*
	 * Map two R/W immediately adjacent to one another, they should
	 * trivially merge:
	 *
	 * |-----------|-----------|
	 * |    R/W    |    R/W    |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */

	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[2 * page_size], page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);

	/* Unmap the second half of this merged VMA. */
	ASSERT_EQ(munmap(ptr2, page_size), 0);

	/* OK, now enable global KSM merge. We clear this on test teardown. */
	err = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
	if (err == -1) {
		/* Save errno immediately — the ASSERT below may clobber it. */
		int errnum = errno;

		/* Only non-failure case... */
		ASSERT_EQ(errnum, EINVAL);
		/* ...but indicates we should skip. */
		SKIP(return, "KSM memory merging not supported, skipping.");
	}

	/*
	 * Now map a VMA adjacent to the existing that was just made
	 * VM_MERGEABLE, this should merge as well.
	 */
	ptr2 = mmap(&carveout[2 * page_size], page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);

	/* Now unmap this VMA altogether. */
	ASSERT_EQ(munmap(ptr, 2 * page_size), 0);

	/* Try the same operation as before, asserting this also merges fine. */
	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[2 * page_size], page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
}
625
/*
 * mremap()ing an unfaulted VMA to a position immediately AFTER a faulted VMA
 * should merge the two.
 */
TEST_F(merge, mremap_unfaulted_to_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map two distinct areas:
	 *
	 * |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|
	 *      ptr            ptr2
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                \
	 * |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |
	 * |-----------|  /  |-----------|
	 *      ptr       \       ptr2
	 */
	ptr[0] = 'x';

	/*
	 * Now move ptr2 adjacent to ptr:
	 *
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * It should merge:
	 *
	 * |----------------------|
	 * |       faulted        |
	 * |----------------------|
	 *      ptr
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}
686
/*
 * mremap()ing an unfaulted VMA to a position immediately BEFORE a faulted VMA
 * should merge the two.
 */
TEST_F(merge, mremap_unfaulted_behind_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map two distinct areas:
	 *
	 * |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|
	 *      ptr            ptr2
	 */
	ptr = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                \
	 * |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |
	 * |-----------|  /  |-----------|
	 *      ptr       \       ptr2
	 */
	ptr[0] = 'x';

	/*
	 * Now move ptr2 adjacent, but behind, ptr:
	 *
	 * |-----------|-----------|
	 * | unfaulted |  faulted  |
	 * |-----------|-----------|
	 *      ptr2        ptr
	 *
	 * It should merge:
	 *
	 * |----------------------|
	 * |       faulted        |
	 * |----------------------|
	 *      ptr2
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);
}
747
/*
 * mremap()ing an unfaulted VMA between two faulted VMAs should merge with the
 * preceding one only (the two faulted VMAs each have their own anon_vma, so
 * cannot merge with each other).
 */
TEST_F(merge, mremap_unfaulted_between_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map three distinct areas:
	 *
	 * |-----------|  |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|  |-----------|
	 *      ptr            ptr2           ptr3
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr3 further away. */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr, ptr3:
	 *                \                 \
	 * |-----------|  /  |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |  \  |  faulted  |
	 * |-----------|  /  |-----------|  /  |-----------|
	 *      ptr       \       ptr2      \       ptr3
	 */
	ptr[0] = 'x';
	ptr3[0] = 'x';

	/*
	 * Move ptr3 back into place, leaving a place for ptr2:
	 *                                \
	 * |-----------|  |-----------|   /  |-----------|
	 * |  faulted  |  |  faulted  |   \  | unfaulted |
	 * |-----------|  |-----------|   /  |-----------|
	 *      ptr            ptr3       \       ptr2
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge, but only ptr, ptr2 (ptr3 remains separate, faulted
	 * with its own anon_vma):
	 *
	 * |-----------------------|-----------|
	 * |        faulted        |  faulted  |
	 * |-----------------------|-----------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr3));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr3);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr3 + 5 * page_size);
}
832
/*
 * mremap()ing an unfaulted VMA between a faulted VMA and an unfaulted VMA
 * should merge all three into a single VMA.
 */
TEST_F(merge, mremap_unfaulted_between_faulted_unfaulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map three distinct areas:
	 *
	 * |-----------|  |-----------|  |-----------|
	 * | unfaulted |  | unfaulted |  | unfaulted |
	 * |-----------|  |-----------|  |-----------|
	 *      ptr            ptr2           ptr3
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr3 further away. */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);


	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                \                 \
	 * |-----------|  /  |-----------|  /  |-----------|
	 * |  faulted  |  \  | unfaulted |  \  | unfaulted |
	 * |-----------|  /  |-----------|  /  |-----------|
	 *      ptr       \       ptr2      \       ptr3
	 */
	ptr[0] = 'x';

	/*
	 * Move ptr3 back into place, leaving a place for ptr2:
	 *                                \
	 * |-----------|  |-----------|   /  |-----------|
	 * |  faulted  |  | unfaulted |   \  | unfaulted |
	 * |-----------|  |-----------|   /  |-----------|
	 *      ptr            ptr3       \       ptr2
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted | unfaulted |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
913
/*
 * mremap()ing an unfaulted VMA between two faulted VMAs that share an
 * anon_vma and have matching vm_pgoff (they were split from a single mapping)
 * should merge all three back into one VMA.
 */
TEST_F(merge, mremap_unfaulted_between_correctly_placed_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map one larger area:
	 *
	 * |-----------------------------------|
	 * |             unfaulted             |
	 * |-----------------------------------|
	 */
	ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr[0] = 'x';

	/*
	 * Unmap middle:
	 *
	 * |-----------|           |-----------|
	 * |  faulted  |           |  faulted  |
	 * |-----------|           |-----------|
	 *
	 * Now the faulted areas are compatible with each other (anon_vma the
	 * same, vma->vm_pgoff equal to virtual page offset).
	 */
	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);

	/*
	 * Map a new area, ptr2:
	 *                                  \
	 * |-----------|  |-----------|     /  |-----------|
	 * |  faulted  |  |  faulted  |     \  | unfaulted |
	 * |-----------|  |-----------|     /  |-----------|
	 *      ptr                         \       ptr2
	 */
	ptr2 = mmap(&carveout[20 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2     (tail of ptr)
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
987
/*
 * Verify that mremap()'ing a faulted VMA back into a position adjacent to
 * compatible faulted neighbours merges it with the previous VMA, the next
 * VMA, or both, confirming each resulting span via PROCMAP_QUERY.
 */
TEST_F(merge, mremap_correct_placed_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map one larger area:
	 *
	 * |-----------------------------------|
	 * |             unfaulted             |
	 * |-----------------------------------|
	 */
	ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr[0] = 'x';

	/*
	 * Offset the final and middle 5 pages further away:
	 *                       \              \
	 * |-----------|         / |-----------| / |-----------|
	 * |  faulted  |         \ |  faulted  | \ |  faulted  |
	 * |-----------|         / |-----------| / |-----------|
	 *      ptr              \     ptr2      \     ptr3
	 */
	/* 1000/2000 pages away is far outside the 30-page carveout, so the
	 * moved chunks cannot be adjacent to anything we care about. */
	ptr3 = &ptr[10 * page_size];
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);
	ptr2 = &ptr[5 * page_size];
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Move ptr2 into its correct place:
	 *                                      \
	 * |-----------|-----------|            / |-----------|
	 * |  faulted  |  faulted  |            \ |  faulted  |
	 * |-----------|-----------|            / |-----------|
	 *      ptr         ptr2                \     ptr3
	 *
	 * It should merge:
	 *                                      \
	 * |-----------------------|            / |-----------|
	 * |        faulted        |            \ |  faulted  |
	 * |-----------------------|            / |-----------|
	 *      ptr                             \     ptr3
	 */

	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	/*
	 * Now move ptr out of place:
	 *                       \              \
	 * |-----------|         / |-----------| / |-----------|
	 * |  faulted  |         \ |  faulted  | \ |  faulted  |
	 * |-----------|         / |-----------| / |-----------|
	 *      ptr2             \     ptr       \     ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Now move ptr back into place:
	 *                                      \
	 * |-----------|-----------|            / |-----------|
	 * |  faulted  |  faulted  |            \ |  faulted  |
	 * |-----------|-----------|            / |-----------|
	 *      ptr         ptr2                \     ptr3
	 *
	 * It should merge:
	 *                                      \
	 * |-----------------------|            / |-----------|
	 * |        faulted        |            \ |  faulted  |
	 * |-----------------------|            / |-----------|
	 *      ptr                             \     ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	/*
	 * Now move ptr out of place again:
	 *                       \              \
	 * |-----------|         / |-----------| / |-----------|
	 * |  faulted  |         \ |  faulted  | \ |  faulted  |
	 * |-----------|         / |-----------| / |-----------|
	 *      ptr2             \     ptr       \     ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Now move ptr3 back into place:
	 *                                      \
	 * |-----------|-----------|            / |-----------|
	 * |  faulted  |  faulted  |            \ |  faulted  |
	 * |-----------|-----------|            / |-----------|
	 *      ptr2        ptr3                \     ptr
	 *
	 * It should merge:
	 *                                      \
	 * |-----------------------|            / |-----------|
	 * |        faulted        |            \ |  faulted  |
	 * |-----------------------|            / |-----------|
	 *      ptr2                            \     ptr
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr2[5 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);

	/*
	 * Now move ptr back into place:
	 *
	 * |-----------|-----------------------|
	 * |  faulted  |        faulted        |
	 * |-----------|-----------------------|
	 *      ptr         ptr2
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *      ptr
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);

	/*
	 * Now move ptr2 out of the way:
	 *                                      \
	 * |-----------|           |-----------| / |-----------|
	 * |  faulted  |           |  faulted  | \ |  faulted  |
	 * |-----------|           |-----------| / |-----------|
	 *      ptr                     ptr3     \     ptr2
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Now move it back:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *      ptr
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);

	/*
	 * Move ptr3 out of place:
	 *                                      \
	 * |-----------------------|            / |-----------|
	 * |        faulted        |            \ |  faulted  |
	 * |-----------------------|            / |-----------|
	 *      ptr                             \     ptr3
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 1000);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Now move it back:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *      ptr
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
1219
TEST_F(merge_with_fork,mremap_faulted_to_unfaulted_prev)1220 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
1221 {
1222 struct procmap_fd *procmap = &self->procmap;
1223 unsigned int page_size = self->page_size;
1224 unsigned long offset;
1225 char *ptr_a, *ptr_b;
1226
1227 /*
1228 * mremap() such that A and B merge:
1229 *
1230 * |------------|
1231 * | \ |
1232 * |-----------| | / |---------|
1233 * | unfaulted | v \ | faulted |
1234 * |-----------| / |---------|
1235 * B \ A
1236 */
1237
1238 /* Map VMA A into place. */
1239 ptr_a = mmap(&self->carveout[page_size + 3 * page_size],
1240 3 * page_size,
1241 PROT_READ | PROT_WRITE,
1242 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1243 ASSERT_NE(ptr_a, MAP_FAILED);
1244 /* Fault it in. */
1245 ptr_a[0] = 'x';
1246
1247 if (variant->forked) {
1248 pid_t pid = do_fork(&self->procmap);
1249
1250 ASSERT_NE(pid, -1);
1251 if (pid != 0)
1252 return;
1253 }
1254
1255 /*
1256 * Now move it out of the way so we can place VMA B in position,
1257 * unfaulted.
1258 */
1259 ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1260 MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
1261 ASSERT_NE(ptr_a, MAP_FAILED);
1262
1263 /* Map VMA B into place. */
1264 ptr_b = mmap(&self->carveout[page_size], 3 * page_size,
1265 PROT_READ | PROT_WRITE,
1266 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1267 ASSERT_NE(ptr_b, MAP_FAILED);
1268
1269 /*
1270 * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
1271 * anon_vma propagation.
1272 */
1273 ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1274 MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
1275 &self->carveout[page_size + 3 * page_size]);
1276 ASSERT_NE(ptr_a, MAP_FAILED);
1277
1278 /* The VMAs should have merged, if not forked. */
1279 ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
1280 ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
1281
1282 offset = variant->forked ? 3 * page_size : 6 * page_size;
1283 ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + offset);
1284 }
1285
TEST_F(merge_with_fork,mremap_faulted_to_unfaulted_next)1286 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_next)
1287 {
1288 struct procmap_fd *procmap = &self->procmap;
1289 unsigned int page_size = self->page_size;
1290 unsigned long offset;
1291 char *ptr_a, *ptr_b;
1292
1293 /*
1294 * mremap() such that A and B merge:
1295 *
1296 * |---------------------------|
1297 * | \ |
1298 * | |-----------| / |---------|
1299 * v | unfaulted | \ | faulted |
1300 * |-----------| / |---------|
1301 * B \ A
1302 *
1303 * Then unmap VMA A to trigger the bug.
1304 */
1305
1306 /* Map VMA A into place. */
1307 ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
1308 PROT_READ | PROT_WRITE,
1309 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1310 ASSERT_NE(ptr_a, MAP_FAILED);
1311 /* Fault it in. */
1312 ptr_a[0] = 'x';
1313
1314 if (variant->forked) {
1315 pid_t pid = do_fork(&self->procmap);
1316
1317 ASSERT_NE(pid, -1);
1318 if (pid != 0)
1319 return;
1320 }
1321
1322 /*
1323 * Now move it out of the way so we can place VMA B in position,
1324 * unfaulted.
1325 */
1326 ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1327 MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
1328 ASSERT_NE(ptr_a, MAP_FAILED);
1329
1330 /* Map VMA B into place. */
1331 ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
1332 PROT_READ | PROT_WRITE,
1333 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1334 ASSERT_NE(ptr_b, MAP_FAILED);
1335
1336 /*
1337 * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
1338 * anon_vma propagation.
1339 */
1340 ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
1341 MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
1342 &self->carveout[page_size]);
1343 ASSERT_NE(ptr_a, MAP_FAILED);
1344
1345 /* The VMAs should have merged, if not forked. */
1346 ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
1347 ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
1348 offset = variant->forked ? 3 * page_size : 6 * page_size;
1349 ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);
1350 }
1351
TEST_F(merge_with_fork,mremap_faulted_to_unfaulted_prev_unfaulted_next)1352 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_unfaulted_next)
1353 {
1354 struct procmap_fd *procmap = &self->procmap;
1355 unsigned int page_size = self->page_size;
1356 unsigned long offset;
1357 char *ptr_a, *ptr_b, *ptr_c;
1358
1359 /*
1360 * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
1361 *
1362 * |---------------------------|
1363 * | \ |
1364 * |-----------| | |-----------| / |---------|
1365 * | unfaulted | v | unfaulted | \ | faulted |
1366 * |-----------| |-----------| / |---------|
1367 * A C \ B
1368 */
1369
1370 /* Map VMA B into place. */
1371 ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
1372 PROT_READ | PROT_WRITE,
1373 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1374 ASSERT_NE(ptr_b, MAP_FAILED);
1375 /* Fault it in. */
1376 ptr_b[0] = 'x';
1377
1378 if (variant->forked) {
1379 pid_t pid = do_fork(&self->procmap);
1380
1381 ASSERT_NE(pid, -1);
1382 if (pid != 0)
1383 return;
1384 }
1385
1386 /*
1387 * Now move it out of the way so we can place VMAs A, C in position,
1388 * unfaulted.
1389 */
1390 ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
1391 MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
1392 ASSERT_NE(ptr_b, MAP_FAILED);
1393
1394 /* Map VMA A into place. */
1395
1396 ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
1397 PROT_READ | PROT_WRITE,
1398 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1399 ASSERT_NE(ptr_a, MAP_FAILED);
1400
1401 /* Map VMA C into place. */
1402 ptr_c = mmap(&self->carveout[page_size + 3 * page_size + 3 * page_size],
1403 3 * page_size, PROT_READ | PROT_WRITE,
1404 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1405 ASSERT_NE(ptr_c, MAP_FAILED);
1406
1407 /*
1408 * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
1409 * anon_vma propagation.
1410 */
1411 ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
1412 MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
1413 &self->carveout[page_size + 3 * page_size]);
1414 ASSERT_NE(ptr_b, MAP_FAILED);
1415
1416 /* The VMAs should have merged, if not forked. */
1417 ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
1418 ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
1419 offset = variant->forked ? 3 * page_size : 9 * page_size;
1420 ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);
1421
1422 /* If forked, B and C should also not have merged. */
1423 if (variant->forked) {
1424 ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
1425 ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
1426 ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 3 * page_size);
1427 }
1428 }
1429
TEST_F(merge_with_fork,mremap_faulted_to_unfaulted_prev_faulted_next)1430 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_faulted_next)
1431 {
1432 struct procmap_fd *procmap = &self->procmap;
1433 unsigned int page_size = self->page_size;
1434 char *ptr_a, *ptr_b, *ptr_bc;
1435
1436 /*
1437 * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
1438 *
1439 * |---------------------------|
1440 * | \ |
1441 * |-----------| | |-----------| / |---------|
1442 * | unfaulted | v | faulted | \ | faulted |
1443 * |-----------| |-----------| / |---------|
1444 * A C \ B
1445 */
1446
1447 /*
1448 * Map VMA B and C into place. We have to map them together so their
1449 * anon_vma is the same and the vma->vm_pgoff's are correctly aligned.
1450 */
1451 ptr_bc = mmap(&self->carveout[page_size + 3 * page_size],
1452 3 * page_size + 3 * page_size,
1453 PROT_READ | PROT_WRITE,
1454 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1455 ASSERT_NE(ptr_bc, MAP_FAILED);
1456
1457 /* Fault it in. */
1458 ptr_bc[0] = 'x';
1459
1460 if (variant->forked) {
1461 pid_t pid = do_fork(&self->procmap);
1462
1463 ASSERT_NE(pid, -1);
1464 if (pid != 0)
1465 return;
1466 }
1467
1468 /*
1469 * Now move VMA B out the way (splitting VMA BC) so we can place VMA A
1470 * in position, unfaulted, and leave the remainder of the VMA we just
1471 * moved in place, faulted, as VMA C.
1472 */
1473 ptr_b = mremap(ptr_bc, 3 * page_size, 3 * page_size,
1474 MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
1475 ASSERT_NE(ptr_b, MAP_FAILED);
1476
1477 /* Map VMA A into place. */
1478 ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
1479 PROT_READ | PROT_WRITE,
1480 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
1481 ASSERT_NE(ptr_a, MAP_FAILED);
1482
1483 /*
1484 * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
1485 * anon_vma propagation.
1486 */
1487 ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
1488 MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
1489 &self->carveout[page_size + 3 * page_size]);
1490 ASSERT_NE(ptr_b, MAP_FAILED);
1491
1492 /* The VMAs should have merged. A,B,C if unforked, B, C if forked. */
1493 if (variant->forked) {
1494 ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
1495 ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
1496 ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 6 * page_size);
1497 } else {
1498 ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
1499 ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
1500 ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + 9 * page_size);
1501 }
1502 }
1503
/* Harness entry point provided by kselftest_harness.h; runs all tests above. */
TEST_HARNESS_MAIN