1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright 2020 Google LLC
4 */
5 #define _GNU_SOURCE
6
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/userfaultfd.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/ioctl.h>
14 #include <sys/mman.h>
15 #include <syscall.h>
16 #include <time.h>
17 #include <stdbool.h>
18
19 #include "../kselftest.h"
20
#define EXPECT_SUCCESS 0
#define EXPECT_FAILURE 1
#define NON_OVERLAPPING 0
#define OVERLAPPING 1
#define NS_PER_SEC 1000000000ULL
#define VALIDATION_DEFAULT_THRESHOLD 4	/* 4MB */
#define VALIDATION_NO_THRESHOLD 0	/* Verify the entire region */

#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
/*
 * Parenthesize the macro argument so expressions such as SIZE_MB(x + 1)
 * expand correctly; the unparenthesized form bound '*' tighter than the
 * caller's expression.
 */
#define SIZE_MB(m) ((size_t)(m) * (1024 * 1024))
#define SIZE_KB(k) ((size_t)(k) * 1024)
35
/* Parameters describing one mremap timing/correctness run. */
struct config {
	unsigned long long src_alignment;	/* required alignment of the source mapping */
	unsigned long long dest_alignment;	/* required alignment of the destination */
	unsigned long long region_size;		/* bytes to remap */
	int overlapping;			/* NON_OVERLAPPING or OVERLAPPING */
	unsigned int dest_preamble_size;	/* bytes mapped immediately before the dest */
};
43
/* A named test case: its mremap configuration and expected outcome. */
struct test {
	const char *name;	/* human-readable description printed in results */
	struct config config;	/* mremap parameters for this case */
	int expect_failure;	/* EXPECT_SUCCESS or EXPECT_FAILURE */
};
49
/* Region sizes/alignments used to build the test matrix. */
enum {
	_1KB = 1ULL << 10,	/* 1KB -> not page aligned */
	_4KB = 4ULL << 10,
	_8KB = 8ULL << 10,
	_1MB = 1ULL << 20,
	_2MB = 2ULL << 20,
	_4MB = 4ULL << 20,
	_5MB = 5ULL << 20,
	_1GB = 1ULL << 30,
	_2GB = 2ULL << 30,
	PMD = _2MB,	/* NOTE(review): 2MB/1GB match x86-64 PMD/PUD page */
	PUD = _1GB,	/* levels — confirm for other architectures */
};

/* PTE-level alignment is the runtime page size (a variable, not a constant). */
#define PTE page_size
65
/*
 * Construct a struct test compound literal from its fields; used to
 * populate the static test arrays concisely.
 */
#define MAKE_TEST(source_align, destination_align, size,	\
		  overlaps, should_fail, test_name)		\
(struct test){							\
	.name = test_name,					\
	.config = {						\
		.src_alignment = source_align,			\
		.dest_alignment = destination_align,		\
		.region_size = size,				\
		.overlapping = overlaps,			\
	},							\
	.expect_failure = should_fail				\
}
78
79 /* compute square root using binary search */
/*
 * Compute the integer square root of val by binary search: returns the
 * exact root for perfect squares, otherwise the smallest integer whose
 * square exceeds val.
 */
static unsigned long get_sqrt(unsigned long val)
{
	unsigned long low = 1;

	/* assuming rand_size is less than 1TB */
	unsigned long high = (1UL << 20);

	while (low <= high) {
		unsigned long mid = low + (high - low) / 2;
		unsigned long temp = mid * mid;

		if (temp == val)
			return mid;
		if (temp < val)
			low = mid + 1;
		else
			/*
			 * Fix: only shrink the upper bound when mid is too
			 * large. Previously this ran unconditionally, so a
			 * single temp < val iteration collapsed the window
			 * and returned a wrong result (e.g. 9 for val=100).
			 */
			high = mid - 1;
	}
	return low;
}
99
100 /*
101 * Returns false if the requested remap region overlaps with an
102 * existing mapping (e.g text, stack) else returns true.
103 */
/*
 * Probe whether [addr, addr + size) is free of existing mappings
 * (e.g. text, stack). Returns false if the range overlaps an existing
 * mapping, true otherwise.
 */
static bool is_remap_region_valid(void *addr, unsigned long long size)
{
	void *probe;

	/* MAP_FIXED_NOREPLACE fails with EEXIST if anything is mapped here. */
	probe = mmap(addr, size, PROT_READ | PROT_WRITE,
		     MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
		     -1, 0);

	if (probe == MAP_FAILED)
		return errno != EEXIST;

	/* The probe succeeded; undo it before reporting the range as free. */
	munmap(probe, size);
	return true;
}
123
124 /* Returns mmap_min_addr sysctl tunable from procfs */
get_mmap_min_addr(void)125 static unsigned long long get_mmap_min_addr(void)
126 {
127 FILE *fp;
128 int n_matched;
129 static unsigned long long addr;
130
131 if (addr)
132 return addr;
133
134 fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
135 if (fp == NULL) {
136 ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
137 strerror(errno));
138 exit(KSFT_SKIP);
139 }
140
141 n_matched = fscanf(fp, "%llu", &addr);
142 if (n_matched != 1) {
143 ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
144 strerror(errno));
145 fclose(fp);
146 exit(KSFT_SKIP);
147 }
148
149 fclose(fp);
150 return addr;
151 }
152
153 /*
154 * Using /proc/self/maps, assert that the specified address range is contained
155 * within a single mapping.
156 */
/*
 * Using /proc/self/maps, check that the specified address range
 * [start, end) is contained within a single mapping.
 *
 * maps_fp must be an open stream on /proc/self/maps; it is rewound here
 * so the caller can reuse one stream across calls.
 */
static bool is_range_mapped(FILE *maps_fp, unsigned long start,
			    unsigned long end)
{
	char *line = NULL;
	size_t len = 0;
	bool success = false;
	unsigned long first_val, second_val;

	rewind(maps_fp);

	while (getline(&line, &len, maps_fp) != -1) {
		/* Each line starts with "start-end" in hex. */
		if (sscanf(line, "%lx-%lx", &first_val, &second_val) != 2) {
			ksft_exit_fail_msg("cannot parse /proc/self/maps\n");
			break;
		}

		if (first_val <= start && second_val >= end) {
			success = true;
			fflush(maps_fp);
			break;
		}
	}

	/* Fix: getline() allocates the line buffer; free it to avoid leaking
	 * on every call. */
	free(line);
	return success;
}
182
183 /* Check if [ptr, ptr + size) mapped in /proc/self/maps. */
/* Check if [ptr, ptr + size) lies within one mapping in /proc/self/maps. */
static bool is_ptr_mapped(FILE *maps_fp, void *ptr, unsigned long size)
{
	unsigned long begin = (unsigned long)ptr;

	return is_range_mapped(maps_fp, begin, begin + size);
}
191
192 /*
193 * Returns the start address of the mapping on success, else returns
194 * NULL on failure.
195 */
/*
 * Map an anonymous shared region of c.region_size bytes whose address has
 * exactly the alignment c.src_alignment, scanning candidate addresses
 * upward from just above mmap_min_addr.
 *
 * Returns the start address of the mapping on success, else returns
 * NULL on failure.
 */
static void *get_source_mapping(struct config c)
{
	unsigned long long addr = 0ULL;
	void *src_addr = NULL;
	unsigned long long mmap_min_addr;

	mmap_min_addr = get_mmap_min_addr();
	/*
	 * For some tests, we need to not have any mappings below the
	 * source mapping. Add some headroom to mmap_min_addr for this.
	 */
	mmap_min_addr += 10 * _4MB;

retry:
	addr += c.src_alignment;
	if (addr < mmap_min_addr)
		goto retry;

	/* MAP_FIXED_NOREPLACE fails (EEXIST) instead of clobbering mappings. */
	src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
					MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
					-1, 0);
	if (src_addr == MAP_FAILED) {
		if (errno == EPERM || errno == EEXIST)
			goto retry;
		goto error;
	}
	/*
	 * Check that the address is aligned to the specified alignment.
	 * Addresses which have alignments that are multiples of that
	 * specified are not considered valid. For instance, 1GB address is
	 * 2MB-aligned, however it will not be considered valid for a
	 * requested alignment of 2MB. This is done to reduce coincidental
	 * alignment in the tests.
	 */
	if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
			!((unsigned long long) src_addr & c.src_alignment)) {
		munmap(src_addr, c.region_size);
		goto retry;
	}

	/*
	 * NOTE(review): src_addr cannot be NULL at this point given the
	 * checks above; this guard appears redundant but is kept as-is.
	 */
	if (!src_addr)
		goto error;

	return src_addr;
error:
	ksft_print_msg("Failed to map source region: %s\n",
		       strerror(errno));
	return NULL;
}
245
246 /*
247 * This test validates that merge is called when expanding a mapping.
248 * Mapping containing three pages is created, middle page is unmapped
249 * and then the mapping containing the first page is expanded so that
250 * it fills the created hole. The two parts should merge creating
251 * single mapping with three pages.
252 */
/*
 * This test validates that merge is called when expanding a mapping.
 * A three-page mapping is created, the middle page is unmapped, and the
 * first page is then expanded in place to fill the hole. The two
 * remaining pieces should merge back into a single three-page mapping.
 */
static void mremap_expand_merge(FILE *maps_fp, unsigned long page_size)
{
	char *test_name = "mremap expand merge";
	char *base, *expanded;
	bool merged = false;

	base = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		ksft_print_msg("mmap failed: %s\n", strerror(errno));
		goto report;
	}

	/* Punch a hole where the middle page was. */
	munmap(base + page_size, page_size);

	/* Grow the first page into the hole, in place (no flags). */
	expanded = mremap(base, page_size, 2 * page_size, 0);
	if (expanded == MAP_FAILED) {
		ksft_print_msg("mremap failed: %s\n", strerror(errno));
		munmap(base, page_size);
		munmap(base + 2 * page_size, page_size);
		goto report;
	}

	merged = is_range_mapped(maps_fp, (unsigned long)base,
				 (unsigned long)(base + 3 * page_size));
	munmap(base, 3 * page_size);

report:
	if (merged)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
286
287 /*
288 * Similar to mremap_expand_merge() except instead of removing the middle page,
289 * we remove the last then attempt to remap offset from the second page. This
290 * should result in the mapping being restored to its former state.
291 */
/*
 * Similar to mremap_expand_merge() except instead of removing the middle
 * page, we remove the last, then expand starting from the second page.
 * This should restore the mapping to its former three-page state.
 */
static void mremap_expand_merge_offset(FILE *maps_fp, unsigned long page_size)
{
	char *test_name = "mremap expand merge offset";
	char *base, *expanded;
	bool merged = false;

	base = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		ksft_print_msg("mmap failed: %s\n", strerror(errno));
		goto report;
	}

	/* Unmap final page to ensure we have space to expand into. */
	munmap(base + 2 * page_size, page_size);

	/* Expand the second page; it should re-merge with the first. */
	expanded = mremap(base + page_size, page_size, 2 * page_size, 0);
	if (expanded == MAP_FAILED) {
		ksft_print_msg("mremap failed: %s\n", strerror(errno));
		munmap(base, 2 * page_size);
		goto report;
	}

	merged = is_range_mapped(maps_fp, (unsigned long)base,
				 (unsigned long)(base + 3 * page_size));
	munmap(base, 3 * page_size);

report:
	if (merged)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
326
327 /*
328 * Verify that an mremap within a range does not cause corruption
329 * of unrelated part of range.
330 *
331 * Consider the following range which is 2MB aligned and is
332 * a part of a larger 20MB range which is not shown. Each
333 * character is 256KB below making the source and destination
334 * 2MB each. The lower case letters are moved (s to d) and the
335 * upper case letters are not moved. The below test verifies
336 * that the upper case S letters are not corrupted by the
337 * adjacent mremap.
338 *
339 * |DDDDddddSSSSssss|
340 */
static void mremap_move_within_range(unsigned int pattern_seed, char *rand_addr)
{
	char *test_name = "mremap mremap move within range";
	void *src, *dest;
	unsigned int i, success = 1;

	size_t size = SIZE_MB(20);
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		success = 0;
		goto out;
	}
	memset(ptr, 0, size);

	/* Pick a 2MB-aligned source block inside the 20MB region. */
	src = ptr + SIZE_MB(6);
	src = (void *)((unsigned long)src & ~(SIZE_MB(2) - 1));

	/* Set byte pattern for source block.
	 * (rand_addr is assumed to be pre-filled by the caller with rand()
	 * bytes seeded with pattern_seed — verify against caller.) */
	memcpy(src, rand_addr, SIZE_MB(2));

	dest = src - SIZE_MB(2);

	/* Move only the second MB of src onto the second MB of dest. */
	void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1),
				   MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1));
	if (new_ptr == MAP_FAILED) {
		perror("mremap");
		success = 0;
		goto out;
	}

	/* Verify byte pattern after remapping: the first MB of src (the
	 * "SSSS" part of the diagram above) must be untouched. */
	srand(pattern_seed);
	for (i = 0; i < SIZE_MB(1); i++) {
		char c = (char) rand();

		if (((char *)src)[i] != c) {
			ksft_print_msg("Data at src at %d got corrupted due to unrelated mremap\n",
				       i);
			ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
				       ((char *) src)[i] & 0xff);
			success = 0;
		}
	}

out:
	if (munmap(ptr, size) == -1)
		perror("munmap");

	if (success)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
396
/*
 * Verify the random byte pattern laid down over the even-numbered pages
 * of the multi-VMA layout (page 4 spans two pages), re-deriving the
 * expected bytes from pattern_seed. Returns false on the first mismatch.
 */
static bool is_multiple_vma_range_ok(unsigned int pattern_seed,
				     char *ptr, unsigned long page_size)
{
	int page;

	srand(pattern_seed);
	for (page = 0; page <= 10; page += 2) {
		int off;
		char *buf = &ptr[page * page_size];
		/* Page 4 was left as a double-length VMA. */
		size_t span = page == 4 ? 2 * page_size : page_size;

		for (off = 0; off < span; off++) {
			char expect = rand();

			if (expect != buf[off]) {
				ksft_print_msg("page %d offset %d corrupted, expected %d got %d\n",
					       page, off, expect, buf[off]);
				return false;
			}
		}
	}

	return true;
}
421
/*
 * Move a span covering several distinct VMAs (with gaps between them) in
 * a single mremap() call: first to a fresh target, then adjacent to
 * itself, then on top of an existing mapping. After each move the random
 * byte pattern must be intact.
 */
static void mremap_move_multiple_vmas(unsigned int pattern_seed,
				      unsigned long page_size,
				      bool dont_unmap)
{
	int mremap_flags = MREMAP_FIXED | MREMAP_MAYMOVE;
	char *test_name = "mremap move multiple vmas";
	const size_t size = 11 * page_size;
	bool success = true;
	char *ptr, *tgt_ptr;
	int i;

	if (dont_unmap)
		mremap_flags |= MREMAP_DONTUNMAP;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out;
	}

	/* Reserve 2 * size of address space, then unmap it so mremap()
	 * has a known-free range to land in. */
	tgt_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANON, -1, 0);
	if (tgt_ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out;
	}
	if (munmap(tgt_ptr, 2 * size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}

	/*
	 * Unmap so we end up with:
	 *
	 *  0   2   4 5   6   8   10 offset in buffer
	 * |*| |*| |*****| |*| |*|
	 * |*| |*| |*****| |*| |*|
	 *  0   1   2 3    4   5     pattern offset
	 */
	for (i = 1; i < 10; i += 2) {
		if (i == 5)
			continue;

		if (munmap(&ptr[i * page_size], page_size)) {
			perror("munmap");
			success = false;
			goto out_unmap;
		}
	}

	srand(pattern_seed);

	/* Set up random patterns. */
	for (i = 0; i <= 10; i += 2) {
		int j;
		/* NOTE(review): this `size` intentionally shadows the outer
		 * `size` local; page 4 is a double-length VMA. */
		size_t size = i == 4 ? 2 * page_size : page_size;
		char *buf = &ptr[i * page_size];

		for (j = 0; j < size; j++)
			buf[j] = rand();
	}

	/* First, just move the whole thing. */
	if (mremap(ptr, size, size, mremap_flags, tgt_ptr) == MAP_FAILED) {
		perror("mremap");
		success = false;
		goto out_unmap;
	}
	/* Check move was ok. */
	if (!is_multiple_vma_range_ok(pattern_seed, tgt_ptr, page_size)) {
		success = false;
		goto out_unmap;
	}

	/* Move next to itself. */
	if (mremap(tgt_ptr, size, size, mremap_flags,
		   &tgt_ptr[size]) == MAP_FAILED) {
		perror("mremap");
		success = false;
		goto out_unmap;
	}
	/* Check that the move is ok. */
	if (!is_multiple_vma_range_ok(pattern_seed, &tgt_ptr[size], page_size)) {
		success = false;
		goto out_unmap;
	}

	/* Map a range to overwrite. */
	if (mmap(tgt_ptr, size, PROT_NONE,
		 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
		perror("mmap tgt");
		success = false;
		goto out_unmap;
	}
	/* Move and overwrite. */
	if (mremap(&tgt_ptr[size], size, size,
		   mremap_flags, tgt_ptr) == MAP_FAILED) {
		perror("mremap");
		success = false;
		goto out_unmap;
	}
	/* Check that the move is ok. */
	if (!is_multiple_vma_range_ok(pattern_seed, tgt_ptr, page_size)) {
		success = false;
		goto out_unmap;
	}

out_unmap:
	/* munmap() of already-unmapped ranges succeeds, so this is safe. */
	if (munmap(tgt_ptr, 2 * size))
		perror("munmap tgt");
	if (munmap(ptr, size))
		perror("munmap src");

out:
	if (success)
		ksft_test_result_pass("%s%s\n", test_name,
				      dont_unmap ? " [dontunnmap]" : "");
	else
		ksft_test_result_fail("%s%s\n", test_name,
				      dont_unmap ? " [dontunnmap]" : "");
}
547
/*
 * Shrink a range spanning several VMAs and the gaps between them down to
 * a single page, either in place or moved to a fresh target address.
 * Only mmap()/mremap() errors fail the test; the shrink itself is
 * expected to succeed.
 */
static void mremap_shrink_multiple_vmas(unsigned long page_size,
					bool inplace)
{
	char *test_name = "mremap shrink multiple vmas";
	const size_t size = 10 * page_size;
	bool success = true;
	char *ptr, *tgt_ptr;
	void *res;
	int i;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out;
	}

	/* Reserve a target range, then unmap it so the !inplace variant has
	 * a known-free destination. */
	tgt_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANON, -1, 0);
	if (tgt_ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out;
	}
	if (munmap(tgt_ptr, size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}

	/*
	 * Unmap so we end up with:
	 *
	 *  0   2   4   6   8   10  offset in buffer
	 * |*| |*| |*| |*| |*| |*|
	 * |*| |*| |*| |*| |*| |*|
	 */
	for (i = 1; i < 10; i += 2) {
		if (munmap(&ptr[i * page_size], page_size)) {
			perror("munmap");
			success = false;
			goto out_unmap;
		}
	}

	/*
	 * Shrink in-place across multiple VMAs and gaps so we end up with:
	 *
	 * 0
	 * |*|
	 * |*|
	 */
	if (inplace)
		res = mremap(ptr, size, page_size, 0);
	else
		res = mremap(ptr, size, page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
			     tgt_ptr);

	if (res == MAP_FAILED) {
		perror("mremap");
		success = false;
		goto out_unmap;
	}

out_unmap:
	/* munmap() of already-unmapped ranges succeeds, so this is safe. */
	if (munmap(tgt_ptr, size))
		perror("munmap tgt");
	if (munmap(ptr, size))
		perror("munmap src");
out:
	if (success)
		ksft_test_result_pass("%s%s\n", test_name,
				      inplace ? " [inplace]" : "");
	else
		ksft_test_result_fail("%s%s\n", test_name,
				      inplace ? " [inplace]" : "");
}
626
/*
 * Move a span whose edges fall inside VMAs, forcing the first and last
 * VMAs to be split by the move. The moved data must land intact at the
 * target, offset by two pages of the random pattern.
 */
static void mremap_move_multiple_vmas_split(unsigned int pattern_seed,
					    unsigned long page_size,
					    bool dont_unmap)
{
	char *test_name = "mremap move multiple vmas split";
	int mremap_flags = MREMAP_FIXED | MREMAP_MAYMOVE;
	const size_t size = 10 * page_size;
	bool success = true;
	char *ptr, *tgt_ptr;
	int i;

	if (dont_unmap)
		mremap_flags |= MREMAP_DONTUNMAP;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out;
	}

	/* Reserve a target range, then unmap it so mremap() can land there. */
	tgt_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANON, -1, 0);
	if (tgt_ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out;
	}
	if (munmap(tgt_ptr, size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}

	/*
	 * Unmap so we end up with:
	 *
	 * 0  1  2  3  4  5  6  7  8  9 10  offset in buffer
	 * |**********|  |*******|
	 * |**********|  |*******|
	 * 0  1  2  3  4  5  6  7  8  9     pattern offset
	 */
	if (munmap(&ptr[5 * page_size], page_size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}

	/* Set up random patterns. */
	srand(pattern_seed);
	for (i = 0; i < 10; i++) {
		int j;
		char *buf = &ptr[i * page_size];

		if (i == 5)
			continue;

		for (j = 0; j < page_size; j++)
			buf[j] = rand();
	}

	/*
	 * Move the below:
	 *
	 *       <------------->
	 * 0  1  2  3  4  5  6  7  8  9 10  offset in buffer
	 * |**********|  |*******|
	 * |**********|  |*******|
	 * 0  1  2  3  4  5  6  7  8  9     pattern offset
	 *
	 * Into:
	 *
	 * 0  1  2  3  4  5  6  7  offset in buffer
	 * |*****|  |*****|
	 * |*****|  |*****|
	 * 2  3  4     5  6  7     pattern offset
	 */
	if (mremap(&ptr[2 * page_size], size - 3 * page_size, size - 3 * page_size,
		   mremap_flags, tgt_ptr) == MAP_FAILED) {
		perror("mremap");
		success = false;
		goto out_unmap;
	}

	/* Offset into random pattern (skip the two unmoved leading pages). */
	srand(pattern_seed);
	for (i = 0; i < 2 * page_size; i++)
		rand();

	/* Check pattern. */
	for (i = 0; i < 7; i++) {
		int j;
		char *buf = &tgt_ptr[i * page_size];

		if (i == 3)
			continue;

		for (j = 0; j < page_size; j++) {
			char chr = rand();

			if (chr != buf[j]) {
				ksft_print_msg("page %d offset %d corrupted, expected %d got %d\n",
					       i, j, chr, buf[j]);
				/*
				 * Fix: record the failure before bailing out;
				 * previously this path left success == true,
				 * so a corrupted move still reported PASS.
				 */
				success = false;
				goto out_unmap;
			}
		}
	}

out_unmap:
	if (munmap(tgt_ptr, size))
		perror("munmap tgt");
	if (munmap(ptr, size))
		perror("munmap src");
out:
	if (success)
		ksft_test_result_pass("%s%s\n", test_name,
				      dont_unmap ? " [dontunnmap]" : "");
	else
		ksft_test_result_fail("%s%s\n", test_name,
				      dont_unmap ? " [dontunnmap]" : "");
}
749
750 #ifdef __NR_userfaultfd
/*
 * Multi-VMA moves must be refused when a VMA in the span is invalid for
 * the operation (here: userfaultfd-armed). Verify that:
 *  1. A span of all-invalid VMAs moves nothing and fails with EFAULT.
 *  2. A single invalid VMA can still be moved on its own.
 *  3. With a valid first VMA, mremap() moves VMAs up to the first
 *     invalid one, then reports EFAULT.
 */
static void mremap_move_multi_invalid_vmas(FILE *maps_fp,
					   unsigned long page_size)
{
	char *test_name = "mremap move multiple invalid vmas";
	const size_t size = 10 * page_size;
	bool success = true;
	char *ptr, *tgt_ptr;
	int uffd, err, i;
	void *res;
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_EVENT_PAGEFAULT,
	};

	uffd = syscall(__NR_userfaultfd, O_NONBLOCK);
	if (uffd == -1) {
		err = errno;
		perror("userfaultfd");
		if (err == EPERM) {
			ksft_test_result_skip("%s - missing uffd", test_name);
			return;
		}
		success = false;
		goto out;
	}
	if (ioctl(uffd, UFFDIO_API, &api)) {
		perror("ioctl UFFDIO_API");
		success = false;
		goto out_close_uffd;
	}

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out_close_uffd;
	}

	/* Reserve a target range, then unmap it so mremap() can claim it. */
	tgt_ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (tgt_ptr == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out_close_uffd;
	}
	if (munmap(tgt_ptr, size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}

	/*
	 * Unmap so we end up with:
	 *
	 *  0   2   4   6   8   10 offset in buffer
	 * |*| |*| |*| |*| |*|
	 * |*| |*| |*| |*| |*|
	 *
	 * Additionally, register each with UFFD.
	 */
	for (i = 0; i < 10; i += 2) {
		void *unmap_ptr = &ptr[(i + 1) * page_size];
		unsigned long start = (unsigned long)&ptr[i * page_size];
		struct uffdio_register reg = {
			.range = {
				.start = start,
				.len = page_size,
			},
			.mode = UFFDIO_REGISTER_MODE_MISSING,
		};

		if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
			perror("ioctl UFFDIO_REGISTER");
			success = false;
			goto out_unmap;
		}
		if (munmap(unmap_ptr, page_size)) {
			perror("munmap");
			success = false;
			goto out_unmap;
		}
	}

	/*
	 * Now try to move the entire range which is invalid for multi VMA move.
	 *
	 * This will fail, and no VMA should be moved, as we check this ahead of
	 * time.
	 */
	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
	err = errno;
	if (res != MAP_FAILED) {
		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
		success = false;
		goto out_unmap;
	}
	if (err != EFAULT) {
		errno = err;
		/* Fix: message typo "mrmeap" -> "mremap" (three sites). */
		perror("mremap() unexpected error");
		success = false;
		goto out_unmap;
	}
	if (is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
		fprintf(stderr,
			"Invalid uffd-armed VMA at start of multi range moved\n");
		success = false;
		goto out_unmap;
	}

	/*
	 * Now try to move a single VMA, this should succeed as not multi VMA
	 * move.
	 */
	res = mremap(ptr, page_size, page_size,
		     MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
	if (res == MAP_FAILED) {
		perror("mremap single invalid-multi VMA");
		success = false;
		goto out_unmap;
	}

	/*
	 * Unmap the VMA, and remap a non-uffd registered (therefore, multi VMA
	 * move valid) VMA at the start of ptr range.
	 */
	if (munmap(tgt_ptr, page_size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}
	res = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	if (res == MAP_FAILED) {
		perror("mmap");
		success = false;
		goto out_unmap;
	}

	/*
	 * Now try to move the entire range, we should succeed in moving the
	 * first VMA, but no others, and report a failure.
	 */
	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
	err = errno;
	if (res != MAP_FAILED) {
		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
		success = false;
		goto out_unmap;
	}
	if (err != EFAULT) {
		errno = err;
		perror("mremap() unexpected error");
		success = false;
		goto out_unmap;
	}
	if (!is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
		fprintf(stderr, "Valid VMA not moved\n");
		success = false;
		goto out_unmap;
	}

	/*
	 * Unmap the VMA, and map valid VMA at start of ptr range, and replace
	 * all existing multi-move invalid VMAs, except the last, with valid
	 * multi-move VMAs.
	 */
	if (munmap(tgt_ptr, page_size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}
	if (munmap(ptr, size - 2 * page_size)) {
		perror("munmap");
		success = false;
		goto out_unmap;
	}
	for (i = 0; i < 8; i += 2) {
		res = mmap(&ptr[i * page_size], page_size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
		if (res == MAP_FAILED) {
			perror("mmap");
			success = false;
			goto out_unmap;
		}
	}

	/*
	 * Now try to move the entire range, we should succeed in moving all but
	 * the last VMA, and report a failure.
	 */
	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
	err = errno;
	if (res != MAP_FAILED) {
		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
		success = false;
		goto out_unmap;
	}
	if (err != EFAULT) {
		errno = err;
		perror("mremap() unexpected error");
		success = false;
		goto out_unmap;
	}

	for (i = 0; i < 10; i += 2) {
		bool is_mapped = is_ptr_mapped(maps_fp,
					       &tgt_ptr[i * page_size], page_size);

		if (i < 8 && !is_mapped) {
			fprintf(stderr, "Valid VMA not moved at %d\n", i);
			success = false;
			goto out_unmap;
		} else if (i == 8 && is_mapped) {
			fprintf(stderr, "Invalid VMA moved at %d\n", i);
			success = false;
			goto out_unmap;
		}
	}

out_unmap:
	if (munmap(tgt_ptr, size))
		perror("munmap tgt");
	if (munmap(ptr, size))
		perror("munmap src");
out_close_uffd:
	close(uffd);
out:
	if (success)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
984 #else
/* Stub: the userfaultfd syscall is unavailable, so skip this test. */
static void mremap_move_multi_invalid_vmas(FILE *maps_fp, unsigned long page_size)
{
	ksft_test_result_skip("%s - missing uffd",
			      "mremap move multiple invalid vmas");
}
991 #endif /* __NR_userfaultfd */
992
993 /* Returns the time taken for the remap on success else returns -1. */
/*
 * Time a single mremap() of a c.region_size source region to a destination
 * chosen per c (alignment, overlap, optional preamble mapping), then
 * verify that the first `threshold` bytes — and the preamble, if any —
 * arrived intact.
 *
 * Returns the time taken for the remap on success else returns -1.
 */
static long long remap_region(struct config c, unsigned int threshold_mb,
			      char *rand_addr)
{
	void *addr, *src_addr, *dest_addr, *dest_preamble_addr = NULL;
	unsigned long long t, d;
	struct timespec t_start = {0, 0}, t_end = {0, 0};
	long long start_ns, end_ns, align_mask, ret, offset;
	unsigned long long threshold;
	unsigned long num_chunks;

	/* Number of bytes of the region whose contents get validated. */
	if (threshold_mb == VALIDATION_NO_THRESHOLD)
		threshold = c.region_size;
	else
		threshold = MIN(threshold_mb * _1MB, c.region_size);

	src_addr = get_source_mapping(c);
	if (!src_addr) {
		ret = -1;
		goto out;
	}

	/* Set byte pattern for source block. */
	memcpy(src_addr, rand_addr, threshold);

	/* Mask to zero out lower bits of address for alignment */
	align_mask = ~(c.dest_alignment - 1);
	/* Offset of destination address from the end of the source region */
	offset = (c.overlapping) ? -c.dest_alignment : c.dest_alignment;
	addr = (void *) (((unsigned long long) src_addr + c.region_size
			  + offset) & align_mask);

	/* Remap after the destination block preamble. */
	addr += c.dest_preamble_size;

	/* See comment in get_source_mapping() */
	if (!((unsigned long long) addr & c.dest_alignment))
		addr = (void *) ((unsigned long long) addr | c.dest_alignment);

	/* Don't destroy existing mappings unless expected to overlap */
	while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
		/*
		 * Check for unsigned overflow.
		 * NOTE(review): void * arithmetic and this wraparound
		 * comparison rely on GCC/Clang extensions.
		 */
		if (addr + c.dest_alignment < addr) {
			ksft_print_msg("Couldn't find a valid region to remap to\n");
			ret = -1;
			goto clean_up_src;
		}
		addr += c.dest_alignment;
	}

	if (c.dest_preamble_size) {
		dest_preamble_addr = mmap((void *) addr - c.dest_preamble_size, c.dest_preamble_size,
					  PROT_READ | PROT_WRITE,
					  MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
					  -1, 0);
		if (dest_preamble_addr == MAP_FAILED) {
			ksft_print_msg("Failed to map dest preamble region: %s\n",
				       strerror(errno));
			ret = -1;
			goto clean_up_src;
		}

		/* Set byte pattern for the dest preamble block. */
		memcpy(dest_preamble_addr, rand_addr, c.dest_preamble_size);
	}

	/* Timed section: the mremap() call itself. */
	clock_gettime(CLOCK_MONOTONIC, &t_start);
	dest_addr = mremap(src_addr, c.region_size, c.region_size,
			   MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
	clock_gettime(CLOCK_MONOTONIC, &t_end);

	if (dest_addr == MAP_FAILED) {
		ksft_print_msg("mremap failed: %s\n", strerror(errno));
		ret = -1;
		goto clean_up_dest_preamble;
	}

	/*
	 * Verify byte pattern after remapping. Employ an algorithm with a
	 * square root time complexity in threshold: divide the range into
	 * chunks, if memcmp() returns non-zero, only then perform an
	 * iteration in that chunk to find the mismatch index.
	 */
	num_chunks = get_sqrt(threshold);
	for (unsigned long i = 0; i < num_chunks; ++i) {
		size_t chunk_size = threshold / num_chunks;
		unsigned long shift = i * chunk_size;

		if (!memcmp(dest_addr + shift, rand_addr + shift, chunk_size))
			continue;

		/* brute force iteration only over mismatch segment */
		for (t = shift; t < shift + chunk_size; ++t) {
			if (((char *) dest_addr)[t] != rand_addr[t]) {
				ksft_print_msg("Data after remap doesn't match at offset %llu\n",
					       t);
				ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[t] & 0xff,
					       ((char *) dest_addr)[t] & 0xff);
				ret = -1;
				goto clean_up_dest;
			}
		}
	}

	/*
	 * if threshold is not divisible by num_chunks, then check the
	 * last chunk
	 */
	for (t = num_chunks * (threshold / num_chunks); t < threshold; ++t) {
		if (((char *) dest_addr)[t] != rand_addr[t]) {
			ksft_print_msg("Data after remap doesn't match at offset %llu\n",
				       t);
			ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[t] & 0xff,
				       ((char *) dest_addr)[t] & 0xff);
			ret = -1;
			goto clean_up_dest;
		}
	}

	/* Verify the dest preamble byte pattern after remapping */
	if (!c.dest_preamble_size)
		goto no_preamble;

	num_chunks = get_sqrt(c.dest_preamble_size);

	for (unsigned long i = 0; i < num_chunks; ++i) {
		size_t chunk_size = c.dest_preamble_size / num_chunks;
		unsigned long shift = i * chunk_size;

		if (!memcmp(dest_preamble_addr + shift, rand_addr + shift,
			    chunk_size))
			continue;

		/* brute force iteration only over mismatched segment */
		for (d = shift; d < shift + chunk_size; ++d) {
			if (((char *) dest_preamble_addr)[d] != rand_addr[d]) {
				ksft_print_msg("Preamble data after remap doesn't match at offset %llu\n",
					       d);
				ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[d] & 0xff,
					       ((char *) dest_preamble_addr)[d] & 0xff);
				ret = -1;
				goto clean_up_dest;
			}
		}
	}

	for (d = num_chunks * (c.dest_preamble_size / num_chunks); d < c.dest_preamble_size; ++d) {
		if (((char *) dest_preamble_addr)[d] != rand_addr[d]) {
			ksft_print_msg("Preamble data after remap doesn't match at offset %llu\n",
				       d);
			ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[d] & 0xff,
				       ((char *) dest_preamble_addr)[d] & 0xff);
			ret = -1;
			goto clean_up_dest;
		}
	}

no_preamble:
	start_ns = t_start.tv_sec * NS_PER_SEC + t_start.tv_nsec;
	end_ns = t_end.tv_sec * NS_PER_SEC + t_end.tv_nsec;
	ret = end_ns - start_ns;

	/*
	 * Since the destination address is specified using MREMAP_FIXED, subsequent
	 * mremap will unmap any previous mapping at the address range specified by
	 * dest_addr and region_size. This significantly affects the remap time of
	 * subsequent tests. So we clean up mappings after each test.
	 */
clean_up_dest:
	munmap(dest_addr, c.region_size);
clean_up_dest_preamble:
	if (c.dest_preamble_size && dest_preamble_addr)
		munmap(dest_preamble_addr, c.dest_preamble_size);
clean_up_src:
	munmap(src_addr, c.region_size);
out:
	return ret;
}
1171
1172 /*
1173 * Verify that an mremap aligning down does not destroy
1174 * the beginning of the mapping just because the aligned
1175 * down address landed on a mapping that maybe does not exist.
1176 */
mremap_move_1mb_from_start(unsigned int pattern_seed,char * rand_addr)1177 static void mremap_move_1mb_from_start(unsigned int pattern_seed,
1178 char *rand_addr)
1179 {
1180 char *test_name = "mremap move 1mb from start at 1MB+256KB aligned src";
1181 void *src = NULL, *dest = NULL;
1182 unsigned int i, success = 1;
1183
1184 /* Config to reuse get_source_mapping() to do an aligned mmap. */
1185 struct config c = {
1186 .src_alignment = SIZE_MB(1) + SIZE_KB(256),
1187 .region_size = SIZE_MB(6)
1188 };
1189
1190 src = get_source_mapping(c);
1191 if (!src) {
1192 success = 0;
1193 goto out;
1194 }
1195
1196 c.src_alignment = SIZE_MB(1) + SIZE_KB(256);
1197 dest = get_source_mapping(c);
1198 if (!dest) {
1199 success = 0;
1200 goto out;
1201 }
1202
1203 /* Set byte pattern for source block. */
1204 memcpy(src, rand_addr, SIZE_MB(2));
1205
1206 /*
1207 * Unmap the beginning of dest so that the aligned address
1208 * falls on no mapping.
1209 */
1210 munmap(dest, SIZE_MB(1));
1211
1212 void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1),
1213 MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1));
1214 if (new_ptr == MAP_FAILED) {
1215 perror("mremap");
1216 success = 0;
1217 goto out;
1218 }
1219
1220 /* Verify byte pattern after remapping */
1221 srand(pattern_seed);
1222 for (i = 0; i < SIZE_MB(1); i++) {
1223 char c = (char) rand();
1224
1225 if (((char *)src)[i] != c) {
1226 ksft_print_msg("Data at src at %d got corrupted due to unrelated mremap\n",
1227 i);
1228 ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
1229 ((char *) src)[i] & 0xff);
1230 success = 0;
1231 }
1232 }
1233
1234 out:
1235 if (src && munmap(src, c.region_size) == -1)
1236 perror("munmap src");
1237
1238 if (dest && munmap(dest, c.region_size) == -1)
1239 perror("munmap dest");
1240
1241 if (success)
1242 ksft_test_result_pass("%s\n", test_name);
1243 else
1244 ksft_test_result_fail("%s\n", test_name);
1245 }
1246
run_mremap_test_case(struct test test_case,int * failures,unsigned int threshold_mb,char * rand_addr)1247 static void run_mremap_test_case(struct test test_case, int *failures,
1248 unsigned int threshold_mb,
1249 char *rand_addr)
1250 {
1251 long long remap_time = remap_region(test_case.config, threshold_mb,
1252 rand_addr);
1253
1254 if (remap_time < 0) {
1255 if (test_case.expect_failure)
1256 ksft_test_result_xfail("%s\n\tExpected mremap failure\n",
1257 test_case.name);
1258 else {
1259 ksft_test_result_fail("%s\n", test_case.name);
1260 *failures += 1;
1261 }
1262 } else {
1263 /*
1264 * Comparing mremap time is only applicable if entire region
1265 * was faulted in.
1266 */
1267 if (threshold_mb == VALIDATION_NO_THRESHOLD ||
1268 test_case.config.region_size <= threshold_mb * _1MB)
1269 ksft_test_result_pass("%s\n\tmremap time: %12lldns\n",
1270 test_case.name, remap_time);
1271 else
1272 ksft_test_result_pass("%s\n", test_case.name);
1273 }
1274 }
1275
/* Print command-line usage for this test binary to stderr. */
static void usage(const char *cmd)
{
	static const char help_fmt[] =
		"Usage: %s [[-t <threshold_mb>] [-p <pattern_seed>]]\n"
		"-t\t only validate threshold_mb of the remapped region\n"
		" \t if 0 is supplied no threshold is used; all tests\n"
		" \t are run and remapped regions validated fully.\n"
		" \t The default threshold used is 4MB.\n"
		"-p\t provide a seed to generate the random pattern for\n"
		" \t validating the remapped region.\n";

	fprintf(stderr, help_fmt, cmd);
}
1287
parse_args(int argc,char ** argv,unsigned int * threshold_mb,unsigned int * pattern_seed)1288 static int parse_args(int argc, char **argv, unsigned int *threshold_mb,
1289 unsigned int *pattern_seed)
1290 {
1291 const char *optstr = "t:p:";
1292 int opt;
1293
1294 while ((opt = getopt(argc, argv, optstr)) != -1) {
1295 switch (opt) {
1296 case 't':
1297 *threshold_mb = atoi(optarg);
1298 break;
1299 case 'p':
1300 *pattern_seed = atoi(optarg);
1301 break;
1302 default:
1303 usage(argv[0]);
1304 return -1;
1305 }
1306 }
1307
1308 if (optind < argc) {
1309 usage(argv[0]);
1310 return -1;
1311 }
1312
1313 return 0;
1314 }
1315
/* Sizes of the fixed test tables built in main() below. */
#define MAX_TEST 15
#define MAX_PERF_TEST 3
/*
 * Entry point: size and fill the shared random-pattern buffer, build the
 * table of alignment/size mremap test cases, run them, run the expand/
 * merge and misc mremap tests, and finally the 1GB performance
 * comparisons when the validation threshold allows them.
 */
int main(int argc, char **argv)
{
	int failures = 0;
	unsigned int i;
	int run_perf_tests;
	unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD;

	/* hard-coded test configs */
	size_t max_test_variable_region_size = _2GB;
	size_t max_test_constant_region_size = _2MB;
	size_t dest_preamble_size = 10 * _4MB;

	unsigned int pattern_seed;
	char *rand_addr;	/* preallocated stream of random bytes */
	size_t rand_size;
	int num_expand_tests = 2;
	int num_misc_tests = 9;
	struct test test_cases[MAX_TEST] = {};
	struct test perf_test_cases[MAX_PERF_TEST];
	int page_size;
	time_t t;
	FILE *maps_fp;

	/* Default seed is the current time; -p overrides it in parse_args(). */
	pattern_seed = (unsigned int) time(&t);

	if (parse_args(argc, argv, &threshold_mb, &pattern_seed) < 0)
		exit(EXIT_FAILURE);

	ksft_print_msg("Test configs:\n\tthreshold_mb=%u\n\tpattern_seed=%u\n\n",
		       threshold_mb, pattern_seed);

	/*
	 * set preallocated random array according to test configs; see the
	 * functions for the logic of setting the size
	 */
	if (!threshold_mb)
		rand_size = MAX(max_test_variable_region_size,
				max_test_constant_region_size);
	else
		rand_size = MAX(MIN(threshold_mb * _1MB,
				    max_test_variable_region_size),
				max_test_constant_region_size);
	rand_size = MAX(dest_preamble_size, rand_size);

	rand_addr = (char *)mmap(NULL, rand_size, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (rand_addr == MAP_FAILED) {
		perror("mmap");
		ksft_exit_fail_msg("cannot mmap rand_addr\n");
	}

	/* fill stream of random bytes */
	srand(pattern_seed);
	for (unsigned long i = 0; i < rand_size; ++i)
		rand_addr[i] = (char) rand();

	page_size = sysconf(_SC_PAGESIZE);

	/* Expected mremap failures */
	test_cases[0] = MAKE_TEST(page_size, page_size, page_size,
				  OVERLAPPING, EXPECT_FAILURE,
				  "mremap - Source and Destination Regions Overlapping");

	test_cases[1] = MAKE_TEST(page_size, page_size/4, page_size,
				  NON_OVERLAPPING, EXPECT_FAILURE,
				  "mremap - Destination Address Misaligned (1KB-aligned)");
	test_cases[2] = MAKE_TEST(page_size/4, page_size, page_size,
				  NON_OVERLAPPING, EXPECT_FAILURE,
				  "mremap - Source Address Misaligned (1KB-aligned)");

	/* Src addr PTE aligned */
	test_cases[3] = MAKE_TEST(PTE, PTE, PTE * 2,
				  NON_OVERLAPPING, EXPECT_SUCCESS,
				  "8KB mremap - Source PTE-aligned, Destination PTE-aligned");

	/* Src addr 1MB aligned */
	test_cases[4] = MAKE_TEST(_1MB, PTE, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "2MB mremap - Source 1MB-aligned, Destination PTE-aligned");
	test_cases[5] = MAKE_TEST(_1MB, _1MB, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "2MB mremap - Source 1MB-aligned, Destination 1MB-aligned");

	/* Src addr PMD aligned */
	test_cases[6] = MAKE_TEST(PMD, PTE, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "4MB mremap - Source PMD-aligned, Destination PTE-aligned");
	test_cases[7] = MAKE_TEST(PMD, _1MB, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "4MB mremap - Source PMD-aligned, Destination 1MB-aligned");
	test_cases[8] = MAKE_TEST(PMD, PMD, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "4MB mremap - Source PMD-aligned, Destination PMD-aligned");

	/* Src addr PUD aligned */
	test_cases[9] = MAKE_TEST(PUD, PTE, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "2GB mremap - Source PUD-aligned, Destination PTE-aligned");
	test_cases[10] = MAKE_TEST(PUD, _1MB, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "2GB mremap - Source PUD-aligned, Destination 1MB-aligned");
	test_cases[11] = MAKE_TEST(PUD, PMD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "2GB mremap - Source PUD-aligned, Destination PMD-aligned");
	test_cases[12] = MAKE_TEST(PUD, PUD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "2GB mremap - Source PUD-aligned, Destination PUD-aligned");

	/* Src and Dest addr 1MB aligned. 5MB mremap. */
	test_cases[13] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "5MB mremap - Source 1MB-aligned, Destination 1MB-aligned");

	/* Src and Dest addr 1MB aligned. 5MB mremap. */
	test_cases[14] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "5MB mremap - Source 1MB-aligned, Dest 1MB-aligned with 40MB Preamble");
	test_cases[14].config.dest_preamble_size = 10 * _4MB;

	perf_test_cases[0] = MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				       "1GB mremap - Source PTE-aligned, Destination PTE-aligned");
	/*
	 * mremap 1GB region - Page table level aligned time
	 * comparison.
	 */
	perf_test_cases[1] = MAKE_TEST(PMD, PMD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				       "1GB mremap - Source PMD-aligned, Destination PMD-aligned");
	perf_test_cases[2] = MAKE_TEST(PUD, PUD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				       "1GB mremap - Source PUD-aligned, Destination PUD-aligned");

	/* Perf cases only run if the entire 1GB region can be validated. */
	run_perf_tests = (threshold_mb == VALIDATION_NO_THRESHOLD) ||
			 (threshold_mb * _1MB >= _1GB);

	/* Plan = table cases (+ perf cases if run) + expand + misc tests. */
	ksft_set_plan(ARRAY_SIZE(test_cases) + (run_perf_tests ?
		      ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests + num_misc_tests);

	for (i = 0; i < ARRAY_SIZE(test_cases); i++)
		run_mremap_test_case(test_cases[i], &failures, threshold_mb,
				     rand_addr);

	maps_fp = fopen("/proc/self/maps", "r");

	if (maps_fp == NULL) {
		munmap(rand_addr, rand_size);
		ksft_exit_fail_msg("Failed to read /proc/self/maps: %s\n", strerror(errno));
	}

	mremap_expand_merge(maps_fp, page_size);
	mremap_expand_merge_offset(maps_fp, page_size);

	mremap_move_within_range(pattern_seed, rand_addr);
	mremap_move_1mb_from_start(pattern_seed, rand_addr);
	mremap_shrink_multiple_vmas(page_size, /* inplace= */true);
	mremap_shrink_multiple_vmas(page_size, /* inplace= */false);
	mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ false);
	mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ true);
	mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ false);
	mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ true);
	mremap_move_multi_invalid_vmas(maps_fp, page_size);

	fclose(maps_fp);

	if (run_perf_tests) {
		ksft_print_msg("\n%s\n",
			       "mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:");
		for (i = 0; i < ARRAY_SIZE(perf_test_cases); i++)
			run_mremap_test_case(perf_test_cases[i], &failures,
					     threshold_mb,
					     rand_addr);
	}

	munmap(rand_addr, rand_size);

	/* ksft_exit_*() terminate the process with the kselftest status. */
	if (failures > 0)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}
1485