xref: /linux/tools/testing/selftests/mm/mremap_test.c (revision 509d3f45847627f4c5cdce004c3ec79262b5239c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2020 Google LLC
4  */
5 #define _GNU_SOURCE
6 
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/userfaultfd.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/ioctl.h>
14 #include <sys/mman.h>
15 #include <syscall.h>
16 #include <time.h>
17 #include <stdbool.h>
18 
19 #include "kselftest.h"
20 
21 #define EXPECT_SUCCESS 0
22 #define EXPECT_FAILURE 1
23 #define NON_OVERLAPPING 0
24 #define OVERLAPPING 1
25 #define NS_PER_SEC 1000000000ULL
26 #define VALIDATION_DEFAULT_THRESHOLD 4	/* 4MB */
27 #define VALIDATION_NO_THRESHOLD 0	/* Verify the entire region */
28 
29 #ifndef MIN
30 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
31 #define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
32 #endif
33 #define SIZE_MB(m) ((size_t)m * (1024 * 1024))
34 #define SIZE_KB(k) ((size_t)k * 1024)
35 
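/*
 * Parameters for a single remap: the required byte alignment of the source
 * and destination addresses, the size of the region to move, whether the
 * destination deliberately overlaps the source, and the size of an optional
 * "preamble" mapping placed immediately before the destination whose
 * contents are validated after the move.
 */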
36 struct config {
37 	unsigned long long src_alignment;
38 	unsigned long long dest_alignment;
39 	unsigned long long region_size;
40 	int overlapping;
41 	unsigned int dest_preamble_size;
42 };
43 
44 struct test {
45 	const char *name;
46 	struct config config;
47 	int expect_failure;
48 };
49 
50 enum {
51 	_1KB = 1ULL << 10,	/* 1KB -> not page aligned */
52 	_4KB = 4ULL << 10,
53 	_8KB = 8ULL << 10,
54 	_1MB = 1ULL << 20,
55 	_2MB = 2ULL << 20,
56 	_4MB = 4ULL << 20,
57 	_5MB = 5ULL << 20,
58 	_1GB = 1ULL << 30,
59 	_2GB = 2ULL << 30,
60 	PMD = _2MB,
61 	PUD = _1GB,
62 };
63 
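/*
 * PTE "alignment" is simply the runtime page size; the macro expands to the
 * page_size variable in scope at the point of use (set in main() via
 * sysconf(_SC_PAGESIZE)).
 */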
64 #define PTE page_size
65 
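/*
 * Convenience initializer for a struct test. dest_preamble_size is left at
 * zero and can be set on the resulting struct where a preamble is wanted.
 */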
66 #define MAKE_TEST(source_align, destination_align, size,	\
67 		  overlaps, should_fail, test_name)		\
68 (struct test){							\
69 	.name = test_name,					\
70 	.config = {						\
71 		.src_alignment = source_align,			\
72 		.dest_alignment = destination_align,		\
73 		.region_size = size,				\
74 		.overlapping = overlaps,			\
75 	},							\
76 	.expect_failure = should_fail				\
77 }
78 
79 /* Compute the approximate integer square root of val using binary search. */
80 static unsigned long get_sqrt(unsigned long val)
81 {
82 	unsigned long low = 1;
83 
84 	/* assuming rand_size is less than 1TB */
85 	unsigned long high = (1UL << 20);
86 
87 	while (low <= high) {
88 		unsigned long mid = low + (high - low) / 2;
89 		unsigned long temp = mid * mid;
90 
91 		if (temp == val)
92 			return mid;
93 		if (temp < val)
94 			low = mid + 1;
95 		high = mid - 1;
96 	}
97 	return low;
98 }
99 
100 /*
101  * Returns false if the requested remap region overlaps with an
102  * existing mapping (e.g. text, stack); otherwise returns true.
103  */
104 static bool is_remap_region_valid(void *addr, unsigned long long size)
105 {
106 	void *remap_addr = NULL;
107 	bool ret = true;
108 
109 	/* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
110 	remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
111 					 MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
112 					 -1, 0);
113 
114 	if (remap_addr == MAP_FAILED) {
115 		if (errno == EEXIST)
116 			ret = false;
117 	} else {
118 		munmap(remap_addr, size);
119 	}
120 
121 	return ret;
122 }
123 
124 /* Returns mmap_min_addr sysctl tunable from procfs */
125 static unsigned long long get_mmap_min_addr(void)
126 {
127 	FILE *fp;
128 	int n_matched;
129 	static unsigned long long addr;
130 
131 	if (addr)
132 		return addr;
133 
134 	fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
135 	if (fp == NULL) {
136 		ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
137 			strerror(errno));
138 		exit(KSFT_SKIP);
139 	}
140 
141 	n_matched = fscanf(fp, "%llu", &addr);
142 	if (n_matched != 1) {
143 		ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
144 			strerror(errno));
145 		fclose(fp);
146 		exit(KSFT_SKIP);
147 	}
148 
149 	fclose(fp);
150 	return addr;
151 }
152 
153 /*
154  * Using /proc/self/maps, check that the specified address range is contained
155  * within a single mapping.
156  */
157 static bool is_range_mapped(FILE *maps_fp, unsigned long start,
158 			    unsigned long end)
159 {
160 	char *line = NULL;
161 	size_t len = 0;
162 	bool success = false;
163 	unsigned long first_val, second_val;
164 
165 	rewind(maps_fp);
166 
167 	while (getline(&line, &len, maps_fp) != -1) {
168 		if (sscanf(line, "%lx-%lx", &first_val, &second_val) != 2) {
169 			ksft_exit_fail_msg("cannot parse /proc/self/maps\n");
170 			break;
171 		}
172 
173 		if (first_val <= start && second_val >= end) {
174 			success = true;
175 			fflush(maps_fp);
176 			break;
177 		}
178 	}
179 
180 	return success;
181 }
182 
183 /* Check if [ptr, ptr + size) is mapped in /proc/self/maps. */
184 static bool is_ptr_mapped(FILE *maps_fp, void *ptr, unsigned long size)
185 {
186 	unsigned long start = (unsigned long)ptr;
187 	unsigned long end = start + size;
188 
189 	return is_range_mapped(maps_fp, start, end);
190 }
191 
192 /*
193  * Returns the start address of the mapping on success, else returns
194  * NULL on failure.
195  */
196 static void *get_source_mapping(struct config c)
197 {
198 	unsigned long long addr = 0ULL;
199 	void *src_addr = NULL;
200 	unsigned long long mmap_min_addr;
201 
202 	mmap_min_addr = get_mmap_min_addr();
203 	/*
204 	 * For some tests, we need to not have any mappings below the
205 	 * source mapping. Add some headroom to mmap_min_addr for this.
206 	 */
207 	mmap_min_addr += 10 * _4MB;
208 
209 retry:
210 	addr += c.src_alignment;
211 	if (addr < mmap_min_addr)
212 		goto retry;
213 
214 	src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
215 					MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
216 					-1, 0);
217 	if (src_addr == MAP_FAILED) {
218 		if (errno == EPERM || errno == EEXIST)
219 			goto retry;
220 		goto error;
221 	}
222 	/*
223 	 * Check that the address is aligned to the specified alignment.
224 	 * Addresses with alignments that are multiples of the one specified
225 	 * are not considered valid. For instance, a 1GB-aligned address is
226 	 * also 2MB-aligned, but it will not be considered valid for a
227 	 * requested alignment of 2MB. This is done to reduce coincidental
228 	 * alignment in the tests.
229 	 */
230 	if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
231 			!((unsigned long long) src_addr & c.src_alignment)) {
232 		munmap(src_addr, c.region_size);
233 		goto retry;
234 	}
235 
236 	if (!src_addr)
237 		goto error;
238 
239 	return src_addr;
240 error:
241 	ksft_print_msg("Failed to map source region: %s\n",
242 			strerror(errno));
243 	return NULL;
244 }
245 
246 /*
247  * This test validates that merge is called when expanding a mapping.
248  * A mapping containing three pages is created, the middle page is
249  * unmapped, and then the mapping containing the first page is expanded
250  * so that it fills the created hole. The two parts should merge,
251  * creating a single mapping with three pages.
252  */
253 static void mremap_expand_merge(FILE *maps_fp, unsigned long page_size)
254 {
255 	char *test_name = "mremap expand merge";
256 	bool success = false;
257 	char *remap, *start;
258 
259 	start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
260 		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
261 
262 	if (start == MAP_FAILED) {
263 		ksft_print_msg("mmap failed: %s\n", strerror(errno));
264 		goto out;
265 	}
266 
267 	munmap(start + page_size, page_size);
268 	remap = mremap(start, page_size, 2 * page_size, 0);
269 	if (remap == MAP_FAILED) {
270 		ksft_print_msg("mremap failed: %s\n", strerror(errno));
271 		munmap(start, page_size);
272 		munmap(start + 2 * page_size, page_size);
273 		goto out;
274 	}
275 
276 	success = is_range_mapped(maps_fp, (unsigned long)start,
277 				  (unsigned long)(start + 3 * page_size));
278 	munmap(start, 3 * page_size);
279 
280 out:
281 	if (success)
282 		ksft_test_result_pass("%s\n", test_name);
283 	else
284 		ksft_test_result_fail("%s\n", test_name);
285 }
286 
287 /*
288  * Similar to mremap_expand_merge() except instead of removing the middle page,
289  * we remove the last page, then attempt to remap at an offset from the second
290  * page. This should result in the mapping being restored to its former state.
291  */
292 static void mremap_expand_merge_offset(FILE *maps_fp, unsigned long page_size)
293 {
294 
295 	char *test_name = "mremap expand merge offset";
296 	bool success = false;
297 	char *remap, *start;
298 
299 	start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
300 		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
301 
302 	if (start == MAP_FAILED) {
303 		ksft_print_msg("mmap failed: %s\n", strerror(errno));
304 		goto out;
305 	}
306 
307 	/* Unmap final page to ensure we have space to expand. */
308 	munmap(start + 2 * page_size, page_size);
309 	remap = mremap(start + page_size, page_size, 2 * page_size, 0);
310 	if (remap == MAP_FAILED) {
311 		ksft_print_msg("mremap failed: %s\n", strerror(errno));
312 		munmap(start, 2 * page_size);
313 		goto out;
314 	}
315 
316 	success = is_range_mapped(maps_fp, (unsigned long)start,
317 				  (unsigned long)(start + 3 * page_size));
318 	munmap(start, 3 * page_size);
319 
320 out:
321 	if (success)
322 		ksft_test_result_pass("%s\n", test_name);
323 	else
324 		ksft_test_result_fail("%s\n", test_name);
325 }
326 
327 /*
328  * Verify that an mremap within a range does not cause corruption
329  * of an unrelated part of the range.
330  *
331  * Consider the following range, which is 2MB aligned and is
332  * part of a larger 20MB range which is not shown. Each
333  * character below represents 256KB, making the source and
334  * destination 2MB each. The lower case letters are moved (s to d)
335  * and the upper case letters are not moved. The test below verifies
336  * that the upper case S letters are not corrupted by the
337  * adjacent mremap.
338  *
339  * |DDDDddddSSSSssss|
340  */
341 static void mremap_move_within_range(unsigned int pattern_seed, char *rand_addr)
342 {
343 	char *test_name = "mremap move within range";
344 	void *src, *dest;
345 	unsigned int i, success = 1;
346 
347 	size_t size = SIZE_MB(20);
348 	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
349 			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
350 	if (ptr == MAP_FAILED) {
351 		perror("mmap");
352 		success = 0;
353 		goto out;
354 	}
355 	memset(ptr, 0, size);
356 
357 	src = ptr + SIZE_MB(6);
358 	src = (void *)((unsigned long)src & ~(SIZE_MB(2) - 1));
359 
360 	/* Set byte pattern for source block. */
361 	memcpy(src, rand_addr, SIZE_MB(2));
362 
363 	dest = src - SIZE_MB(2);
364 
365 	void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1),
366 						   MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1));
367 	if (new_ptr == MAP_FAILED) {
368 		perror("mremap");
369 		success = 0;
370 		goto out;
371 	}
372 
373 	/* Verify byte pattern after remapping */
374 	srand(pattern_seed);
375 	for (i = 0; i < SIZE_MB(1); i++) {
376 		char c = (char) rand();
377 
378 		if (((char *)src)[i] != c) {
379 			ksft_print_msg("Data at src at offset %d got corrupted due to unrelated mremap\n",
380 				       i);
381 			ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
382 					((char *) src)[i] & 0xff);
383 			success = 0;
384 		}
385 	}
386 
387 out:
388 	if (munmap(ptr, size) == -1)
389 		perror("munmap");
390 
391 	if (success)
392 		ksft_test_result_pass("%s\n", test_name);
393 	else
394 		ksft_test_result_fail("%s\n", test_name);
395 }
396 
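/*
 * Verify the seeded random pattern written by the multiple-VMA move tests:
 * pages at even offsets 0 through 10 (with a double-sized chunk at offset 4)
 * must still hold the expected bytes. Returns false on the first mismatch.
 */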
397 static bool is_multiple_vma_range_ok(unsigned int pattern_seed,
398 				     char *ptr, unsigned long page_size)
399 {
400 	int i;
401 
402 	srand(pattern_seed);
403 	for (i = 0; i <= 10; i += 2) {
404 		int j;
405 		char *buf = &ptr[i * page_size];
406 		size_t size = i == 4 ? 2 * page_size : page_size;
407 
408 		for (j = 0; j < size; j++) {
409 			char chr = rand();
410 
411 			if (chr != buf[j]) {
412 				ksft_print_msg("page %d offset %d corrupted, expected %d got %d\n",
413 					       i, j, chr, buf[j]);
414 				return false;
415 			}
416 		}
417 	}
418 
419 	return true;
420 }
421 
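/*
 * Map 11 pages, punch single-page holes so the range spans several VMAs
 * (with one double-sized VMA in the middle), fill the remaining pages with a
 * seeded random pattern, then move the whole range with
 * MREMAP_FIXED | MREMAP_MAYMOVE (optionally MREMAP_DONTUNMAP): first to a
 * fresh target, then next to itself, then over an existing mapping,
 * validating the pattern after each move.
 */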
422 static void mremap_move_multiple_vmas(unsigned int pattern_seed,
423 				      unsigned long page_size,
424 				      bool dont_unmap)
425 {
426 	int mremap_flags = MREMAP_FIXED | MREMAP_MAYMOVE;
427 	char *test_name = "mremap move multiple vmas";
428 	const size_t size = 11 * page_size;
429 	bool success = true;
430 	char *ptr, *tgt_ptr;
431 	int i;
432 
433 	if (dont_unmap)
434 		mremap_flags |= MREMAP_DONTUNMAP;
435 
436 	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
437 		   MAP_PRIVATE | MAP_ANON, -1, 0);
438 	if (ptr == MAP_FAILED) {
439 		perror("mmap");
440 		success = false;
441 		goto out;
442 	}
443 
444 	tgt_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
445 		       MAP_PRIVATE | MAP_ANON, -1, 0);
446 	if (tgt_ptr == MAP_FAILED) {
447 		perror("mmap");
448 		success = false;
449 		goto out;
450 	}
451 	if (munmap(tgt_ptr, 2 * size)) {
452 		perror("munmap");
453 		success = false;
454 		goto out_unmap;
455 	}
456 
457 	/*
458 	 * Unmap so we end up with:
459 	 *
460 	 *  0   2   4 5 6   8   10 offset in buffer
461 	 * |*| |*| |*****| |*| |*|
462 	 * |*| |*| |*****| |*| |*|
463 	 *  0   1   2 3 4   5   6  pattern offset
464 	 */
465 	for (i = 1; i < 10; i += 2) {
466 		if (i == 5)
467 			continue;
468 
469 		if (munmap(&ptr[i * page_size], page_size)) {
470 			perror("munmap");
471 			success = false;
472 			goto out_unmap;
473 		}
474 	}
475 
476 	srand(pattern_seed);
477 
478 	/* Set up random patterns. */
479 	for (i = 0; i <= 10; i += 2) {
480 		int j;
481 		size_t size = i == 4 ? 2 * page_size : page_size;
482 		char *buf = &ptr[i * page_size];
483 
484 		for (j = 0; j < size; j++)
485 			buf[j] = rand();
486 	}
487 
488 	/* First, just move the whole thing. */
489 	if (mremap(ptr, size, size, mremap_flags, tgt_ptr) == MAP_FAILED) {
490 		perror("mremap");
491 		success = false;
492 		goto out_unmap;
493 	}
494 	/* Check move was ok. */
495 	if (!is_multiple_vma_range_ok(pattern_seed, tgt_ptr, page_size)) {
496 		success = false;
497 		goto out_unmap;
498 	}
499 
500 	/* Move next to itself. */
501 	if (mremap(tgt_ptr, size, size, mremap_flags,
502 		   &tgt_ptr[size]) == MAP_FAILED) {
503 		perror("mremap");
504 		success = false;
505 		goto out_unmap;
506 	}
507 	/* Check that the move is ok. */
508 	if (!is_multiple_vma_range_ok(pattern_seed, &tgt_ptr[size], page_size)) {
509 		success = false;
510 		goto out_unmap;
511 	}
512 
513 	/* Map a range to overwrite. */
514 	if (mmap(tgt_ptr, size, PROT_NONE,
515 		 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
516 		perror("mmap tgt");
517 		success = false;
518 		goto out_unmap;
519 	}
520 	/* Move and overwrite. */
521 	if (mremap(&tgt_ptr[size], size, size,
522 		   mremap_flags, tgt_ptr) == MAP_FAILED) {
523 		perror("mremap");
524 		success = false;
525 		goto out_unmap;
526 	}
527 	/* Check that the move is ok. */
528 	if (!is_multiple_vma_range_ok(pattern_seed, tgt_ptr, page_size)) {
529 		success = false;
530 		goto out_unmap;
531 	}
532 
533 out_unmap:
534 	if (munmap(tgt_ptr, 2 * size))
535 		perror("munmap tgt");
536 	if (munmap(ptr, size))
537 		perror("munmap src");
538 
539 out:
540 	if (success)
541 		ksft_test_result_pass("%s%s\n", test_name,
542 				      dont_unmap ? " [dontunmap]" : "");
543 	else
544 		ksft_test_result_fail("%s%s\n", test_name,
545 				      dont_unmap ? " [dontunmap]" : "");
546 }
547 
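/*
 * Verify that mremap() can shrink a range spanning multiple VMAs and
 * unmapped gaps down to a single page, either in place or combined with a
 * move to a new address via MREMAP_MAYMOVE | MREMAP_FIXED.
 */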
548 static void mremap_shrink_multiple_vmas(unsigned long page_size,
549 					bool inplace)
550 {
551 	char *test_name = "mremap shrink multiple vmas";
552 	const size_t size = 10 * page_size;
553 	bool success = true;
554 	char *ptr, *tgt_ptr;
555 	void *res;
556 	int i;
557 
558 	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
559 		   MAP_PRIVATE | MAP_ANON, -1, 0);
560 	if (ptr == MAP_FAILED) {
561 		perror("mmap");
562 		success = false;
563 		goto out;
564 	}
565 
566 	tgt_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
567 		       MAP_PRIVATE | MAP_ANON, -1, 0);
568 	if (tgt_ptr == MAP_FAILED) {
569 		perror("mmap");
570 		success = false;
571 		goto out;
572 	}
573 	if (munmap(tgt_ptr, size)) {
574 		perror("munmap");
575 		success = false;
576 		goto out_unmap;
577 	}
578 
579 	/*
580 	 * Unmap so we end up with:
581 	 *
582 	 *  0   2   4   6   8   10 offset in buffer
583 	 * |*| |*| |*| |*| |*|
584 	 * |*| |*| |*| |*| |*|
585 	 */
586 	for (i = 1; i < 10; i += 2) {
587 		if (munmap(&ptr[i * page_size], page_size)) {
588 			perror("munmap");
589 			success = false;
590 			goto out_unmap;
591 		}
592 	}
593 
594 	/*
595 	 * Shrink in-place across multiple VMAs and gaps so we end up with:
596 	 *
597 	 *  0
598 	 * |*|
599 	 * |*|
600 	 */
601 	if (inplace)
602 		res = mremap(ptr, size, page_size, 0);
603 	else
604 		res = mremap(ptr, size, page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
605 			     tgt_ptr);
606 
607 	if (res == MAP_FAILED) {
608 		perror("mremap");
609 		success = false;
610 		goto out_unmap;
611 	}
612 
613 out_unmap:
614 	if (munmap(tgt_ptr, size))
615 		perror("munmap tgt");
616 	if (munmap(ptr, size))
617 		perror("munmap src");
618 out:
619 	if (success)
620 		ksft_test_result_pass("%s%s\n", test_name,
621 				      inplace ? " [inplace]" : "");
622 	else
623 		ksft_test_result_fail("%s%s\n", test_name,
624 				      inplace ? " [inplace]" : "");
625 }
626 
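/*
 * Create two VMAs separated by a single-page hole, then move a sub-range
 * whose start and end fall in the middle of those VMAs, forcing a split at
 * both ends, and verify the random pattern at the destination.
 */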
627 static void mremap_move_multiple_vmas_split(unsigned int pattern_seed,
628 					    unsigned long page_size,
629 					    bool dont_unmap)
630 {
631 	char *test_name = "mremap move multiple vmas split";
632 	int mremap_flags = MREMAP_FIXED | MREMAP_MAYMOVE;
633 	const size_t size = 10 * page_size;
634 	bool success = true;
635 	char *ptr, *tgt_ptr;
636 	int i;
637 
638 	if (dont_unmap)
639 		mremap_flags |= MREMAP_DONTUNMAP;
640 
641 	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
642 		   MAP_PRIVATE | MAP_ANON, -1, 0);
643 	if (ptr == MAP_FAILED) {
644 		perror("mmap");
645 		success = false;
646 		goto out;
647 	}
648 
649 	tgt_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
650 		       MAP_PRIVATE | MAP_ANON, -1, 0);
651 	if (tgt_ptr == MAP_FAILED) {
652 		perror("mmap");
653 		success = false;
654 		goto out;
655 	}
656 	if (munmap(tgt_ptr, size)) {
657 		perror("munmap");
658 		success = false;
659 		goto out_unmap;
660 	}
661 
662 	/*
663 	 * Unmap so we end up with:
664 	 *
665 	 *  0 1 2 3 4 5 6 7 8 9 10 offset in buffer
666 	 * |**********| |*******|
667 	 * |**********| |*******|
668 	 *  0 1 2 3 4   5 6 7 8 9  pattern offset
669 	 */
670 	if (munmap(&ptr[5 * page_size], page_size)) {
671 		perror("munmap");
672 		success = false;
673 		goto out_unmap;
674 	}
675 
676 	/* Set up random patterns. */
677 	srand(pattern_seed);
678 	for (i = 0; i < 10; i++) {
679 		int j;
680 		char *buf = &ptr[i * page_size];
681 
682 		if (i == 5)
683 			continue;
684 
685 		for (j = 0; j < page_size; j++)
686 			buf[j] = rand();
687 	}
688 
689 	/*
690 	 * Move the below:
691 	 *
692 	 *      <------------->
693 	 *  0 1 2 3 4 5 6 7 8 9 10 offset in buffer
694 	 * |**********| |*******|
695 	 * |**********| |*******|
696 	 *  0 1 2 3 4   5 6 7 8 9  pattern offset
697 	 *
698 	 * Into:
699 	 *
700 	 * 0 1 2 3 4 5 6 7 offset in buffer
701 	 * |*****| |*****|
702 	 * |*****| |*****|
703 	 * 2 3 4   5 6 7   pattern offset
704 	 */
705 	if (mremap(&ptr[2 * page_size], size - 3 * page_size, size - 3 * page_size,
706 		   mremap_flags, tgt_ptr) == MAP_FAILED) {
707 		perror("mremap");
708 		success = false;
709 		goto out_unmap;
710 	}
711 
712 	/* Offset into random pattern. */
713 	srand(pattern_seed);
714 	for (i = 0; i < 2 * page_size; i++)
715 		rand();
716 
717 	/* Check pattern. */
718 	for (i = 0; i < 7; i++) {
719 		int j;
720 		char *buf = &tgt_ptr[i * page_size];
721 
722 		if (i == 3)
723 			continue;
724 
725 		for (j = 0; j < page_size; j++) {
726 			char chr = rand();
727 
728 			if (chr != buf[j]) {
729 				ksft_print_msg("page %d offset %d corrupted, expected %d got %d\n",
730 					       i, j, chr, buf[j]);
				success = false;
731 				goto out_unmap;
732 			}
733 		}
734 	}
735 
736 out_unmap:
737 	if (munmap(tgt_ptr, size))
738 		perror("munmap tgt");
739 	if (munmap(ptr, size))
740 		perror("munmap src");
741 out:
742 	if (success)
743 		ksft_test_result_pass("%s%s\n", test_name,
744 				      dont_unmap ? " [dontunmap]" : "");
745 	else
746 		ksft_test_result_fail("%s%s\n", test_name,
747 				      dont_unmap ? " [dontunmap]" : "");
748 }
749 
750 #ifdef __NR_userfaultfd
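/*
 * Userfaultfd-registered VMAs may not take part in a multi-VMA move. Build a
 * range of uffd-armed VMAs separated by holes and verify that mremap():
 * fails with EFAULT and moves nothing when the first VMA is invalid, still
 * permits moving a single such VMA on its own, and, when only the leading
 * VMAs are valid, moves those before failing with EFAULT.
 */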
751 static void mremap_move_multi_invalid_vmas(FILE *maps_fp,
752 		unsigned long page_size)
753 {
754 	char *test_name = "mremap move multiple invalid vmas";
755 	const size_t size = 10 * page_size;
756 	bool success = true;
757 	char *ptr, *tgt_ptr;
758 	int uffd, err, i;
759 	void *res;
760 	struct uffdio_api api = {
761 		.api = UFFD_API,
762 		.features = UFFD_EVENT_PAGEFAULT,
763 	};
764 
765 	uffd = syscall(__NR_userfaultfd, O_NONBLOCK);
766 	if (uffd == -1) {
767 		err = errno;
768 		perror("userfaultfd");
769 		if (err == EPERM) {
770 			ksft_test_result_skip("%s - missing uffd\n", test_name);
771 			return;
772 		}
773 		success = false;
774 		goto out;
775 	}
776 	if (ioctl(uffd, UFFDIO_API, &api)) {
777 		perror("ioctl UFFDIO_API");
778 		success = false;
779 		goto out_close_uffd;
780 	}
781 
782 	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
783 		   MAP_PRIVATE | MAP_ANON, -1, 0);
784 	if (ptr == MAP_FAILED) {
785 		perror("mmap");
786 		success = false;
787 		goto out_close_uffd;
788 	}
789 
790 	tgt_ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
791 	if (tgt_ptr == MAP_FAILED) {
792 		perror("mmap");
793 		success = false;
794 		goto out_close_uffd;
795 	}
796 	if (munmap(tgt_ptr, size)) {
797 		perror("munmap");
798 		success = false;
799 		goto out_unmap;
800 	}
801 
802 	/*
803 	 * Unmap so we end up with:
804 	 *
805 	 *  0   2   4   6   8   10 offset in buffer
806 	 * |*| |*| |*| |*| |*|
807 	 * |*| |*| |*| |*| |*|
808 	 *
809 	 * Additionally, register each with UFFD.
810 	 */
811 	for (i = 0; i < 10; i += 2) {
812 		void *unmap_ptr = &ptr[(i + 1) * page_size];
813 		unsigned long start = (unsigned long)&ptr[i * page_size];
814 		struct uffdio_register reg = {
815 			.range = {
816 				.start = start,
817 				.len = page_size,
818 			},
819 			.mode = UFFDIO_REGISTER_MODE_MISSING,
820 		};
821 
822 		if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
823 			perror("ioctl UFFDIO_REGISTER");
824 			success = false;
825 			goto out_unmap;
826 		}
827 		if (munmap(unmap_ptr, page_size)) {
828 			perror("munmap");
829 			success = false;
830 			goto out_unmap;
831 		}
832 	}
833 
834 	/*
835 	 * Now try to move the entire range which is invalid for multi VMA move.
836 	 *
837 	 * This will fail, and no VMA should be moved, as we check this ahead of
838 	 * time.
839 	 */
840 	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
841 	err = errno;
842 	if (res != MAP_FAILED) {
843 		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
844 		success = false;
845 		goto out_unmap;
846 	}
847 	if (err != EFAULT) {
848 		errno = err;
849 		perror("mremap() unexpected error");
850 		success = false;
851 		goto out_unmap;
852 	}
853 	if (is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
854 		fprintf(stderr,
855 			"Invalid uffd-armed VMA at start of multi range moved\n");
856 		success = false;
857 		goto out_unmap;
858 	}
859 
860 	/*
861 	 * Now try to move a single VMA; this should succeed since it is not a
862 	 * multi VMA move.
863 	 */
864 	res = mremap(ptr, page_size, page_size,
865 		     MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
866 	if (res == MAP_FAILED) {
867 		perror("mremap single invalid-multi VMA");
868 		success = false;
869 		goto out_unmap;
870 	}
871 
872 	/*
873 	 * Unmap the VMA, and remap a non-uffd-registered (and therefore valid for
874 	 * multi VMA move) VMA at the start of the ptr range.
875 	 */
876 	if (munmap(tgt_ptr, page_size)) {
877 		perror("munmap");
878 		success = false;
879 		goto out_unmap;
880 	}
881 	res = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
882 		   MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
883 	if (res == MAP_FAILED) {
884 		perror("mmap");
885 		success = false;
886 		goto out_unmap;
887 	}
888 
889 	/*
890 	 * Now try to move the entire range; we should succeed in moving the
891 	 * first VMA, but no others, and the call should report a failure.
892 	 */
893 	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
894 	err = errno;
895 	if (res != MAP_FAILED) {
896 		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
897 		success = false;
898 		goto out_unmap;
899 	}
900 	if (err != EFAULT) {
901 		errno = err;
902 		perror("mremap() unexpected error");
903 		success = false;
904 		goto out_unmap;
905 	}
906 	if (!is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
907 		fprintf(stderr, "Valid VMA not moved\n");
908 		success = false;
909 		goto out_unmap;
910 	}
911 
912 	/*
913 	 * Unmap the VMA, map a valid VMA at the start of the ptr range, and replace
914 	 * all existing multi-move invalid VMAs, except the last, with valid
915 	 * multi-move VMAs.
916 	 */
917 	if (munmap(tgt_ptr, page_size)) {
918 		perror("munmap");
919 		success = false;
920 		goto out_unmap;
921 	}
922 	if (munmap(ptr, size - 2 * page_size)) {
923 		perror("munmap");
924 		success = false;
925 		goto out_unmap;
926 	}
927 	for (i = 0; i < 8; i += 2) {
928 		res = mmap(&ptr[i * page_size], page_size,
929 			   PROT_READ | PROT_WRITE,
930 			   MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
931 		if (res == MAP_FAILED) {
932 			perror("mmap");
933 			success = false;
934 			goto out_unmap;
935 		}
936 	}
937 
938 	/*
939 	 * Now try to move the entire range; we should succeed in moving all but
940 	 * the last VMA, and the call should report a failure.
941 	 */
942 	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
943 	err = errno;
944 	if (res != MAP_FAILED) {
945 		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
946 		success = false;
947 		goto out_unmap;
948 	}
949 	if (err != EFAULT) {
950 		errno = err;
951 		perror("mremap() unexpected error");
952 		success = false;
953 		goto out_unmap;
954 	}
955 
956 	for (i = 0; i < 10; i += 2) {
957 		bool is_mapped = is_ptr_mapped(maps_fp,
958 				&tgt_ptr[i * page_size], page_size);
959 
960 		if (i < 8 && !is_mapped) {
961 			fprintf(stderr, "Valid VMA not moved at %d\n", i);
962 			success = false;
963 			goto out_unmap;
964 		} else if (i == 8 && is_mapped) {
965 			fprintf(stderr, "Invalid VMA moved at %d\n", i);
966 			success = false;
967 			goto out_unmap;
968 		}
969 	}
970 
971 out_unmap:
972 	if (munmap(tgt_ptr, size))
973 		perror("munmap tgt");
974 	if (munmap(ptr, size))
975 		perror("munmap src");
976 out_close_uffd:
977 	close(uffd);
978 out:
979 	if (success)
980 		ksft_test_result_pass("%s\n", test_name);
981 	else
982 		ksft_test_result_fail("%s\n", test_name);
983 }
984 #else
985 static void mremap_move_multi_invalid_vmas(FILE *maps_fp, unsigned long page_size)
986 {
987 	char *test_name = "mremap move multiple invalid vmas";
988 
989 	ksft_test_result_skip("%s - missing uffd\n", test_name);
990 }
991 #endif /* __NR_userfaultfd */
992 
993 /* Returns the time taken for the remap on success, else returns -1. */
994 static long long remap_region(struct config c, unsigned int threshold_mb,
995 			      char *rand_addr)
996 {
997 	void *addr, *tmp_addr, *src_addr, *dest_addr, *dest_preamble_addr = NULL;
998 	unsigned long long t, d;
999 	struct timespec t_start = {0, 0}, t_end = {0, 0};
1000 	long long  start_ns, end_ns, align_mask, ret, offset;
1001 	unsigned long long threshold;
1002 	unsigned long num_chunks;
1003 
1004 	if (threshold_mb == VALIDATION_NO_THRESHOLD)
1005 		threshold = c.region_size;
1006 	else
1007 		threshold = MIN(threshold_mb * _1MB, c.region_size);
1008 
1009 	src_addr = get_source_mapping(c);
1010 	if (!src_addr) {
1011 		ret = -1;
1012 		goto out;
1013 	}
1014 
1015 	/* Set byte pattern for source block. */
1016 	memcpy(src_addr, rand_addr, threshold);
1017 
1018 	/* Mask to zero out lower bits of address for alignment */
1019 	align_mask = ~(c.dest_alignment - 1);
1020 	/* Offset of destination address from the end of the source region */
1021 	offset = (c.overlapping) ? -c.dest_alignment : c.dest_alignment;
1022 	addr = (void *) (((unsigned long long) src_addr + c.region_size
1023 			  + offset) & align_mask);
1024 
1025 	/* Remap after the destination block preamble. */
1026 	addr += c.dest_preamble_size;
1027 
1028 	/* See comment in get_source_mapping() */
1029 	if (!((unsigned long long) addr & c.dest_alignment))
1030 		addr = (void *) ((unsigned long long) addr | c.dest_alignment);
1031 
1032 	/* Don't destroy existing mappings unless expected to overlap */
1033 	while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
1034 		/* Check for unsigned overflow */
1035 		tmp_addr = addr + c.dest_alignment;
1036 		if (tmp_addr < addr) {
1037 			ksft_print_msg("Couldn't find a valid region to remap to\n");
1038 			ret = -1;
1039 			goto clean_up_src;
1040 		}
1041 		addr += c.dest_alignment;
1042 	}
1043 
1044 	if (c.dest_preamble_size) {
1045 		dest_preamble_addr = mmap((void *) addr - c.dest_preamble_size, c.dest_preamble_size,
1046 					  PROT_READ | PROT_WRITE,
1047 					  MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
1048 							-1, 0);
1049 		if (dest_preamble_addr == MAP_FAILED) {
1050 			ksft_print_msg("Failed to map dest preamble region: %s\n",
1051 					strerror(errno));
1052 			ret = -1;
1053 			goto clean_up_src;
1054 		}
1055 
1056 		/* Set byte pattern for the dest preamble block. */
1057 		memcpy(dest_preamble_addr, rand_addr, c.dest_preamble_size);
1058 	}
1059 
1060 	clock_gettime(CLOCK_MONOTONIC, &t_start);
1061 	dest_addr = mremap(src_addr, c.region_size, c.region_size,
1062 					  MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
1063 	clock_gettime(CLOCK_MONOTONIC, &t_end);
1064 
1065 	if (dest_addr == MAP_FAILED) {
1066 		ksft_print_msg("mremap failed: %s\n", strerror(errno));
1067 		ret = -1;
1068 		goto clean_up_dest_preamble;
1069 	}
1070 
1071 	/*
1072 	 * Verify byte pattern after remapping. Employ an algorithm with a
1073 	 * square root time complexity in threshold: divide the range into
1074 	 * chunks, if memcmp() returns non-zero, only then perform an
1075 	 * iteration in that chunk to find the mismatch index.
1076 	 */
1077 	num_chunks = get_sqrt(threshold);
1078 	for (unsigned long i = 0; i < num_chunks; ++i) {
1079 		size_t chunk_size = threshold / num_chunks;
1080 		unsigned long shift = i * chunk_size;
1081 
1082 		if (!memcmp(dest_addr + shift, rand_addr + shift, chunk_size))
1083 			continue;
1084 
1085 		/* brute force iteration only over mismatch segment */
1086 		for (t = shift; t < shift + chunk_size; ++t) {
1087 			if (((char *) dest_addr)[t] != rand_addr[t]) {
1088 				ksft_print_msg("Data after remap doesn't match at offset %llu\n",
1089 						t);
1090 				ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[t] & 0xff,
1091 						((char *) dest_addr)[t] & 0xff);
1092 				ret = -1;
1093 				goto clean_up_dest;
1094 			}
1095 		}
1096 	}
1097 
1098 	/*
1099 	 * if threshold is not divisible by num_chunks, then check the
1100 	 * last chunk
1101 	 */
1102 	for (t = num_chunks * (threshold / num_chunks); t < threshold; ++t) {
1103 		if (((char *) dest_addr)[t] != rand_addr[t]) {
1104 			ksft_print_msg("Data after remap doesn't match at offset %llu\n",
1105 					t);
1106 			ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[t] & 0xff,
1107 					((char *) dest_addr)[t] & 0xff);
1108 			ret = -1;
1109 			goto clean_up_dest;
1110 		}
1111 	}
1112 
1113 	/* Verify the dest preamble byte pattern after remapping */
1114 	if (!c.dest_preamble_size)
1115 		goto no_preamble;
1116 
1117 	num_chunks = get_sqrt(c.dest_preamble_size);
1118 
1119 	for (unsigned long i = 0; i < num_chunks; ++i) {
1120 		size_t chunk_size = c.dest_preamble_size / num_chunks;
1121 		unsigned long shift = i * chunk_size;
1122 
1123 		if (!memcmp(dest_preamble_addr + shift, rand_addr + shift,
1124 			    chunk_size))
1125 			continue;
1126 
1127 		/* brute force iteration only over mismatched segment */
1128 		for (d = shift; d < shift + chunk_size; ++d) {
1129 			if (((char *) dest_preamble_addr)[d] != rand_addr[d]) {
1130 				ksft_print_msg("Preamble data after remap doesn't match at offset %llu\n",
1131 						d);
1132 				ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[d] & 0xff,
1133 						((char *) dest_preamble_addr)[d] & 0xff);
1134 				ret = -1;
1135 				goto clean_up_dest;
1136 			}
1137 		}
1138 	}
1139 
1140 	for (d = num_chunks * (c.dest_preamble_size / num_chunks); d < c.dest_preamble_size; ++d) {
1141 		if (((char *) dest_preamble_addr)[d] != rand_addr[d]) {
1142 			ksft_print_msg("Preamble data after remap doesn't match at offset %llu\n",
1143 					d);
1144 			ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[d] & 0xff,
1145 					((char *) dest_preamble_addr)[d] & 0xff);
1146 			ret = -1;
1147 			goto clean_up_dest;
1148 		}
1149 	}
1150 
1151 no_preamble:
1152 	start_ns = t_start.tv_sec * NS_PER_SEC + t_start.tv_nsec;
1153 	end_ns = t_end.tv_sec * NS_PER_SEC + t_end.tv_nsec;
1154 	ret = end_ns - start_ns;
1155 
1156 /*
1157  * Since the destination address is specified using MREMAP_FIXED, subsequent
1158  * mremap will unmap any previous mapping at the address range specified by
1159  * dest_addr and region_size. This significantly affects the remap time of
1160  * subsequent tests. So we clean up mappings after each test.
1161  */
1162 clean_up_dest:
1163 	munmap(dest_addr, c.region_size);
1164 clean_up_dest_preamble:
1165 	if (c.dest_preamble_size && dest_preamble_addr)
1166 		munmap(dest_preamble_addr, c.dest_preamble_size);
1167 clean_up_src:
1168 	munmap(src_addr, c.region_size);
1169 out:
1170 	return ret;
1171 }
1172 
1173 /*
1174  * Verify that an mremap aligning down does not destroy
1175  * the beginning of the mapping just because the aligned
1176  * down address landed on a mapping that may not exist.
1177  */
1178 static void mremap_move_1mb_from_start(unsigned int pattern_seed,
1179 				       char *rand_addr)
1180 {
1181 	char *test_name = "mremap move 1mb from start at 1MB+256KB aligned src";
1182 	void *src = NULL, *dest = NULL;
1183 	unsigned int i, success = 1;
1184 
1185 	/* Config to reuse get_source_mapping() to do an aligned mmap. */
1186 	struct config c = {
1187 		.src_alignment = SIZE_MB(1) + SIZE_KB(256),
1188 		.region_size = SIZE_MB(6)
1189 	};
1190 
1191 	src = get_source_mapping(c);
1192 	if (!src) {
1193 		success = 0;
1194 		goto out;
1195 	}
1196 
1197 	c.src_alignment = SIZE_MB(1) + SIZE_KB(256);
1198 	dest = get_source_mapping(c);
1199 	if (!dest) {
1200 		success = 0;
1201 		goto out;
1202 	}
1203 
1204 	/* Set byte pattern for source block. */
1205 	memcpy(src, rand_addr, SIZE_MB(2));
1206 
1207 	/*
1208 	 * Unmap the beginning of dest so that the aligned address
1209 	 * falls on no mapping.
1210 	 */
1211 	munmap(dest, SIZE_MB(1));
1212 
1213 	void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1),
1214 						   MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1));
1215 	if (new_ptr == MAP_FAILED) {
1216 		perror("mremap");
1217 		success = 0;
1218 		goto out;
1219 	}
1220 
1221 	/* Verify byte pattern after remapping */
1222 	srand(pattern_seed);
1223 	for (i = 0; i < SIZE_MB(1); i++) {
1224 		char c = (char) rand();
1225 
1226 		if (((char *)src)[i] != c) {
1227 			ksft_print_msg("Data at src at offset %d got corrupted due to unrelated mremap\n",
1228 				       i);
1229 			ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
1230 					((char *) src)[i] & 0xff);
1231 			success = 0;
1232 		}
1233 	}
1234 
1235 out:
1236 	if (src && munmap(src, c.region_size) == -1)
1237 		perror("munmap src");
1238 
1239 	if (dest && munmap(dest, c.region_size) == -1)
1240 		perror("munmap dest");
1241 
1242 	if (success)
1243 		ksft_test_result_pass("%s\n", test_name);
1244 	else
1245 		ksft_test_result_fail("%s\n", test_name);
1246 }
1247 
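/*
 * Run a single test case through remap_region() and report the result via
 * kselftest: a failed remap counts as XFAIL when the case expects failure and
 * as a failure otherwise. The mremap time is only reported when the entire
 * region was faulted in and validated.
 */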
1248 static void run_mremap_test_case(struct test test_case, int *failures,
1249 				 unsigned int threshold_mb,
1250 				 char *rand_addr)
1251 {
1252 	long long remap_time = remap_region(test_case.config, threshold_mb,
1253 					    rand_addr);
1254 
1255 	if (remap_time < 0) {
1256 		if (test_case.expect_failure)
1257 			ksft_test_result_xfail("%s\n\tExpected mremap failure\n",
1258 					      test_case.name);
1259 		else {
1260 			ksft_test_result_fail("%s\n", test_case.name);
1261 			*failures += 1;
1262 		}
1263 	} else {
1264 		/*
1265 		 * Comparing mremap time is only applicable if entire region
1266 		 * was faulted in.
1267 		 */
1268 		if (threshold_mb == VALIDATION_NO_THRESHOLD ||
1269 		    test_case.config.region_size <= threshold_mb * _1MB)
1270 			ksft_test_result_pass("%s\n\tmremap time: %12lldns\n",
1271 					      test_case.name, remap_time);
1272 		else
1273 			ksft_test_result_pass("%s\n", test_case.name);
1274 	}
1275 }
1276 
1277 static void usage(const char *cmd)
1278 {
1279 	fprintf(stderr,
1280 		"Usage: %s [[-t <threshold_mb>] [-p <pattern_seed>]]\n"
1281 		"-t\t only validate threshold_mb of the remapped region\n"
1282 		"  \t if 0 is supplied no threshold is used; all tests\n"
1283 		"  \t are run and remapped regions validated fully.\n"
1284 		"  \t The default threshold used is 4MB.\n"
1285 		"-p\t provide a seed to generate the random pattern for\n"
1286 		"  \t validating the remapped region.\n", cmd);
1287 }
1288 
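/*
 * Example invocation (illustrative values): validate only the first 8MB of
 * each remapped region and use a fixed pattern seed for reproducibility:
 *
 *	./mremap_test -t 8 -p 42
 */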
1289 static int parse_args(int argc, char **argv, unsigned int *threshold_mb,
1290 		      unsigned int *pattern_seed)
1291 {
1292 	const char *optstr = "t:p:";
1293 	int opt;
1294 
1295 	while ((opt = getopt(argc, argv, optstr)) != -1) {
1296 		switch (opt) {
1297 		case 't':
1298 			*threshold_mb = atoi(optarg);
1299 			break;
1300 		case 'p':
1301 			*pattern_seed = atoi(optarg);
1302 			break;
1303 		default:
1304 			usage(argv[0]);
1305 			return -1;
1306 		}
1307 	}
1308 
1309 	if (optind < argc) {
1310 		usage(argv[0]);
1311 		return -1;
1312 	}
1313 
1314 	return 0;
1315 }
1316 
1317 #define MAX_TEST 15
1318 #define MAX_PERF_TEST 3
1319 int main(int argc, char **argv)
1320 {
1321 	int failures = 0;
1322 	unsigned int i;
1323 	int run_perf_tests;
1324 	unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD;
1325 
1326 	/* hard-coded test configs */
1327 	size_t max_test_variable_region_size = _2GB;
1328 	size_t max_test_constant_region_size = _2MB;
1329 	size_t dest_preamble_size = 10 * _4MB;
1330 
1331 	unsigned int pattern_seed;
1332 	char *rand_addr;
1333 	size_t rand_size;
1334 	int num_expand_tests = 2;
1335 	int num_misc_tests = 9;
1336 	struct test test_cases[MAX_TEST] = {};
1337 	struct test perf_test_cases[MAX_PERF_TEST];
1338 	int page_size;
1339 	time_t t;
1340 	FILE *maps_fp;
1341 
1342 	pattern_seed = (unsigned int) time(&t);
1343 
1344 	if (parse_args(argc, argv, &threshold_mb, &pattern_seed) < 0)
1345 		exit(EXIT_FAILURE);
1346 
1347 	ksft_print_msg("Test configs:\n\tthreshold_mb=%u\n\tpattern_seed=%u\n\n",
1348 		       threshold_mb, pattern_seed);
1349 
1350 	/*
1351 	 * set preallocated random array according to test configs; see the
1352 	 * functions for the logic of setting the size
1353 	 */
1354 	if (!threshold_mb)
1355 		rand_size = MAX(max_test_variable_region_size,
1356 				max_test_constant_region_size);
1357 	else
1358 		rand_size = MAX(MIN(threshold_mb * _1MB,
1359 				    max_test_variable_region_size),
1360 				max_test_constant_region_size);
1361 	rand_size = MAX(dest_preamble_size, rand_size);
1362 
1363 	rand_addr = (char *)mmap(NULL, rand_size, PROT_READ | PROT_WRITE,
1364 				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1365 	if (rand_addr == MAP_FAILED) {
1366 		perror("mmap");
1367 		ksft_exit_fail_msg("cannot mmap rand_addr\n");
1368 	}
1369 
1370 	/* fill stream of random bytes */
1371 	srand(pattern_seed);
1372 	for (unsigned long i = 0; i < rand_size; ++i)
1373 		rand_addr[i] = (char) rand();
1374 
1375 	page_size = sysconf(_SC_PAGESIZE);
1376 
1377 	/* Expected mremap failures */
1378 	test_cases[0] =	MAKE_TEST(page_size, page_size, page_size,
1379 				  OVERLAPPING, EXPECT_FAILURE,
1380 				  "mremap - Source and Destination Regions Overlapping");
1381 
1382 	test_cases[1] = MAKE_TEST(page_size, page_size/4, page_size,
1383 				  NON_OVERLAPPING, EXPECT_FAILURE,
1384 				  "mremap - Destination Address Misaligned (1KB-aligned)");
1385 	test_cases[2] = MAKE_TEST(page_size/4, page_size, page_size,
1386 				  NON_OVERLAPPING, EXPECT_FAILURE,
1387 				  "mremap - Source Address Misaligned (1KB-aligned)");
1388 
1389 	/* Src addr PTE aligned */
1390 	test_cases[3] = MAKE_TEST(PTE, PTE, PTE * 2,
1391 				  NON_OVERLAPPING, EXPECT_SUCCESS,
1392 				  "8KB mremap - Source PTE-aligned, Destination PTE-aligned");
1393 
1394 	/* Src addr 1MB aligned */
1395 	test_cases[4] = MAKE_TEST(_1MB, PTE, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
1396 				  "2MB mremap - Source 1MB-aligned, Destination PTE-aligned");
1397 	test_cases[5] = MAKE_TEST(_1MB, _1MB, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
1398 				  "2MB mremap - Source 1MB-aligned, Destination 1MB-aligned");
1399 
1400 	/* Src addr PMD aligned */
1401 	test_cases[6] = MAKE_TEST(PMD, PTE, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
1402 				  "4MB mremap - Source PMD-aligned, Destination PTE-aligned");
1403 	test_cases[7] =	MAKE_TEST(PMD, _1MB, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
1404 				  "4MB mremap - Source PMD-aligned, Destination 1MB-aligned");
1405 	test_cases[8] = MAKE_TEST(PMD, PMD, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
1406 				  "4MB mremap - Source PMD-aligned, Destination PMD-aligned");
1407 
1408 	/* Src addr PUD aligned */
1409 	test_cases[9] = MAKE_TEST(PUD, PTE, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
1410 				  "2GB mremap - Source PUD-aligned, Destination PTE-aligned");
1411 	test_cases[10] = MAKE_TEST(PUD, _1MB, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
1412 				   "2GB mremap - Source PUD-aligned, Destination 1MB-aligned");
1413 	test_cases[11] = MAKE_TEST(PUD, PMD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
1414 				   "2GB mremap - Source PUD-aligned, Destination PMD-aligned");
1415 	test_cases[12] = MAKE_TEST(PUD, PUD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
1416 				   "2GB mremap - Source PUD-aligned, Destination PUD-aligned");
1417 
1418 	/* Src and Dest addr 1MB aligned. 5MB mremap. */
1419 	test_cases[13] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS,
1420 				  "5MB mremap - Source 1MB-aligned, Destination 1MB-aligned");
1421 
1422 	/* Src and Dest addr 1MB aligned. 5MB mremap. */
1423 	test_cases[14] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS,
1424 				  "5MB mremap - Source 1MB-aligned, Dest 1MB-aligned with 40MB Preamble");
1425 	test_cases[14].config.dest_preamble_size = 10 * _4MB;
1426 
1427 	perf_test_cases[0] =  MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
1428 					"1GB mremap - Source PTE-aligned, Destination PTE-aligned");
1429 	/*
1430 	 * mremap 1GB region - Page table level aligned time
1431 	 * comparison.
1432 	 */
1433 	perf_test_cases[1] = MAKE_TEST(PMD, PMD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
1434 				       "1GB mremap - Source PMD-aligned, Destination PMD-aligned");
1435 	perf_test_cases[2] = MAKE_TEST(PUD, PUD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
1436 				       "1GB mremap - Source PUD-aligned, Destination PUD-aligned");
1437 
1438 	run_perf_tests =  (threshold_mb == VALIDATION_NO_THRESHOLD) ||
1439 				(threshold_mb * _1MB >= _1GB);
1440 
1441 	ksft_set_plan(ARRAY_SIZE(test_cases) + (run_perf_tests ?
1442 		      ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests + num_misc_tests);
1443 
1444 	for (i = 0; i < ARRAY_SIZE(test_cases); i++)
1445 		run_mremap_test_case(test_cases[i], &failures, threshold_mb,
1446 				     rand_addr);
1447 
1448 	maps_fp = fopen("/proc/self/maps", "r");
1449 
1450 	if (maps_fp == NULL) {
1451 		munmap(rand_addr, rand_size);
1452 		ksft_exit_fail_msg("Failed to read /proc/self/maps: %s\n", strerror(errno));
1453 	}
1454 
1455 	mremap_expand_merge(maps_fp, page_size);
1456 	mremap_expand_merge_offset(maps_fp, page_size);
1457 
1458 	mremap_move_within_range(pattern_seed, rand_addr);
1459 	mremap_move_1mb_from_start(pattern_seed, rand_addr);
1460 	mremap_shrink_multiple_vmas(page_size, /* inplace= */true);
1461 	mremap_shrink_multiple_vmas(page_size, /* inplace= */false);
1462 	mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ false);
1463 	mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ true);
1464 	mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ false);
1465 	mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ true);
1466 	mremap_move_multi_invalid_vmas(maps_fp, page_size);
1467 
1468 	fclose(maps_fp);
1469 
1470 	if (run_perf_tests) {
1471 		ksft_print_msg("\n%s\n",
1472 		 "mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:");
1473 		for (i = 0; i < ARRAY_SIZE(perf_test_cases); i++)
1474 			run_mremap_test_case(perf_test_cases[i], &failures,
1475 					     threshold_mb,
1476 					     rand_addr);
1477 	}
1478 
1479 	munmap(rand_addr, rand_size);
1480 
1481 	if (failures > 0)
1482 		ksft_exit_fail();
1483 	else
1484 		ksft_exit_pass();
1485 }
1486