1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #define _GNU_SOURCE
4 #include "../kselftest_harness.h"
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/limits.h>
10 #include <linux/userfaultfd.h>
11 #include <setjmp.h>
12 #include <signal.h>
13 #include <stdbool.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/ioctl.h>
18 #include <sys/mman.h>
19 #include <sys/syscall.h>
20 #include <sys/uio.h>
21 #include <unistd.h>
22 #include "vm_util.h"
23 
24 /*
25  * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
26  *
27  * "If the signal occurs other than as the result of calling the abort or raise
28  *  function, the behavior is undefined if the signal handler refers to any
29  *  object with static storage duration other than by assigning a value to an
30  *  object declared as volatile sig_atomic_t"
31  */
32 static volatile sig_atomic_t signal_jump_set;
33 static sigjmp_buf signal_jmp_buf;
34 
35 /*
36  * Ignore the checkpatch warning; we must read from x but don't want to do
37  * anything with it in order to trigger a read page fault. We therefore must use
38  * volatile to stop the compiler from optimising this away.
39  */
40 #define FORCE_READ(x) (*(volatile typeof(x) *)x)
41 
42 /*
43  * How is the test backing the mapping being tested?
44  */
45 enum backing_type {
46 	ANON_BACKED,
47 	SHMEM_BACKED,
48 	LOCAL_FILE_BACKED,
49 };
50 
51 FIXTURE(guard_regions)
52 {
53 	unsigned long page_size;
54 	char path[PATH_MAX];
55 	int fd;
56 };
57 
58 FIXTURE_VARIANT(guard_regions)
59 {
60 	enum backing_type backing;
61 };
62 
63 FIXTURE_VARIANT_ADD(guard_regions, anon)
64 {
65 	.backing = ANON_BACKED,
66 };
67 
68 FIXTURE_VARIANT_ADD(guard_regions, shmem)
69 {
70 	.backing = SHMEM_BACKED,
71 };
72 
73 FIXTURE_VARIANT_ADD(guard_regions, file)
74 {
75 	.backing = LOCAL_FILE_BACKED,
76 };
77 
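/* Is the backing effectively anonymous? Shmem counts as anon, local files do not. */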
78 static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
79 {
80 	switch (variant->backing) {
81 	case ANON_BACKED:
82 	case SHMEM_BACKED:
83 		return true;
84 	default:
85 		return false;
86 	}
87 }
88 
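/*
 * mmap() wrapper which selects the fd and flags appropriate to the variant's
 * backing: MAP_PRIVATE | MAP_ANON for anonymous backing, MAP_SHARED over
 * self->fd for shmem and local file backing.
 */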
89 static void *mmap_(FIXTURE_DATA(guard_regions) * self,
90 		   const FIXTURE_VARIANT(guard_regions) * variant,
91 		   void *addr, size_t length, int prot, int extra_flags,
92 		   off_t offset)
93 {
94 	int fd;
95 	int flags = extra_flags;
96 
97 	switch (variant->backing) {
98 	case ANON_BACKED:
99 		flags |= MAP_PRIVATE | MAP_ANON;
100 		fd = -1;
101 		break;
102 	case SHMEM_BACKED:
103 	case LOCAL_FILE_BACKED:
104 		flags |= MAP_SHARED;
105 		fd = self->fd;
106 		break;
107 	default:
108 		ksft_exit_fail();
109 		break;
110 	}
111 
112 	return mmap(addr, length, prot, flags, fd, offset);
113 }
114 
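/* Wrapper for the userfaultfd() syscall, as libc may not provide one. */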
115 static int userfaultfd(int flags)
116 {
117 	return syscall(SYS_userfaultfd, flags);
118 }
119 
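/*
 * Fatal signal handler (registered for SIGSEGV). If a test has armed
 * signal_jump_set, jump back to the sigsetjmp() point in try_access_buf()
 * rather than terminating.
 */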
120 static void handle_fatal(int c)
121 {
122 	if (!signal_jump_set)
123 		return;
124 
125 	siglongjmp(signal_jmp_buf, c);
126 }
127 
128 static int pidfd_open(pid_t pid, unsigned int flags)
129 {
130 	return syscall(SYS_pidfd_open, pid, flags);
131 }
132 
133 static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
134 				   size_t n, int advice, unsigned int flags)
135 {
136 	return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
137 }
138 
139 /*
140  * Enable our signal catcher and try to read/write the specified buffer. The
141  * return value indicates whether the read/write succeeds without a fatal
142  * signal.
143  */
144 static bool try_access_buf(char *ptr, bool write)
145 {
146 	bool failed;
147 
148 	/* Tell signal handler to jump back here on fatal signal. */
149 	signal_jump_set = true;
150 	/* If a fatal signal arises, we will jump back here and failed will be set. */
151 	failed = sigsetjmp(signal_jmp_buf, 0) != 0;
152 
153 	if (!failed) {
154 		if (write)
155 			*ptr = 'x';
156 		else
157 			FORCE_READ(ptr);
158 	}
159 
160 	signal_jump_set = false;
161 	return !failed;
162 }
163 
164 /* Try and read from a buffer, return true if no fatal signal. */
165 static bool try_read_buf(char *ptr)
166 {
167 	return try_access_buf(ptr, false);
168 }
169 
170 /* Try and write to a buffer, return true if no fatal signal. */
171 static bool try_write_buf(char *ptr)
172 {
173 	return try_access_buf(ptr, true);
174 }
175 
176 /*
177  * Try and BOTH read from AND write to a buffer, return true if BOTH operations
178  * succeed.
179  */
180 static bool try_read_write_buf(char *ptr)
181 {
182 	return try_read_buf(ptr) && try_write_buf(ptr);
183 }
184 
185 static void setup_sighandler(void)
186 {
187 	struct sigaction act = {
188 		.sa_handler = &handle_fatal,
189 		.sa_flags = SA_NODEFER,
190 	};
191 
192 	sigemptyset(&act.sa_mask);
193 	if (sigaction(SIGSEGV, &act, NULL))
194 		ksft_exit_fail_perror("sigaction");
195 }
196 
197 static void teardown_sighandler(void)
198 {
199 	struct sigaction act = {
200 		.sa_handler = SIG_DFL,
201 		.sa_flags = SA_NODEFER,
202 	};
203 
204 	sigemptyset(&act.sa_mask);
205 	sigaction(SIGSEGV, &act, NULL);
206 }
207 
208 static int open_file(const char *prefix, char *path)
209 {
210 	int fd;
211 
212 	snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
213 	fd = mkstemp(path);
214 	if (fd < 0)
215 		ksft_exit_fail_perror("mkstemp");
216 
217 	return fd;
218 }
219 
220 /* Establish a varying pattern in a buffer. */
221 static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
222 {
223 	size_t i;
224 
225 	for (i = 0; i < num_pages; i++) {
226 		char *ptr2 = &ptr[i * page_size];
227 
228 		memset(ptr2, 'a' + (i % 26), page_size);
229 	}
230 }
231 
232 /*
233  * Check that a buffer contains the pattern set by set_pattern(), starting at a
234  * page offset of pgoff within the buffer.
235  */
236 static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
237 				 size_t pgoff)
238 {
239 	size_t i;
240 
241 	for (i = 0; i < num_pages * page_size; i++) {
242 		size_t offset = pgoff * page_size + i;
243 		char actual = ptr[offset];
244 		char expected = 'a' + ((offset / page_size) % 26);
245 
246 		if (actual != expected)
247 			return false;
248 	}
249 
250 	return true;
251 }
252 
253 /* Check that a buffer contains the pattern set by set_pattern(). */
254 static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
255 {
256 	return check_pattern_offset(ptr, num_pages, page_size, 0);
257 }
258 
259 /* Determine if a buffer contains only repetitions of a specified char. */
260 static bool is_buf_eq(char *buf, size_t size, char chr)
261 {
262 	size_t i;
263 
264 	for (i = 0; i < size; i++) {
265 		if (buf[i] != chr)
266 			return false;
267 	}
268 
269 	return true;
270 }
271 
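/*
 * Per-test setup: record the page size, install the SIGSEGV handler and, for
 * shmem/file-backed variants, create a backing file truncated to 100 pages.
 */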
272 FIXTURE_SETUP(guard_regions)
273 {
274 	self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
275 	setup_sighandler();
276 
277 	if (variant->backing == ANON_BACKED)
278 		return;
279 
280 	self->fd = open_file(
281 		variant->backing == SHMEM_BACKED ? "/tmp/" : "",
282 		self->path);
283 
284 	/* Truncate the file to at least 100 pages; tests can modify this as needed. */
285 	ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
286 };
287 
288 FIXTURE_TEARDOWN_PARENT(guard_regions)
289 {
290 	teardown_sighandler();
291 
292 	if (variant->backing == ANON_BACKED)
293 		return;
294 
295 	if (self->fd >= 0)
296 		close(self->fd);
297 
298 	if (self->path[0] != '\0')
299 		unlink(self->path);
300 }
301 
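/* Sanity-check basic installation and removal of guard regions in a single VMA. */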
302 TEST_F(guard_regions, basic)
303 {
304 	const unsigned long NUM_PAGES = 10;
305 	const unsigned long page_size = self->page_size;
306 	char *ptr;
307 	int i;
308 
309 	ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
310 		    PROT_READ | PROT_WRITE, 0, 0);
311 	ASSERT_NE(ptr, MAP_FAILED);
312 
313 	/* Trivially assert we can touch the first page. */
314 	ASSERT_TRUE(try_read_write_buf(ptr));
315 
316 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
317 
318 	/* Establish that 1st page SIGSEGV's. */
319 	ASSERT_FALSE(try_read_write_buf(ptr));
320 
321 	/* Ensure we can touch everything else. */
322 	for (i = 1; i < NUM_PAGES; i++) {
323 		char *curr = &ptr[i * page_size];
324 
325 		ASSERT_TRUE(try_read_write_buf(curr));
326 	}
327 
328 	/* Establish a guard page at the end of the mapping. */
329 	ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
330 			  MADV_GUARD_INSTALL), 0);
331 
332 	/* Check that both guard pages result in SIGSEGV. */
333 	ASSERT_FALSE(try_read_write_buf(ptr));
334 	ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
335 
336 	/* Remove the first guard page. */
337 	ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));
338 
339 	/* Make sure we can touch it. */
340 	ASSERT_TRUE(try_read_write_buf(ptr));
341 
342 	/* Remove the last guard page. */
343 	ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
344 			     MADV_GUARD_REMOVE));
345 
346 	/* Make sure we can touch it. */
347 	ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
348 
349 	/*
350 	 * Test setting a _range_ of pages, namely the first 3. The first of
351 	 * these will have been faulted in, so this also tests that we can
352 	 * install guard pages over backed pages.
353 	 */
354 	ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
355 
356 	/* Make sure they are all guard pages. */
357 	for (i = 0; i < 3; i++) {
358 		char *curr = &ptr[i * page_size];
359 
360 		ASSERT_FALSE(try_read_write_buf(curr));
361 	}
362 
363 	/* Make sure the rest are not. */
364 	for (i = 3; i < NUM_PAGES; i++) {
365 		char *curr = &ptr[i * page_size];
366 
367 		ASSERT_TRUE(try_read_write_buf(curr));
368 	}
369 
370 	/* Remove guard pages. */
371 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
372 
373 	/* Now make sure we can touch everything. */
374 	for (i = 0; i < NUM_PAGES; i++) {
375 		char *curr = &ptr[i * page_size];
376 
377 		ASSERT_TRUE(try_read_write_buf(curr));
378 	}
379 
380 	/*
381 	 * Now remove all guard pages once more and make sure doing so does not
382 	 * remove the existing entries (i.e. the data we wrote remains).
383 	 */
384 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
385 
386 	for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
387 		char chr = ptr[i];
388 
389 		ASSERT_EQ(chr, 'x');
390 	}
391 
392 	ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
393 }
394 
395 /* Assert that operations applied across multiple VMAs work as expected. */
396 TEST_F(guard_regions, multi_vma)
397 {
398 	const unsigned long page_size = self->page_size;
399 	char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
400 	int i;
401 
402 	/* Reserve a 100 page region over which we can install VMAs. */
403 	ptr_region = mmap_(self, variant, NULL, 100 * page_size,
404 			   PROT_NONE, 0, 0);
405 	ASSERT_NE(ptr_region, MAP_FAILED);
406 
407 	/* Place a 10 page VMA at the start of the region. */
408 	ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
409 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
410 	ASSERT_NE(ptr1, MAP_FAILED);
411 
412 	/* Place a 5 page VMA 50 pages into the region. */
413 	ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
414 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
415 	ASSERT_NE(ptr2, MAP_FAILED);
416 
417 	/* Place a 20 page VMA at the end of the region. */
418 	ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
419 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
420 	ASSERT_NE(ptr3, MAP_FAILED);
421 
422 	/* Unmap gaps. */
423 	ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
424 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);
425 
426 	/*
427 	 * We end up with VMAs like this:
428 	 *
429 	 * 0    10 .. 50   55 .. 80   100
430 	 * [---]      [---]      [---]
431 	 */
432 
433 	/*
434 	 * Now mark the whole range as guard pages and make sure all VMAs are
435 	 * marked as such.
436 	 */
437 
438 	/*
439 	 * madvise() is certifiable: it lets you perform operations over gaps and
440 	 * everything works, but it indicates an error with errno set to ENOMEM.
441 	 * ENOMEM is also set if anything genuinely runs out of memory. You are
442 	 * meant to guess which is which.
443 	 */
444 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
445 	ASSERT_EQ(errno, ENOMEM);
446 
447 	for (i = 0; i < 10; i++) {
448 		char *curr = &ptr1[i * page_size];
449 
450 		ASSERT_FALSE(try_read_write_buf(curr));
451 	}
452 
453 	for (i = 0; i < 5; i++) {
454 		char *curr = &ptr2[i * page_size];
455 
456 		ASSERT_FALSE(try_read_write_buf(curr));
457 	}
458 
459 	for (i = 0; i < 20; i++) {
460 		char *curr = &ptr3[i * page_size];
461 
462 		ASSERT_FALSE(try_read_write_buf(curr));
463 	}
464 
465 	/* Now remove guard pages over the range and assert the opposite. */
466 
467 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
468 	ASSERT_EQ(errno, ENOMEM);
469 
470 	for (i = 0; i < 10; i++) {
471 		char *curr = &ptr1[i * page_size];
472 
473 		ASSERT_TRUE(try_read_write_buf(curr));
474 	}
475 
476 	for (i = 0; i < 5; i++) {
477 		char *curr = &ptr2[i * page_size];
478 
479 		ASSERT_TRUE(try_read_write_buf(curr));
480 	}
481 
482 	for (i = 0; i < 20; i++) {
483 		char *curr = &ptr3[i * page_size];
484 
485 		ASSERT_TRUE(try_read_write_buf(curr));
486 	}
487 
488 	/* Now map incompatible VMAs in the gaps. */
489 	ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
490 		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
491 	ASSERT_NE(ptr, MAP_FAILED);
492 	ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
493 		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
494 	ASSERT_NE(ptr, MAP_FAILED);
495 
496 	/*
497 	 * We end up with VMAs like this:
498 	 *
499 	 * 0    10 .. 50   55 .. 80   100
500 	 * [---][xxxx][---][xxxx][---]
501 	 *
502 	 * Where 'x' signifies VMAs that cannot be merged with those adjacent to
503 	 * them.
504 	 */
505 
506 	/* Multiple VMAs adjacent to one another should result in no error. */
507 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
508 	for (i = 0; i < 100; i++) {
509 		char *curr = &ptr_region[i * page_size];
510 
511 		ASSERT_FALSE(try_read_write_buf(curr));
512 	}
513 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
514 	for (i = 0; i < 100; i++) {
515 		char *curr = &ptr_region[i * page_size];
516 
517 		ASSERT_TRUE(try_read_write_buf(curr));
518 	}
519 
520 	/* Cleanup. */
521 	ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
522 }
523 
524 /*
525  * Assert that batched operations performed using process_madvise() work as
526  * expected.
527  */
528 TEST_F(guard_regions, process_madvise)
529 {
530 	const unsigned long page_size = self->page_size;
531 	pid_t pid = getpid();
532 	int pidfd = pidfd_open(pid, 0);
533 	char *ptr_region, *ptr1, *ptr2, *ptr3;
534 	ssize_t count;
535 	struct iovec vec[6];
536 
537 	ASSERT_NE(pidfd, -1);
538 
539 	/* Reserve region to map over. */
540 	ptr_region = mmap_(self, variant, NULL, 100 * page_size,
541 			   PROT_NONE, 0, 0);
542 	ASSERT_NE(ptr_region, MAP_FAILED);
543 
544 	/*
545 	 * 10 pages offset 1 page into the reserve region. We MAP_POPULATE so
546 	 * entries are already present, testing that installing guard regions
547 	 * overwrites existing entries.
548 	 */
549 	ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
550 		     PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
551 	ASSERT_NE(ptr1, MAP_FAILED);
552 	/* We want guard markers at start/end of each VMA. */
553 	vec[0].iov_base = ptr1;
554 	vec[0].iov_len = page_size;
555 	vec[1].iov_base = &ptr1[9 * page_size];
556 	vec[1].iov_len = page_size;
557 
558 	/* 5 pages offset 50 pages into reserve region. */
559 	ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
560 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
561 	ASSERT_NE(ptr2, MAP_FAILED);
562 	vec[2].iov_base = ptr2;
563 	vec[2].iov_len = page_size;
564 	vec[3].iov_base = &ptr2[4 * page_size];
565 	vec[3].iov_len = page_size;
566 
567 	/* 20 pages offset 79 pages into reserve region. */
568 	ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
569 		    PROT_READ | PROT_WRITE, MAP_FIXED, 0);
570 	ASSERT_NE(ptr3, MAP_FAILED);
571 	vec[4].iov_base = ptr3;
572 	vec[4].iov_len = page_size;
573 	vec[5].iov_base = &ptr3[19 * page_size];
574 	vec[5].iov_len = page_size;
575 
576 	/* Free surrounding VMAs. */
577 	ASSERT_EQ(munmap(ptr_region, page_size), 0);
578 	ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
579 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
580 	ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
581 
582 	/* Now guard in one step. */
583 	count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0);
584 
585 	/* OK we don't have permission to do this, skip. */
586 	if (count == -1 && errno == EPERM)
587 		ksft_exit_skip("No process_madvise() permissions, try running as root.\n");
588 
589 	/* Returns the number of bytes advised. */
590 	ASSERT_EQ(count, 6 * page_size);
591 
592 	/* Now make sure the guarding was applied. */
593 
594 	ASSERT_FALSE(try_read_write_buf(ptr1));
595 	ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));
596 
597 	ASSERT_FALSE(try_read_write_buf(ptr2));
598 	ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));
599 
600 	ASSERT_FALSE(try_read_write_buf(ptr3));
601 	ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
602 
603 	/* Now do the same with unguard... */
604 	count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0);
605 
606 	/* ...and everything should now succeed. */
607 
608 	ASSERT_TRUE(try_read_write_buf(ptr1));
609 	ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));
610 
611 	ASSERT_TRUE(try_read_write_buf(ptr2));
612 	ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));
613 
614 	ASSERT_TRUE(try_read_write_buf(ptr3));
615 	ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));
616 
617 	/* Cleanup. */
618 	ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
619 	ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
620 	ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
621 	close(pidfd);
622 }
623 
624 /* Assert that unmapping ranges does not leave guard markers behind. */
625 TEST_F(guard_regions, munmap)
626 {
627 	const unsigned long page_size = self->page_size;
628 	char *ptr, *ptr_new1, *ptr_new2;
629 
630 	ptr = mmap_(self, variant, NULL, 10 * page_size,
631 		    PROT_READ | PROT_WRITE, 0, 0);
632 	ASSERT_NE(ptr, MAP_FAILED);
633 
634 	/* Guard first and last pages. */
635 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
636 	ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);
637 
638 	/* Assert that they are guarded. */
639 	ASSERT_FALSE(try_read_write_buf(ptr));
640 	ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));
641 
642 	/* Unmap them. */
643 	ASSERT_EQ(munmap(ptr, page_size), 0);
644 	ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
645 
646 	/* Map over them. */
647 	ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
648 			 MAP_FIXED, 0);
649 	ASSERT_NE(ptr_new1, MAP_FAILED);
650 	ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
651 			 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
652 	ASSERT_NE(ptr_new2, MAP_FAILED);
653 
654 	/* Assert that they are now not guarded. */
655 	ASSERT_TRUE(try_read_write_buf(ptr_new1));
656 	ASSERT_TRUE(try_read_write_buf(ptr_new2));
657 
658 	/* Cleanup. */
659 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
660 }
661 
662 /* Assert that mprotect() operations have no bearing on guard markers. */
663 TEST_F(guard_regions, mprotect)
664 {
665 	const unsigned long page_size = self->page_size;
666 	char *ptr;
667 	int i;
668 
669 	ptr = mmap_(self, variant, NULL, 10 * page_size,
670 		    PROT_READ | PROT_WRITE, 0, 0);
671 	ASSERT_NE(ptr, MAP_FAILED);
672 
673 	/* Guard the middle of the range. */
674 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
675 			  MADV_GUARD_INSTALL), 0);
676 
677 	/* Assert that it is indeed guarded. */
678 	ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
679 	ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
680 
681 	/* Now make these pages read-only. */
682 	ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
683 
684 	/* Make sure the range is still guarded. */
685 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
686 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
687 
688 	/* Make sure we can guard again without issue. */
689 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
690 			  MADV_GUARD_INSTALL), 0);
691 
692 	/* Make sure the range is, yet again, still guarded. */
693 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
694 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
695 
696 	/* Now unguard the whole range. */
697 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
698 
699 	/* Make sure the whole range is readable. */
700 	for (i = 0; i < 10; i++) {
701 		char *curr = &ptr[i * page_size];
702 
703 		ASSERT_TRUE(try_read_buf(curr));
704 	}
705 
706 	/* Cleanup. */
707 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
708 }
709 
710 /* Split and merge VMAs and make sure guard pages still behave. */
711 TEST_F(guard_regions, split_merge)
712 {
713 	const unsigned long page_size = self->page_size;
714 	char *ptr, *ptr_new;
715 	int i;
716 
717 	ptr = mmap_(self, variant, NULL, 10 * page_size,
718 		    PROT_READ | PROT_WRITE, 0, 0);
719 	ASSERT_NE(ptr, MAP_FAILED);
720 
721 	/* Guard the whole range. */
722 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
723 
724 	/* Make sure the whole range is guarded. */
725 	for (i = 0; i < 10; i++) {
726 		char *curr = &ptr[i * page_size];
727 
728 		ASSERT_FALSE(try_read_write_buf(curr));
729 	}
730 
731 	/* Now unmap some pages in the range so we split. */
732 	ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
733 	ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
734 	ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
735 
736 	/* Make sure the remaining ranges are guarded post-split. */
737 	for (i = 0; i < 2; i++) {
738 		char *curr = &ptr[i * page_size];
739 
740 		ASSERT_FALSE(try_read_write_buf(curr));
741 	}
742 	for (i = 2; i < 5; i++) {
743 		char *curr = &ptr[i * page_size];
744 
745 		ASSERT_FALSE(try_read_write_buf(curr));
746 	}
747 	for (i = 6; i < 8; i++) {
748 		char *curr = &ptr[i * page_size];
749 
750 		ASSERT_FALSE(try_read_write_buf(curr));
751 	}
752 	for (i = 9; i < 10; i++) {
753 		char *curr = &ptr[i * page_size];
754 
755 		ASSERT_FALSE(try_read_write_buf(curr));
756 	}
757 
758 	/* Now map them again - the unmap will have cleared the guards. */
759 	ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
760 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
761 	ASSERT_NE(ptr_new, MAP_FAILED);
762 	ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
763 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
764 	ASSERT_NE(ptr_new, MAP_FAILED);
765 	ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
766 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
767 	ASSERT_NE(ptr_new, MAP_FAILED);
768 
769 	/* Now make sure guard pages remain established, except where we remapped. */
770 	for (i = 0; i < 10; i++) {
771 		char *curr = &ptr[i * page_size];
772 		bool result = try_read_write_buf(curr);
773 		bool expect_true = i == 2 || i == 5 || i == 8;
774 
775 		ASSERT_TRUE(expect_true ? result : !result);
776 	}
777 
778 	/* Now guard everything again. */
779 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
780 
781 	/* Make sure the whole range is guarded. */
782 	for (i = 0; i < 10; i++) {
783 		char *curr = &ptr[i * page_size];
784 
785 		ASSERT_FALSE(try_read_write_buf(curr));
786 	}
787 
788 	/* Now split the range into three. */
789 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
790 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
791 
792 	/* Make sure the whole range is guarded for read. */
793 	for (i = 0; i < 10; i++) {
794 		char *curr = &ptr[i * page_size];
795 
796 		ASSERT_FALSE(try_read_buf(curr));
797 	}
798 
799 	/* Now reset protection bits so we merge the whole thing. */
800 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
801 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
802 			   PROT_READ | PROT_WRITE), 0);
803 
804 	/* Make sure the whole range is still guarded. */
805 	for (i = 0; i < 10; i++) {
806 		char *curr = &ptr[i * page_size];
807 
808 		ASSERT_FALSE(try_read_write_buf(curr));
809 	}
810 
811 	/* Split range into 3 again... */
812 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
813 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
814 
815 	/* ...and unguard the whole range. */
816 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
817 
818 	/* Make sure the whole range is remedied for read. */
819 	for (i = 0; i < 10; i++) {
820 		char *curr = &ptr[i * page_size];
821 
822 		ASSERT_TRUE(try_read_buf(curr));
823 	}
824 
825 	/* Merge them again. */
826 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
827 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
828 			   PROT_READ | PROT_WRITE), 0);
829 
830 	/* Now ensure the merged range is remedied for read/write. */
831 	for (i = 0; i < 10; i++) {
832 		char *curr = &ptr[i * page_size];
833 
834 		ASSERT_TRUE(try_read_write_buf(curr));
835 	}
836 
837 	/* Cleanup. */
838 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
839 }
840 
841 /* Assert that MADV_DONTNEED does not remove guard markers. */
842 TEST_F(guard_regions, dontneed)
843 {
844 	const unsigned long page_size = self->page_size;
845 	char *ptr;
846 	int i;
847 
848 	ptr = mmap_(self, variant, NULL, 10 * page_size,
849 		    PROT_READ | PROT_WRITE, 0, 0);
850 	ASSERT_NE(ptr, MAP_FAILED);
851 
852 	/* Back the whole range. */
853 	for (i = 0; i < 10; i++) {
854 		char *curr = &ptr[i * page_size];
855 
856 		*curr = 'y';
857 	}
858 
859 	/* Guard every other page. */
860 	for (i = 0; i < 10; i += 2) {
861 		char *curr = &ptr[i * page_size];
862 		int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
863 
864 		ASSERT_EQ(res, 0);
865 	}
866 
867 	/* Indicate that we don't need any of the range. */
868 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
869 
870 	/* Check to ensure guard markers are still in place. */
871 	for (i = 0; i < 10; i++) {
872 		char *curr = &ptr[i * page_size];
873 		bool result = try_read_buf(curr);
874 
875 		if (i % 2 == 0) {
876 			ASSERT_FALSE(result);
877 		} else {
878 			ASSERT_TRUE(result);
879 			switch (variant->backing) {
880 			case ANON_BACKED:
881 				/* If anon, then we get a zero page. */
882 				ASSERT_EQ(*curr, '\0');
883 				break;
884 			default:
885 				/* Otherwise, we get the file data. */
886 				ASSERT_EQ(*curr, 'y');
887 				break;
888 			}
889 		}
890 
891 		/* Now write... */
892 		result = try_write_buf(&ptr[i * page_size]);
893 
894 		/* ...and make sure same result. */
895 		ASSERT_TRUE(i % 2 != 0 ? result : !result);
896 	}
897 
898 	/* Cleanup. */
899 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
900 }
901 
902 /* Assert that mlock()'ed pages work correctly with guard markers. */
903 TEST_F(guard_regions, mlock)
904 {
905 	const unsigned long page_size = self->page_size;
906 	char *ptr;
907 	int i;
908 
909 	ptr = mmap_(self, variant, NULL, 10 * page_size,
910 		    PROT_READ | PROT_WRITE, 0, 0);
911 	ASSERT_NE(ptr, MAP_FAILED);
912 
913 	/* Populate. */
914 	for (i = 0; i < 10; i++) {
915 		char *curr = &ptr[i * page_size];
916 
917 		*curr = 'y';
918 	}
919 
920 	/* Lock. */
921 	ASSERT_EQ(mlock(ptr, 10 * page_size), 0);
922 
923 	/* Now try to guard, should fail with EINVAL. */
924 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
925 	ASSERT_EQ(errno, EINVAL);
926 
927 	/* OK unlock. */
928 	ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
929 
930 	/* Guard first half of range, should now succeed. */
931 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
932 
933 	/* Make sure guard works. */
934 	for (i = 0; i < 10; i++) {
935 		char *curr = &ptr[i * page_size];
936 		bool result = try_read_write_buf(curr);
937 
938 		if (i < 5) {
939 			ASSERT_FALSE(result);
940 		} else {
941 			ASSERT_TRUE(result);
942 			ASSERT_EQ(*curr, 'x');
943 		}
944 	}
945 
946 	/*
947 	 * Now lock the latter part of the range. We can't lock the guard pages,
948 	 * as this would result in the pages being populated and the guarding
949 	 * would cause this to error out.
950 	 */
951 	ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
952 
953 	/*
954 	 * Now remove guard pages, we permit mlock()'d ranges to have guard
955 	 * pages removed as it is a non-destructive operation.
956 	 */
957 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
958 
959 	/* Now check that no guard pages remain. */
960 	for (i = 0; i < 10; i++) {
961 		char *curr = &ptr[i * page_size];
962 
963 		ASSERT_TRUE(try_read_write_buf(curr));
964 	}
965 
966 	/* Cleanup. */
967 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
968 }
969 
970 /*
971  * Assert that moving, extending and shrinking memory via mremap() retains
972  * guard markers where possible.
973  *
974  * - Moving a mapping alone should retain markers as they are.
975  */
976 TEST_F(guard_regions, mremap_move)
977 {
978 	const unsigned long page_size = self->page_size;
979 	char *ptr, *ptr_new;
980 
981 	/* Map 5 pages. */
982 	ptr = mmap_(self, variant, NULL, 5 * page_size,
983 		    PROT_READ | PROT_WRITE, 0, 0);
984 	ASSERT_NE(ptr, MAP_FAILED);
985 
986 	/* Place guard markers at both ends of the 5 page span. */
987 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
988 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
989 
990 	/* Make sure the guard pages are in effect. */
991 	ASSERT_FALSE(try_read_write_buf(ptr));
992 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
993 
994 	/* Map a new region we will move this range into. Doing this ensures
995 	 * that we have reserved a range to map into.
996 	 */
997 	ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
998 	ASSERT_NE(ptr_new, MAP_FAILED);
999 
1000 	ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
1001 			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);
1002 
1003 	/* Make sure the guard markers are retained. */
1004 	ASSERT_FALSE(try_read_write_buf(ptr_new));
1005 	ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));
1006 
1007 	/*
1008 	 * Clean up. We need only reference the new pointer, as we overwrote the
1009 	 * PROT_NONE range and moved the existing one.
1010 	 */
1011 	munmap(ptr_new, 5 * page_size);
1012 }
1013 
1014 /*
1015  * Assert that moving, extending and shrinking memory via mremap() retains
1016  * guard markers where possible.
1017  *
1018  * Expanding should retain guard pages, only now in a different position. The user
1019  * will have to remove guard pages manually to fix up (they'd have to do the
1020  * same if it were a PROT_NONE mapping).
1021  */
1022 TEST_F(guard_regions, mremap_expand)
1023 {
1024 	const unsigned long page_size = self->page_size;
1025 	char *ptr, *ptr_new;
1026 
1027 	/* Map 10 pages... */
1028 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1029 		    PROT_READ | PROT_WRITE, 0, 0);
1030 	ASSERT_NE(ptr, MAP_FAILED);
1031 	/* ...But unmap the last 5 so we can ensure we can expand into them. */
1032 	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
1033 
1034 	/* Place guard markers at both ends of the 5 page span. */
1035 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1036 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1037 
1038 	/* Make sure the guarding is in effect. */
1039 	ASSERT_FALSE(try_read_write_buf(ptr));
1040 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1041 
1042 	/* Now expand to 10 pages. */
1043 	ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
1044 	ASSERT_NE(ptr, MAP_FAILED);
1045 
1046 	/*
1047 	 * Make sure the guard markers are retained in their original positions.
1048 	 */
1049 	ASSERT_FALSE(try_read_write_buf(ptr));
1050 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1051 
1052 	/* Reserve a region which we can move to and expand into. */
1053 	ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
1054 	ASSERT_NE(ptr_new, MAP_FAILED);
1055 
1056 	/* Now move and expand into it. */
1057 	ptr = mremap(ptr, 10 * page_size, 20 * page_size,
1058 		     MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
1059 	ASSERT_EQ(ptr, ptr_new);
1060 
1061 	/*
1062 	 * Again, make sure the guard markers are retained in their original positions.
1063 	 */
1064 	ASSERT_FALSE(try_read_write_buf(ptr));
1065 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1066 
1067 	/*
1068 	 * A real user would have to remove guard markers, but would reasonably
1069 	 * expect all characteristics of the mapping to be retained, including
1070 	 * guard markers.
1071 	 */
1072 
1073 	/* Cleanup. */
1074 	munmap(ptr, 20 * page_size);
1075 }

1076 /*
1077  * Assert that moving, extending and shrinking memory via mremap() retains
1078  * guard markers where possible.
1079  *
1080  * Shrinking will remove any markers that fall within the shrunk-away range. Again,
1081  * if the user were using a PROT_NONE mapping they'd have to manually fix this
1082  * up also so this is OK.
1083  */
1084 TEST_F(guard_regions, mremap_shrink)
1085 {
1086 	const unsigned long page_size = self->page_size;
1087 	char *ptr;
1088 	int i;
1089 
1090 	/* Map 5 pages. */
1091 	ptr = mmap_(self, variant, NULL, 5 * page_size,
1092 		    PROT_READ | PROT_WRITE, 0, 0);
1093 	ASSERT_NE(ptr, MAP_FAILED);
1094 
1095 	/* Place guard markers at both ends of the 5 page span. */
1096 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1097 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1098 
1099 	/* Make sure the guarding is in effect. */
1100 	ASSERT_FALSE(try_read_write_buf(ptr));
1101 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1102 
1103 	/* Now shrink to 3 pages. */
1104 	ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
1105 	ASSERT_NE(ptr, MAP_FAILED);
1106 
1107 	/* We expect the guard marker at the start to be retained... */
1108 	ASSERT_FALSE(try_read_write_buf(ptr));
1109 
1110 	/* ...But remaining pages will not have guard markers. */
1111 	for (i = 1; i < 3; i++) {
1112 		char *curr = &ptr[i * page_size];
1113 
1114 		ASSERT_TRUE(try_read_write_buf(curr));
1115 	}
1116 
1117 	/*
1118 	 * As with expansion, a real user would have to remove guard pages and
1119 	 * fixup. But you'd have to do similar manual things with PROT_NONE
1120 	 * mappings too.
1121 	 */
1122 
1123 	/*
1124 	 * If we expand back to the original size, the end marker will, of
1125 	 * course, no longer be present.
1126 	 */
1127 	ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
1128 	ASSERT_NE(ptr, MAP_FAILED);
1129 
1130 	/* Again, we expect the guard marker at the start to be retained... */
1131 	ASSERT_FALSE(try_read_write_buf(ptr));
1132 
1133 	/* ...But remaining pages will not have guard markers. */
1134 	for (i = 1; i < 5; i++) {
1135 		char *curr = &ptr[i * page_size];
1136 
1137 		ASSERT_TRUE(try_read_write_buf(curr));
1138 	}
1139 
1140 	/* Cleanup. */
1141 	munmap(ptr, 5 * page_size);
1142 }
1143 
1144 /*
1145  * Assert that, on fork, VMAs which do not have VM_WIPEONFORK set retain
1146  * guard pages in the child.
1147  */
1148 TEST_F(guard_regions, fork)
1149 {
1150 	const unsigned long page_size = self->page_size;
1151 	char *ptr;
1152 	pid_t pid;
1153 	int i;
1154 
1155 	/* Map 10 pages. */
1156 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1157 		    PROT_READ | PROT_WRITE, 0, 0);
1158 	ASSERT_NE(ptr, MAP_FAILED);
1159 
1160 	/* Establish guard pages in the first 5 pages. */
1161 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1162 
1163 	pid = fork();
1164 	ASSERT_NE(pid, -1);
1165 	if (!pid) {
1166 		/* This is the child process now. */
1167 
1168 		/* Assert that the guarding is in effect. */
1169 		for (i = 0; i < 10; i++) {
1170 			char *curr = &ptr[i * page_size];
1171 			bool result = try_read_write_buf(curr);
1172 
1173 			ASSERT_TRUE(i >= 5 ? result : !result);
1174 		}
1175 
1176 		/* Now unguard the range. */
1177 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1178 
1179 		exit(0);
1180 	}
1181 
1182 	/* Parent process. */
1183 
1184 	/* Parent simply waits on child. */
1185 	waitpid(pid, NULL, 0);
1186 
1187 	/* Child unguard does not impact parent page table state. */
1188 	for (i = 0; i < 10; i++) {
1189 		char *curr = &ptr[i * page_size];
1190 		bool result = try_read_write_buf(curr);
1191 
1192 		ASSERT_TRUE(i >= 5 ? result : !result);
1193 	}
1194 
1195 	/* Cleanup. */
1196 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1197 }
1198 
1199 /*
1200  * Assert expected behaviour after we fork populated ranges of anonymous memory
1201  * and then guard and unguard the range.
1202  */
1203 TEST_F(guard_regions, fork_cow)
1204 {
1205 	const unsigned long page_size = self->page_size;
1206 	char *ptr;
1207 	pid_t pid;
1208 	int i;
1209 
1210 	if (variant->backing != ANON_BACKED)
1211 		SKIP(return, "CoW only supported on anon mappings");
1212 
1213 	/* Map 10 pages. */
1214 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1215 		    PROT_READ | PROT_WRITE, 0, 0);
1216 	ASSERT_NE(ptr, MAP_FAILED);
1217 
1218 	/* Populate range. */
1219 	for (i = 0; i < 10 * page_size; i++) {
1220 		char chr = 'a' + (i % 26);
1221 
1222 		ptr[i] = chr;
1223 	}
1224 
1225 	pid = fork();
1226 	ASSERT_NE(pid, -1);
1227 	if (!pid) {
1228 		/* This is the child process now. */
1229 
1230 		/* Ensure the range is as expected. */
1231 		for (i = 0; i < 10 * page_size; i++) {
1232 			char expected = 'a' + (i % 26);
1233 			char actual = ptr[i];
1234 
1235 			ASSERT_EQ(actual, expected);
1236 		}
1237 
1238 		/* Establish guard pages across the whole range. */
1239 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1240 		/* Remove it. */
1241 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1242 
1243 		/*
1244 		 * By removing the guard pages, the page tables will be
1245 		 * cleared. Assert that we are looking at the zero page now.
1246 		 */
1247 		for (i = 0; i < 10 * page_size; i++) {
1248 			char actual = ptr[i];
1249 
1250 			ASSERT_EQ(actual, '\0');
1251 		}
1252 
1253 		exit(0);
1254 	}
1255 
1256 	/* Parent process. */
1257 
1258 	/* Parent simply waits on child. */
1259 	waitpid(pid, NULL, 0);
1260 
1261 	/* Ensure the range is unchanged in parent anon range. */
1262 	for (i = 0; i < 10 * page_size; i++) {
1263 		char expected = 'a' + (i % 26);
1264 		char actual = ptr[i];
1265 
1266 		ASSERT_EQ(actual, expected);
1267 	}
1268 
1269 	/* Cleanup. */
1270 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1271 }
1272 
1273 /*
1274  * Assert that, on fork, VMAs which do have VM_WIPEONFORK set behave as
1275  * expected, i.e. guard markers are wiped in the child.
1276  */
1277 TEST_F(guard_regions, fork_wipeonfork)
1278 {
1279 	const unsigned long page_size = self->page_size;
1280 	char *ptr;
1281 	pid_t pid;
1282 	int i;
1283 
1284 	if (variant->backing != ANON_BACKED)
1285 		SKIP(return, "Wipe on fork only supported on anon mappings");
1286 
1287 	/* Map 10 pages. */
1288 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1289 		    PROT_READ | PROT_WRITE, 0, 0);
1290 	ASSERT_NE(ptr, MAP_FAILED);
1291 
1292 	/* Mark wipe on fork. */
1293 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
1294 
1295 	/* Guard the first 5 pages. */
1296 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1297 
1298 	pid = fork();
1299 	ASSERT_NE(pid, -1);
1300 	if (!pid) {
1301 		/* This is the child process now. */
1302 
1303 		/* Guard will have been wiped. */
1304 		for (i = 0; i < 10; i++) {
1305 			char *curr = &ptr[i * page_size];
1306 
1307 			ASSERT_TRUE(try_read_write_buf(curr));
1308 		}
1309 
1310 		exit(0);
1311 	}
1312 
1313 	/* Parent process. */
1314 
1315 	waitpid(pid, NULL, 0);
1316 
1317 	/* Guard markers should be in effect. */
1318 	for (i = 0; i < 10; i++) {
1319 		char *curr = &ptr[i * page_size];
1320 		bool result = try_read_write_buf(curr);
1321 
1322 		ASSERT_TRUE(i >= 5 ? result : !result);
1323 	}
1324 
1325 	/* Cleanup. */
1326 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1327 }
1328 
1329 /* Ensure that MADV_FREE retains guard entries as expected. */
1330 TEST_F(guard_regions, lazyfree)
1331 {
1332 	const unsigned long page_size = self->page_size;
1333 	char *ptr;
1334 	int i;
1335 
1336 	if (variant->backing != ANON_BACKED)
1337 		SKIP(return, "MADV_FREE only supported on anon mappings");
1338 
1339 	/* Map 10 pages. */
1340 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1341 		    PROT_READ | PROT_WRITE, 0, 0);
1342 	ASSERT_NE(ptr, MAP_FAILED);
1343 
1344 	/* Guard range. */
1345 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1346 
1347 	/* Ensure guarded. */
1348 	for (i = 0; i < 10; i++) {
1349 		char *curr = &ptr[i * page_size];
1350 
1351 		ASSERT_FALSE(try_read_write_buf(curr));
1352 	}
1353 
1354 	/* Lazyfree range. */
1355 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);
1356 
1357 	/* This should leave the guard markers in place. */
1358 	for (i = 0; i < 10; i++) {
1359 		char *curr = &ptr[i * page_size];
1360 
1361 		ASSERT_FALSE(try_read_write_buf(curr));
1362 	}
1363 
1364 	/* Cleanup. */
1365 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1366 }
1367 
1368 /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
1369 TEST_F(guard_regions, populate)
1370 {
1371 	const unsigned long page_size = self->page_size;
1372 	char *ptr;
1373 
1374 	/* Map 10 pages. */
1375 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1376 		    PROT_READ | PROT_WRITE, 0, 0);
1377 	ASSERT_NE(ptr, MAP_FAILED);
1378 
1379 	/* Guard range. */
1380 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1381 
1382 	/* Populate read should error out... */
1383 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
1384 	ASSERT_EQ(errno, EFAULT);
1385 
1386 	/* ...as should populate write. */
1387 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
1388 	ASSERT_EQ(errno, EFAULT);
1389 
1390 	/* Cleanup. */
1391 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1392 }
1393 
1394 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1395 TEST_F(guard_regions, cold_pageout)
1396 {
1397 	const unsigned long page_size = self->page_size;
1398 	char *ptr;
1399 	int i;
1400 
1401 	/* Map 10 pages. */
1402 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1403 		    PROT_READ | PROT_WRITE, 0, 0);
1404 	ASSERT_NE(ptr, MAP_FAILED);
1405 
1406 	/* Guard range. */
1407 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1408 
1409 	/* Ensure guarded. */
1410 	for (i = 0; i < 10; i++) {
1411 		char *curr = &ptr[i * page_size];
1412 
1413 		ASSERT_FALSE(try_read_write_buf(curr));
1414 	}
1415 
1416 	/* Now mark cold. This should have no impact on guard markers. */
1417 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
1418 
1419 	/* Should remain guarded. */
1420 	for (i = 0; i < 10; i++) {
1421 		char *curr = &ptr[i * page_size];
1422 
1423 		ASSERT_FALSE(try_read_write_buf(curr));
1424 	}
1425 
1426 	/* OK, now page out. This should, equally, have no effect on markers. */
1427 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1428 
1429 	/* Should remain guarded. */
1430 	for (i = 0; i < 10; i++) {
1431 		char *curr = &ptr[i * page_size];
1432 
1433 		ASSERT_FALSE(try_read_write_buf(curr));
1434 	}
1435 
1436 	/* Cleanup. */
1437 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1438 }
1439 
1440 /* Ensure that guard pages do not break userfaultfd. */
1441 TEST_F(guard_regions, uffd)
1442 {
1443 	const unsigned long page_size = self->page_size;
1444 	int uffd;
1445 	char *ptr;
1446 	int i;
1447 	struct uffdio_api api = {
1448 		.api = UFFD_API,
1449 		.features = 0,
1450 	};
1451 	struct uffdio_register reg;
1452 	struct uffdio_range range;
1453 
1454 	if (!is_anon_backed(variant))
1455 		SKIP(return, "uffd only works on anon backing");
1456 
1457 	/* Set up uffd. */
1458 	uffd = userfaultfd(0);
1459 	if (uffd == -1 && errno == EPERM)
1460 		ksft_exit_skip("No userfaultfd permissions, try running as root.\n");
1461 	ASSERT_NE(uffd, -1);
1462 
1463 	ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
1464 
1465 	/* Map 10 pages. */
1466 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1467 		    PROT_READ | PROT_WRITE, 0, 0);
1468 	ASSERT_NE(ptr, MAP_FAILED);
1469 
1470 	/* Register the range with uffd. */
1471 	range.start = (unsigned long)ptr;
1472 	range.len = 10 * page_size;
1473 	reg.range = range;
1474 	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
1475 	ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);
1476 
1477 	/* Guard the range. This should not trigger the uffd. */
1478 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1479 
1480 	/* The guarding should behave as usual with no uffd intervention. */
1481 	for (i = 0; i < 10; i++) {
1482 		char *curr = &ptr[i * page_size];
1483 
1484 		ASSERT_FALSE(try_read_write_buf(curr));
1485 	}
1486 
1487 	/* Cleanup. */
1488 	ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
1489 	close(uffd);
1490 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1491 }
1492 
1493 /*
1494  * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1495  * aggressively read-ahead, then install guard regions and assert that it
1496  * behaves correctly.
1497  *
1498  * We page out using MADV_PAGEOUT before checking guard regions so we drop page
1499  * cache folios, meaning we maximise the possibility of some broken readahead.
1500  */
1501 TEST_F(guard_regions, madvise_sequential)
1502 {
1503 	char *ptr;
1504 	int i;
1505 	const unsigned long page_size = self->page_size;
1506 
1507 	if (variant->backing == ANON_BACKED)
1508 		SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
1509 
1510 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1511 		    PROT_READ | PROT_WRITE, 0, 0);
1512 	ASSERT_NE(ptr, MAP_FAILED);
1513 
1514 	/* Establish a pattern of data in the file. */
1515 	set_pattern(ptr, 10, page_size);
1516 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1517 
1518 	/* Mark it as being accessed sequentially. */
1519 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
1520 
1521 	/* Mark every other page a guard page. */
1522 	for (i = 0; i < 10; i += 2) {
1523 		char *ptr2 = &ptr[i * page_size];
1524 
1525 		ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
1526 	}
1527 
1528 	/* Now page it out. */
1529 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1530 
1531 	/* Now make sure pages are as expected. */
1532 	for (i = 0; i < 10; i++) {
1533 		char *chrp = &ptr[i * page_size];
1534 
1535 		if (i % 2 == 0) {
1536 			bool result = try_read_write_buf(chrp);
1537 
1538 			ASSERT_FALSE(result);
1539 		} else {
1540 			ASSERT_EQ(*chrp, 'a' + i);
1541 		}
1542 	}
1543 
1544 	/* Now remove guard pages. */
1545 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1546 
1547 	/* Now make sure all data is as expected. */
1548 	if (!check_pattern(ptr, 10, page_size))
1549 		ASSERT_TRUE(false);
1550 
1551 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1552 }
1553 
1554 /*
1555  * Check that file-backed mappings implement guard regions with MAP_PRIVATE
1556  * correctly.
1557  */
1558 TEST_F(guard_regions, map_private)
1559 {
1560 	const unsigned long page_size = self->page_size;
1561 	char *ptr_shared, *ptr_private;
1562 	int i;
1563 
1564 	if (variant->backing == ANON_BACKED)
1565 		SKIP(return, "MAP_PRIVATE test specific to file-backed");
1566 
1567 	ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1568 	ASSERT_NE(ptr_shared, MAP_FAILED);
1569 
1570 	/* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */
1571 	ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
1572 	ASSERT_NE(ptr_private, MAP_FAILED);
1573 
1574 	/* Set pattern in shared mapping. */
1575 	set_pattern(ptr_shared, 10, page_size);
1576 
1577 	/* Install guard regions in every other page in the shared mapping. */
1578 	for (i = 0; i < 10; i += 2) {
1579 		char *ptr = &ptr_shared[i * page_size];
1580 
1581 		ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1582 	}
1583 
1584 	for (i = 0; i < 10; i++) {
1585 		/* Every even shared page should be guarded. */
1586 		ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1587 		/* Private mappings should always be readable. */
1588 		ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1589 	}
1590 
1591 	/* Install guard regions in every other page in the private mapping. */
1592 	for (i = 0; i < 10; i += 2) {
1593 		char *ptr = &ptr_private[i * page_size];
1594 
1595 		ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1596 	}
1597 
1598 	for (i = 0; i < 10; i++) {
1599 		/* Every even shared page should be guarded. */
1600 		ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1601 		/* Every even private page should be guarded. */
1602 		ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1603 	}
1604 
1605 	/* Remove guard regions from shared mapping. */
1606 	ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
1607 
1608 	for (i = 0; i < 10; i++) {
1609 		/* Shared mappings should always be readable. */
1610 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1611 		/* Every even private page should be guarded. */
1612 		ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1613 	}
1614 
1615 	/* Remove guard regions from private mapping. */
1616 	ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1617 
1618 	for (i = 0; i < 10; i++) {
1619 		/* Shared mappings should always be readable. */
1620 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1621 		/* Private mappings should always be readable. */
1622 		ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1623 	}
1624 
1625 	/* Ensure patterns are intact. */
1626 	ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
1627 	ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
1628 
1629 	/* Now write out every other page to MAP_PRIVATE. */
1630 	for (i = 0; i < 10; i += 2) {
1631 		char *ptr = &ptr_private[i * page_size];
1632 
1633 		memset(ptr, 'a' + i, page_size);
1634 	}
1635 
1636 	/*
1637 	 * At this point the mapping is:
1638 	 *
1639 	 * 0123456789
1640 	 * SPSPSPSPSP
1641 	 *
1642 	 * Where S = shared, P = private mappings.
1643 	 */
1644 
1645 	/* Now mark the beginning of the mapping guarded. */
1646 	ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
1647 
1648 	/*
1649 	 * This renders the mapping:
1650 	 *
1651 	 * 0123456789
1652 	 * xxxxxPSPSP
1653 	 */
1654 
1655 	for (i = 0; i < 10; i++) {
1656 		char *ptr = &ptr_private[i * page_size];
1657 
1658 		/* Ensure guard regions as expected. */
1659 		ASSERT_EQ(try_read_buf(ptr), i >= 5);
1660 		/* The shared mapping should always succeed. */
1661 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1662 	}
1663 
1664 	/* Remove the guard regions altogether. */
1665 	ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1666 
1667 	/*
1668 	 *
1669 	 * We now expect the mapping to be:
1670 	 *
1671 	 * 0123456789
1672 	 * SSSSSPSPSP
1673 	 *
1674 	 * As we removed guard regions, the private pages from the first 5 will
1675 	 * have been zapped, so on fault will reestablish the shared mapping.
1676 	 */
1677 
1678 	for (i = 0; i < 10; i++) {
1679 		char *ptr = &ptr_private[i * page_size];
1680 
1681 		/*
1682 		 * Assert that shared mappings in the MAP_PRIVATE mapping match
1683 		 * the shared mapping.
1684 		 */
1685 		if (i < 5 || i % 2 == 0) {
1686 			char *ptr_s = &ptr_shared[i * page_size];
1687 
1688 			ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
1689 			continue;
1690 		}
1691 
1692 		/* Everything else is a private mapping. */
1693 		ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
1694 	}
1695 
1696 	ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
1697 	ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
1698 }
1699 
1700 /* Test that guard regions established over a read-only mapping function correctly. */
1701 TEST_F(guard_regions, readonly_file)
1702 {
1703 	const unsigned long page_size = self->page_size;
1704 	char *ptr;
1705 	int i;
1706 
1707 	if (variant->backing == ANON_BACKED)
1708 		SKIP(return, "Read-only test specific to file-backed");
1709 
1710 	/* Map shared so we can populate the file with a pattern, then unmap. */
1711 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1712 		    PROT_READ | PROT_WRITE, 0, 0);
1713 	ASSERT_NE(ptr, MAP_FAILED);
1714 	set_pattern(ptr, 10, page_size);
1715 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1716 	/* Close the fd so we can re-open read-only. */
1717 	ASSERT_EQ(close(self->fd), 0);
1718 
1719 	/* Re-open read-only. */
1720 	self->fd = open(self->path, O_RDONLY);
1721 	ASSERT_NE(self->fd, -1);
1722 	/* Re-map read-only. */
1723 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1724 	ASSERT_NE(ptr, MAP_FAILED);
1725 
1726 	/* Mark every other page guarded. */
1727 	for (i = 0; i < 10; i += 2) {
1728 		char *ptr_pg = &ptr[i * page_size];
1729 
1730 		ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
1731 	}
1732 
1733 	/* Assert that the guard regions are in place. */
1734 	for (i = 0; i < 10; i++) {
1735 		char *ptr_pg = &ptr[i * page_size];
1736 
1737 		ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
1738 	}
1739 
1740 	/* Remove guard regions. */
1741 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1742 
1743 	/* Ensure the data is as expected. */
1744 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1745 
1746 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1747 }
1748 
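/*
 * Assert that fault-around of file-backed memory does not populate pages over
 * which guard regions have been installed.
 */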
1749 TEST_F(guard_regions, fault_around)
1750 {
1751 	const unsigned long page_size = self->page_size;
1752 	char *ptr;
1753 	int i;
1754 
1755 	if (variant->backing == ANON_BACKED)
1756 		SKIP(return, "Fault-around test specific to file-backed");
1757 
1758 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1759 		    PROT_READ | PROT_WRITE, 0, 0);
1760 	ASSERT_NE(ptr, MAP_FAILED);
1761 
1762 	/* Establish a pattern in the backing file. */
1763 	set_pattern(ptr, 10, page_size);
1764 
1765 	/*
1766 	 * Now drop it from the page cache so we get major faults when next we
1767 	 * map it.
1768 	 */
1769 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1770 
1771 	/* Unmap and remap 'to be sure'. */
1772 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1773 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1774 		    PROT_READ | PROT_WRITE, 0, 0);
1775 	ASSERT_NE(ptr, MAP_FAILED);
1776 
1777 	/* Now make every even page guarded. */
1778 	for (i = 0; i < 10; i += 2) {
1779 		char *ptr_p = &ptr[i * page_size];
1780 
1781 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1782 	}
1783 
1784 	/* Now fault in every odd page. This should trigger fault-around. */
1785 	for (i = 1; i < 10; i += 2) {
1786 		char *ptr_p = &ptr[i * page_size];
1787 
1788 		ASSERT_TRUE(try_read_buf(ptr_p));
1789 	}
1790 
1791 	/* Finally, ensure that guard regions are intact as expected. */
1792 	for (i = 0; i < 10; i++) {
1793 		char *ptr_p = &ptr[i * page_size];
1794 
1795 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1796 	}
1797 
1798 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1799 }
1800 
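/*
 * Assert that truncating and re-extending the underlying file leaves guard
 * regions in the mapping intact.
 */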
1801 TEST_F(guard_regions, truncation)
1802 {
1803 	const unsigned long page_size = self->page_size;
1804 	char *ptr;
1805 	int i;
1806 
1807 	if (variant->backing == ANON_BACKED)
1808 		SKIP(return, "Truncation test specific to file-backed");
1809 
1810 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1811 		    PROT_READ | PROT_WRITE, 0, 0);
1812 	ASSERT_NE(ptr, MAP_FAILED);
1813 
1814 	/*
1815 	 * Establish a pattern in the backing file, just so there is data
1816 	 * there.
1817 	 */
1818 	set_pattern(ptr, 10, page_size);
1819 
1820 	/* Now make every even page guarded. */
1821 	for (i = 0; i < 10; i += 2) {
1822 		char *ptr_p = &ptr[i * page_size];
1823 
1824 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1825 	}
1826 
1827 	/* Now assert things are as expected. */
1828 	for (i = 0; i < 10; i++) {
1829 		char *ptr_p = &ptr[i * page_size];
1830 
1831 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1832 	}
1833 
1834 	/* Now truncate to the actually used size (file was initialised to 100 pages). */
1835 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1836 
1837 	/* Here the guard regions will remain intact. */
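	/*
	 * Truncation operates on the file and its page cache, not on the guard
	 * markers installed in this mapping, so the guard regions are expected
	 * to survive it.
	 */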
1838 	for (i = 0; i < 10; i++) {
1839 		char *ptr_p = &ptr[i * page_size];
1840 
1841 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1842 	}
1843 
1844 	/* Now truncate to half the size, then truncate again to the full size. */
1845 	ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
1846 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1847 
1848 	/* Again, guard regions will remain intact. */
1849 	for (i = 0; i < 10; i++) {
1850 		char *ptr_p = &ptr[i * page_size];
1851 
1852 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1853 	}
1854 
1855 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1856 }
1857 
1858 TEST_F(guard_regions, hole_punch)
1859 {
1860 	const unsigned long page_size = self->page_size;
1861 	char *ptr;
1862 	int i;
1863 
1864 	if (variant->backing == ANON_BACKED)
1865 		SKIP(return, "Truncation test specific to file-backed");
1866 
1867 	/* Establish pattern in mapping. */
1868 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1869 		    PROT_READ | PROT_WRITE, 0, 0);
1870 	ASSERT_NE(ptr, MAP_FAILED);
1871 	set_pattern(ptr, 10, page_size);
1872 
1873 	/* Install a guard region in the middle of the mapping. */
1874 	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1875 			  MADV_GUARD_INSTALL), 0);
1876 
1877 	/*
1878 	 * The buffer will now be:
1879 	 *
1880 	 * 0123456789
1881 	 * ***xxxx***
1882 	 *
1883 	 * Where * is data and x is the guard region.
1884 	 */
1885 
1886 	/* Ensure established. */
1887 	for (i = 0; i < 10; i++) {
1888 		char *ptr_p = &ptr[i * page_size];
1889 
1890 		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1891 	}
1892 
1893 	/* Now hole punch the guarded region. */
1894 	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1895 			  MADV_REMOVE), 0);
1896 
1897 	/* Ensure guard regions remain. */
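	/*
	 * MADV_REMOVE punches a hole in the backing file, discarding the data
	 * there, but it must not strip the guard markers from this mapping.
	 */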
1898 	for (i = 0; i < 10; i++) {
1899 		char *ptr_p = &ptr[i * page_size];
1900 
1901 		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1902 	}
1903 
1904 	/* Now remove guard region throughout. */
1905 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1906 
1907 	/* Check that the pattern exists in non-hole punched region. */
1908 	ASSERT_TRUE(check_pattern(ptr, 3, page_size));
1909 	/* Check that hole punched region is zeroed. */
1910 	ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
1911 	/* Check that the pattern exists in the remainder of the file. */
1912 	ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));
1913 
1914 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1915 }
1916 
1917 /*
1918  * Ensure that a memfd works correctly with guard regions: that we can
1919  * write-seal it, then map it read-only and still establish guard regions
1920  * within, remove those guard regions, and have everything work correctly.
1921  */
1922 TEST_F(guard_regions, memfd_write_seal)
1923 {
1924 	const unsigned long page_size = self->page_size;
1925 	char *ptr;
1926 	int i;
1927 
1928 	if (variant->backing != SHMEM_BACKED)
1929 		SKIP(return, "memfd write seal test specific to shmem");
1930 
1931 	/* OK, we need a memfd, so close the existing one. */
1932 	ASSERT_EQ(close(self->fd), 0);
1933 
1934 	/* Create and truncate memfd. */
1935 	self->fd = memfd_create("guard_regions_memfd_seals_test",
1936 				MFD_ALLOW_SEALING);
1937 	ASSERT_NE(self->fd, -1);
1938 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1939 
1940 	/* Map, set pattern, unmap. */
1941 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1942 	ASSERT_NE(ptr, MAP_FAILED);
1943 	set_pattern(ptr, 10, page_size);
1944 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1945 
1946 	/* Write-seal the memfd. */
1947 	ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);
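	/*
	 * F_SEAL_WRITE forbids any further writes to the memfd, including via
	 * shared writable mappings; installing and removing guard regions on a
	 * read-only mapping of it must still work.
	 */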
1948 
1949 	/* Now map the memfd readonly. */
1950 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1951 	ASSERT_NE(ptr, MAP_FAILED);
1952 
1953 	/* Ensure pattern is as expected. */
1954 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1955 
1956 	/* Now make every even page guarded. */
1957 	for (i = 0; i < 10; i += 2) {
1958 		char *ptr_p = &ptr[i * page_size];
1959 
1960 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1961 	}
1962 
1963 	/* Now assert things are as expected. */
1964 	for (i = 0; i < 10; i++) {
1965 		char *ptr_p = &ptr[i * page_size];
1966 
1967 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1968 	}
1969 
1970 	/* Now remove guard regions. */
1971 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1972 
1973 	/* Ensure pattern is as expected. */
1974 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1975 
1976 	/* Ensure write seal intact. */
1977 	for (i = 0; i < 10; i++) {
1978 		char *ptr_p = &ptr[i * page_size];
1979 
1980 		ASSERT_FALSE(try_write_buf(ptr_p));
1981 	}
1982 
1983 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1984 }
1985 
1987 /*
1988  * Since we are now permitted to establish guard regions in read-only anonymous
1989  * mappings, test that guard regions function with a mapping of the anonymous
1990  * zero page, for the sake of thoroughness, even though this probably has no
1991  * practical use.
1992  */
1993 TEST_F(guard_regions, anon_zeropage)
1994 {
1995 	const unsigned long page_size = self->page_size;
1996 	char *ptr;
1997 	int i;
1998 
1999 	if (!is_anon_backed(variant))
2000 		SKIP(return, "anon zero page test specific to anon/shmem");
2001 
2002 	/* Obtain a read-only (i.e. anon zero page) mapping. */
2003 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
2004 	ASSERT_NE(ptr, MAP_FAILED);
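	/*
	 * The mapping is never written, so read faults are satisfied with
	 * zero-filled pages (the shared zero page in the anonymous case).
	 */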
2005 
2006 	/* Now make every even page guarded. */
2007 	for (i = 0; i < 10; i += 2) {
2008 		char *ptr_p = &ptr[i * page_size];
2009 
2010 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2011 	}
2012 
2013 	/* Now assert things are as expected. */
2014 	for (i = 0; i < 10; i++) {
2015 		char *ptr_p = &ptr[i * page_size];
2016 
2017 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
2018 	}
2019 
2020 	/* Now remove all guard regions. */
2021 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
2022 
2023 	/* Now assert things are as expected. */
2024 	for (i = 0; i < 10; i++) {
2025 		char *ptr_p = &ptr[i * page_size];
2026 
2027 		ASSERT_TRUE(try_read_buf(ptr_p));
2028 	}
2029 
2030 	/* Ensure the mapping still reads back as all zeroes (zero page). */
2031 	ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));
2032 
2033 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2034 }
2035 
2036 /*
2037  * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
2038  */
2039 TEST_F(guard_regions, pagemap)
2040 {
2041 	const unsigned long page_size = self->page_size;
2042 	int proc_fd;
2043 	char *ptr;
2044 	int i;
2045 
2046 	proc_fd = open("/proc/self/pagemap", O_RDONLY);
2047 	ASSERT_NE(proc_fd, -1);
2048 
2049 	ptr = mmap_(self, variant, NULL, 10 * page_size,
2050 		    PROT_READ | PROT_WRITE, 0, 0);
2051 	ASSERT_NE(ptr, MAP_FAILED);
2052 
2053 	/* Read from pagemap, and assert no guard regions are detected. */
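	/*
	 * Each pagemap entry is a 64-bit word describing one virtual page;
	 * PM_GUARD_REGION is the flag bit reporting that the page lies within
	 * a guard region.
	 */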
2054 	for (i = 0; i < 10; i++) {
2055 		char *ptr_p = &ptr[i * page_size];
2056 		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2057 		unsigned long masked = entry & PM_GUARD_REGION;
2058 
2059 		ASSERT_EQ(masked, 0);
2060 	}
2061 
2062 	/* Install a guard region in every other page. */
2063 	for (i = 0; i < 10; i += 2) {
2064 		char *ptr_p = &ptr[i * page_size];
2065 
2066 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2067 	}
2068 
2069 	/* Re-read from pagemap, and assert guard regions are detected. */
2070 	for (i = 0; i < 10; i++) {
2071 		char *ptr_p = &ptr[i * page_size];
2072 		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2073 		unsigned long masked = entry & PM_GUARD_REGION;
2074 
2075 		ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0);
2076 	}
2077 
2078 	ASSERT_EQ(close(proc_fd), 0);
2079 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2080 }
2081 
2082 TEST_HARNESS_MAIN
2083