xref: /linux/tools/testing/selftests/mm/guard-regions.c (revision 040f404b731207935ed644b14bcc2bb8b8488d00)
1 // SPDX-License-Identifier: GPL-2.0-or-later
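/*
 * Tests for madvise() guard regions (MADV_GUARD_INSTALL / MADV_GUARD_REMOVE).
 *
 * A guard region causes any read or write of the covered pages to deliver
 * SIGSEGV without reserving backing memory for them. Installing a guard
 * region zaps whatever was previously mapped in the range, while removing
 * one is non-destructive. The tests below exercise this behaviour over
 * anonymous, shmem and local file-backed mappings, and in combination with
 * fork(), mremap(), mlock(), userfaultfd and other madvise() operations.
 */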
2 
3 #define _GNU_SOURCE
4 #include "../kselftest_harness.h"
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/limits.h>
10 #include <linux/userfaultfd.h>
11 #include <linux/fs.h>
12 #include <setjmp.h>
13 #include <signal.h>
14 #include <stdbool.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/ioctl.h>
19 #include <sys/mman.h>
20 #include <sys/syscall.h>
21 #include <sys/uio.h>
22 #include <unistd.h>
23 #include "vm_util.h"
24 
25 #include "../pidfd/pidfd.h"
26 
27 /*
28  * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
29  *
30  * "If the signal occurs other than as the result of calling the abort or raise
31  *  function, the behavior is undefined if the signal handler refers to any
32  *  object with static storage duration other than by assigning a value to an
33  *  object declared as volatile sig_atomic_t"
34  */
35 static volatile sig_atomic_t signal_jump_set;
36 static sigjmp_buf signal_jmp_buf;
37 
38 /*
39  * Ignore the checkpatch warning, we must read from x but don't want to do
40  * anything with it in order to trigger a read page fault. We therefore must use
41  * volatile to stop the compiler from optimising this away.
42  */
43 #define FORCE_READ(x) (*(volatile typeof(x) *)x)
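
/*
 * Note that when handed a char *ptr, as in the tests below, FORCE_READ()
 * reads a pointer-sized value through ptr rather than a single byte; either
 * way the access touches the target page, which is all that is required to
 * provoke a read fault.
 */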
44 
45 /*
46  * How is the test backing the mapping being tested?
47  */
48 enum backing_type {
49 	ANON_BACKED,
50 	SHMEM_BACKED,
51 	LOCAL_FILE_BACKED,
52 };
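
/*
 * ANON_BACKED maps MAP_PRIVATE | MAP_ANON memory, while SHMEM_BACKED and
 * LOCAL_FILE_BACKED map self->fd MAP_SHARED, backed by a memfd or a
 * mkstemp() file in the current directory respectively (see mmap_() and
 * the fixture setup below).
 */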
53 
54 FIXTURE(guard_regions)
55 {
56 	unsigned long page_size;
57 	char path[PATH_MAX];
58 	int fd;
59 };
60 
61 FIXTURE_VARIANT(guard_regions)
62 {
63 	enum backing_type backing;
64 };
65 
66 FIXTURE_VARIANT_ADD(guard_regions, anon)
67 {
68 	.backing = ANON_BACKED,
69 };
70 
71 FIXTURE_VARIANT_ADD(guard_regions, shmem)
72 {
73 	.backing = SHMEM_BACKED,
74 };
75 
76 FIXTURE_VARIANT_ADD(guard_regions, file)
77 {
78 	.backing = LOCAL_FILE_BACKED,
79 };
80 
81 static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
82 {
83 	switch (variant->backing) {
84 	case ANON_BACKED:
85 	case SHMEM_BACKED:
86 		return true;
87 	default:
88 		return false;
89 	}
90 }
91 
92 static void *mmap_(FIXTURE_DATA(guard_regions) * self,
93 		   const FIXTURE_VARIANT(guard_regions) * variant,
94 		   void *addr, size_t length, int prot, int extra_flags,
95 		   off_t offset)
96 {
97 	int fd;
98 	int flags = extra_flags;
99 
100 	switch (variant->backing) {
101 	case ANON_BACKED:
102 		flags |= MAP_PRIVATE | MAP_ANON;
103 		fd = -1;
104 		break;
105 	case SHMEM_BACKED:
106 	case LOCAL_FILE_BACKED:
107 		flags |= MAP_SHARED;
108 		fd = self->fd;
109 		break;
110 	default:
111 		ksft_exit_fail();
112 		break;
113 	}
114 
115 	return mmap(addr, length, prot, flags, fd, offset);
116 }
117 
118 static int userfaultfd(int flags)
119 {
120 	return syscall(SYS_userfaultfd, flags);
121 }
122 
123 static void handle_fatal(int c)
124 {
125 	if (!signal_jump_set)
126 		return;
127 
128 	siglongjmp(signal_jmp_buf, c);
129 }
130 
131 static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
132 				   size_t n, int advice, unsigned int flags)
133 {
134 	return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
135 }
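
/*
 * process_madvise() is invoked via syscall() directly, presumably so that no
 * particular libc wrapper is required. The tests below pass PIDFD_SELF
 * (provided by the pidfd selftest header included above) so that the advice
 * is applied to the calling process itself.
 */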
136 
137 /*
138  * Enable our signal catcher and try to read/write the specified buffer. The
139  * return value indicates whether the read/write succeeds without a fatal
140  * signal.
141  */
142 static bool try_access_buf(char *ptr, bool write)
143 {
144 	bool failed;
145 
146 	/* Tell signal handler to jump back here on fatal signal. */
147 	signal_jump_set = true;
148 	/* If a fatal signal arose, we will jump back here and failed is set. */
149 	failed = sigsetjmp(signal_jmp_buf, 0) != 0;
150 
151 	if (!failed) {
152 		if (write)
153 			*ptr = 'x';
154 		else
155 			FORCE_READ(ptr);
156 	}
157 
158 	signal_jump_set = false;
159 	return !failed;
160 }
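
/*
 * The fault handling above works as follows: handle_fatal() is installed for
 * SIGSEGV with SA_NODEFER (see setup_sighandler() below), and on a fatal
 * signal it siglongjmp()s back to the sigsetjmp() point with a non-zero
 * value, so a faulting access is reported as a failure rather than killing
 * the test. SA_NODEFER keeps SIGSEGV deliverable for subsequent accesses,
 * since the handler exits via siglongjmp() and never returns normally.
 */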
161 
162 /* Try and read from a buffer, return true if no fatal signal. */
163 static bool try_read_buf(char *ptr)
164 {
165 	return try_access_buf(ptr, false);
166 }
167 
168 /* Try and write to a buffer, return true if no fatal signal. */
169 static bool try_write_buf(char *ptr)
170 {
171 	return try_access_buf(ptr, true);
172 }
173 
174 /*
175  * Try and BOTH read from AND write to a buffer, return true if BOTH operations
176  * succeed.
177  */
178 static bool try_read_write_buf(char *ptr)
179 {
180 	return try_read_buf(ptr) && try_write_buf(ptr);
181 }
182 
183 static void setup_sighandler(void)
184 {
185 	struct sigaction act = {
186 		.sa_handler = &handle_fatal,
187 		.sa_flags = SA_NODEFER,
188 	};
189 
190 	sigemptyset(&act.sa_mask);
191 	if (sigaction(SIGSEGV, &act, NULL))
192 		ksft_exit_fail_perror("sigaction");
193 }
194 
195 static void teardown_sighandler(void)
196 {
197 	struct sigaction act = {
198 		.sa_handler = SIG_DFL,
199 		.sa_flags = SA_NODEFER,
200 	};
201 
202 	sigemptyset(&act.sa_mask);
203 	sigaction(SIGSEGV, &act, NULL);
204 }
205 
206 static int open_file(const char *prefix, char *path)
207 {
208 	int fd;
209 
210 	snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
211 	fd = mkstemp(path);
212 	if (fd < 0)
213 		ksft_exit_fail_perror("mkstemp");
214 
215 	return fd;
216 }
217 
218 /* Establish a varying pattern in a buffer. */
219 static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
220 {
221 	size_t i;
222 
223 	for (i = 0; i < num_pages; i++) {
224 		char *ptr2 = &ptr[i * page_size];
225 
226 		memset(ptr2, 'a' + (i % 26), page_size);
227 	}
228 }
229 
230 /*
231  * Check that a buffer contains the pattern set by set_pattern(), starting at a
232  * page offset of pgoff within the buffer.
233  */
234 static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
235 				 size_t pgoff)
236 {
237 	size_t i;
238 
239 	for (i = 0; i < num_pages * page_size; i++) {
240 		size_t offset = pgoff * page_size + i;
241 		char actual = ptr[offset];
242 		char expected = 'a' + ((offset / page_size) % 26);
243 
244 		if (actual != expected)
245 			return false;
246 	}
247 
248 	return true;
249 }
250 
251 /* Check that a buffer contains the pattern set by set_pattern(). */
252 static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
253 {
254 	return check_pattern_offset(ptr, num_pages, page_size, 0);
255 }
256 
257 /* Determine if a buffer contains only repetitions of a specified char. */
258 static bool is_buf_eq(char *buf, size_t size, char chr)
259 {
260 	size_t i;
261 
262 	for (i = 0; i < size; i++) {
263 		if (buf[i] != chr)
264 			return false;
265 	}
266 
267 	return true;
268 }
269 
270 FIXTURE_SETUP(guard_regions)
271 {
272 	self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
273 	setup_sighandler();
274 
275 	switch (variant->backing) {
276 	case ANON_BACKED:
277 		return;
278 	case LOCAL_FILE_BACKED:
279 		self->fd = open_file("", self->path);
280 		break;
281 	case SHMEM_BACKED:
282 		self->fd = memfd_create(self->path, 0);
283 		break;
284 	}
285 
286 	/* Truncate the file to 100 pages; tests can adjust this as needed. */
287 	ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
288 };
289 
290 FIXTURE_TEARDOWN_PARENT(guard_regions)
291 {
292 	teardown_sighandler();
293 
294 	if (variant->backing == ANON_BACKED)
295 		return;
296 
297 	if (self->fd >= 0)
298 		close(self->fd);
299 
300 	if (self->path[0] != '\0')
301 		unlink(self->path);
302 }
303 
304 TEST_F(guard_regions, basic)
305 {
306 	const unsigned long NUM_PAGES = 10;
307 	const unsigned long page_size = self->page_size;
308 	char *ptr;
309 	int i;
310 
311 	ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
312 		    PROT_READ | PROT_WRITE, 0, 0);
313 	ASSERT_NE(ptr, MAP_FAILED);
314 
315 	/* Trivially assert we can touch the first page. */
316 	ASSERT_TRUE(try_read_write_buf(ptr));
317 
318 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
319 
320 	/* Establish that 1st page SIGSEGV's. */
321 	ASSERT_FALSE(try_read_write_buf(ptr));
322 
323 	/* Ensure we can touch everything else. */
324 	for (i = 1; i < NUM_PAGES; i++) {
325 		char *curr = &ptr[i * page_size];
326 
327 		ASSERT_TRUE(try_read_write_buf(curr));
328 	}
329 
330 	/* Establish a guard page at the end of the mapping. */
331 	ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
332 			  MADV_GUARD_INSTALL), 0);
333 
334 	/* Check that both guard pages result in SIGSEGV. */
335 	ASSERT_FALSE(try_read_write_buf(ptr));
336 	ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
337 
338 	/* Remove the first guard page. */
339 	ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));
340 
341 	/* Make sure we can touch it. */
342 	ASSERT_TRUE(try_read_write_buf(ptr));
343 
344 	/* Remove the last guard page. */
345 	ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
346 			     MADV_GUARD_REMOVE));
347 
348 	/* Make sure we can touch it. */
349 	ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
350 
351 	/*
352 	 * Test setting a _range_ of pages, namely the first 3. The first of
353 	 * these will have been faulted in, so this also tests that we can
354 	 * install guard pages over backed pages.
355 	 */
356 	ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
357 
358 	/* Make sure they are all guard pages. */
359 	for (i = 0; i < 3; i++) {
360 		char *curr = &ptr[i * page_size];
361 
362 		ASSERT_FALSE(try_read_write_buf(curr));
363 	}
364 
365 	/* Make sure the rest are not. */
366 	for (i = 3; i < NUM_PAGES; i++) {
367 		char *curr = &ptr[i * page_size];
368 
369 		ASSERT_TRUE(try_read_write_buf(curr));
370 	}
371 
372 	/* Remove guard pages. */
373 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
374 
375 	/* Now make sure we can touch everything. */
376 	for (i = 0; i < NUM_PAGES; i++) {
377 		char *curr = &ptr[i * page_size];
378 
379 		ASSERT_TRUE(try_read_write_buf(curr));
380 	}
381 
382 	/*
383 	 * Now remove guard markers again (none remain) and make sure doing so
384 	 * does not zap the existing entries.
385 	 */
386 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
387 
388 	for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
389 		char chr = ptr[i];
390 
391 		ASSERT_EQ(chr, 'x');
392 	}
393 
394 	ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
395 }
396 
397 /* Assert that operations applied across multiple VMAs work as expected. */
398 TEST_F(guard_regions, multi_vma)
399 {
400 	const unsigned long page_size = self->page_size;
401 	char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
402 	int i;
403 
404 	/* Reserve a 100 page region over which we can install VMAs. */
405 	ptr_region = mmap_(self, variant, NULL, 100 * page_size,
406 			   PROT_NONE, 0, 0);
407 	ASSERT_NE(ptr_region, MAP_FAILED);
408 
409 	/* Place a VMA of 10 pages size at the start of the region. */
410 	ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
411 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
412 	ASSERT_NE(ptr1, MAP_FAILED);
413 
414 	/* Place a VMA of 5 pages size 50 pages into the region. */
415 	ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
416 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
417 	ASSERT_NE(ptr2, MAP_FAILED);
418 
419 	/* Place a VMA of 20 pages size at the end of the region. */
420 	ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
421 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
422 	ASSERT_NE(ptr3, MAP_FAILED);
423 
424 	/* Unmap gaps. */
425 	ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
426 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);
427 
428 	/*
429 	 * We end up with VMAs like this:
430 	 *
431 	 * 0    10 .. 50   55 .. 80   100
432 	 * [---]      [---]      [---]
433 	 */
434 
435 	/*
436 	 * Now mark the whole range as guard pages and make sure all VMAs are as
437 	 * such.
438 	 */
439 
440 	/*
441 	 * madvise() is certifiable and lets you perform operations over gaps;
442 	 * everything works, but it indicates an error and errno is set to
443 	 * ENOMEM. Also, if anything runs out of memory, errno is set to
444 	 * ENOMEM. You are meant to guess which is which.
445 	 */
446 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
447 	ASSERT_EQ(errno, ENOMEM);
448 
449 	for (i = 0; i < 10; i++) {
450 		char *curr = &ptr1[i * page_size];
451 
452 		ASSERT_FALSE(try_read_write_buf(curr));
453 	}
454 
455 	for (i = 0; i < 5; i++) {
456 		char *curr = &ptr2[i * page_size];
457 
458 		ASSERT_FALSE(try_read_write_buf(curr));
459 	}
460 
461 	for (i = 0; i < 20; i++) {
462 		char *curr = &ptr3[i * page_size];
463 
464 		ASSERT_FALSE(try_read_write_buf(curr));
465 	}
466 
467 	/* Now remove guard pages over the range and assert the opposite. */
468 
469 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
470 	ASSERT_EQ(errno, ENOMEM);
471 
472 	for (i = 0; i < 10; i++) {
473 		char *curr = &ptr1[i * page_size];
474 
475 		ASSERT_TRUE(try_read_write_buf(curr));
476 	}
477 
478 	for (i = 0; i < 5; i++) {
479 		char *curr = &ptr2[i * page_size];
480 
481 		ASSERT_TRUE(try_read_write_buf(curr));
482 	}
483 
484 	for (i = 0; i < 20; i++) {
485 		char *curr = &ptr3[i * page_size];
486 
487 		ASSERT_TRUE(try_read_write_buf(curr));
488 	}
489 
490 	/* Now map incompatible VMAs in the gaps. */
491 	ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
492 		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
493 	ASSERT_NE(ptr, MAP_FAILED);
494 	ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
495 		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
496 	ASSERT_NE(ptr, MAP_FAILED);
497 
498 	/*
499 	 * We end up with VMAs like this:
500 	 *
501 	 * 0    10 .. 50   55 .. 80   100
502 	 * [---][xxxx][---][xxxx][---]
503 	 *
504 	 * Where 'x' signifies VMAs that cannot be merged with those adjacent to
505 	 * them.
506 	 */
507 
508 	/* Multiple VMAs adjacent to one another should result in no error. */
509 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
510 	for (i = 0; i < 100; i++) {
511 		char *curr = &ptr_region[i * page_size];
512 
513 		ASSERT_FALSE(try_read_write_buf(curr));
514 	}
515 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
516 	for (i = 0; i < 100; i++) {
517 		char *curr = &ptr_region[i * page_size];
518 
519 		ASSERT_TRUE(try_read_write_buf(curr));
520 	}
521 
522 	/* Cleanup. */
523 	ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
524 }
525 
526 /*
527  * Assert that batched operations performed using process_madvise() work as
528  * expected.
529  */
530 TEST_F(guard_regions, process_madvise)
531 {
532 	const unsigned long page_size = self->page_size;
533 	char *ptr_region, *ptr1, *ptr2, *ptr3;
534 	ssize_t count;
535 	struct iovec vec[6];
536 
537 	/* Reserve region to map over. */
538 	ptr_region = mmap_(self, variant, NULL, 100 * page_size,
539 			   PROT_NONE, 0, 0);
540 	ASSERT_NE(ptr_region, MAP_FAILED);
541 
542 	/*
543 	 * 10 pages offset 1 page into the reserve region. We MAP_POPULATE so
544 	 * the entries are faulted in up front, which exercises the code path
545 	 * where guard markers overwrite existing entries.
546 	 */
547 	ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
548 		     PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
549 	ASSERT_NE(ptr1, MAP_FAILED);
550 	/* We want guard markers at start/end of each VMA. */
551 	vec[0].iov_base = ptr1;
552 	vec[0].iov_len = page_size;
553 	vec[1].iov_base = &ptr1[9 * page_size];
554 	vec[1].iov_len = page_size;
555 
556 	/* 5 pages offset 50 pages into reserve region. */
557 	ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
558 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
559 	ASSERT_NE(ptr2, MAP_FAILED);
560 	vec[2].iov_base = ptr2;
561 	vec[2].iov_len = page_size;
562 	vec[3].iov_base = &ptr2[4 * page_size];
563 	vec[3].iov_len = page_size;
564 
565 	/* 20 pages offset 79 pages into reserve region. */
566 	ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
567 		    PROT_READ | PROT_WRITE, MAP_FIXED, 0);
568 	ASSERT_NE(ptr3, MAP_FAILED);
569 	vec[4].iov_base = ptr3;
570 	vec[4].iov_len = page_size;
571 	vec[5].iov_base = &ptr3[19 * page_size];
572 	vec[5].iov_len = page_size;
573 
574 	/* Free surrounding VMAs. */
575 	ASSERT_EQ(munmap(ptr_region, page_size), 0);
576 	ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
577 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
578 	ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
579 
580 	/* Now guard in one step. */
581 	count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
582 
583 	/* OK we don't have permission to do this, skip. */
584 	if (count == -1 && errno == EPERM)
585 		ksft_exit_skip("No process_madvise() permissions, try running as root.\n");
586 
587 	/* Returns the number of bytes advised. */
588 	ASSERT_EQ(count, 6 * page_size);
589 
590 	/* Now make sure the guarding was applied. */
591 
592 	ASSERT_FALSE(try_read_write_buf(ptr1));
593 	ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));
594 
595 	ASSERT_FALSE(try_read_write_buf(ptr2));
596 	ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));
597 
598 	ASSERT_FALSE(try_read_write_buf(ptr3));
599 	ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
600 
601 	/* Now do the same with unguard... */
602 	count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
603 
604 	/* ...and everything should now succeed. */
605 
606 	ASSERT_TRUE(try_read_write_buf(ptr1));
607 	ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));
608 
609 	ASSERT_TRUE(try_read_write_buf(ptr2));
610 	ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));
611 
612 	ASSERT_TRUE(try_read_write_buf(ptr3));
613 	ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));
614 
615 	/* Cleanup. */
616 	ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
617 	ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
618 	ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
619 }
620 
621 /* Assert that unmapping ranges does not leave guard markers behind. */
622 TEST_F(guard_regions, munmap)
623 {
624 	const unsigned long page_size = self->page_size;
625 	char *ptr, *ptr_new1, *ptr_new2;
626 
627 	ptr = mmap_(self, variant, NULL, 10 * page_size,
628 		    PROT_READ | PROT_WRITE, 0, 0);
629 	ASSERT_NE(ptr, MAP_FAILED);
630 
631 	/* Guard first and last pages. */
632 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
633 	ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);
634 
635 	/* Assert that they are guarded. */
636 	ASSERT_FALSE(try_read_write_buf(ptr));
637 	ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));
638 
639 	/* Unmap them. */
640 	ASSERT_EQ(munmap(ptr, page_size), 0);
641 	ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
642 
643 	/* Map over them. */
644 	ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
645 			 MAP_FIXED, 0);
646 	ASSERT_NE(ptr_new1, MAP_FAILED);
647 	ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
648 			 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
649 	ASSERT_NE(ptr_new2, MAP_FAILED);
650 
651 	/* Assert that they are now not guarded. */
652 	ASSERT_TRUE(try_read_write_buf(ptr_new1));
653 	ASSERT_TRUE(try_read_write_buf(ptr_new2));
654 
655 	/* Cleanup. */
656 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
657 }
658 
659 /* Assert that mprotect() operations have no bearing on guard markers. */
660 TEST_F(guard_regions, mprotect)
661 {
662 	const unsigned long page_size = self->page_size;
663 	char *ptr;
664 	int i;
665 
666 	ptr = mmap_(self, variant, NULL, 10 * page_size,
667 		    PROT_READ | PROT_WRITE, 0, 0);
668 	ASSERT_NE(ptr, MAP_FAILED);
669 
670 	/* Guard the middle of the range. */
671 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
672 			  MADV_GUARD_INSTALL), 0);
673 
674 	/* Assert that it is indeed guarded. */
675 	ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
676 	ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
677 
678 	/* Now make these pages read-only. */
679 	ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
680 
681 	/* Make sure the range is still guarded. */
682 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
683 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
684 
685 	/* Make sure we can guard again without issue. */
686 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
687 			  MADV_GUARD_INSTALL), 0);
688 
689 	/* Make sure the range is, yet again, still guarded. */
690 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
691 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
692 
693 	/* Now unguard the whole range. */
694 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
695 
696 	/* Make sure the whole range is readable. */
697 	for (i = 0; i < 10; i++) {
698 		char *curr = &ptr[i * page_size];
699 
700 		ASSERT_TRUE(try_read_buf(curr));
701 	}
702 
703 	/* Cleanup. */
704 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
705 }
706 
707 /* Split and merge VMAs and make sure guard pages still behave. */
708 TEST_F(guard_regions, split_merge)
709 {
710 	const unsigned long page_size = self->page_size;
711 	char *ptr, *ptr_new;
712 	int i;
713 
714 	ptr = mmap_(self, variant, NULL, 10 * page_size,
715 		    PROT_READ | PROT_WRITE, 0, 0);
716 	ASSERT_NE(ptr, MAP_FAILED);
717 
718 	/* Guard the whole range. */
719 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
720 
721 	/* Make sure the whole range is guarded. */
722 	for (i = 0; i < 10; i++) {
723 		char *curr = &ptr[i * page_size];
724 
725 		ASSERT_FALSE(try_read_write_buf(curr));
726 	}
727 
728 	/* Now unmap some pages in the range so we split. */
729 	ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
730 	ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
731 	ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
732 
733 	/* Make sure the remaining ranges are guarded post-split. */
734 	for (i = 0; i < 2; i++) {
735 		char *curr = &ptr[i * page_size];
736 
737 		ASSERT_FALSE(try_read_write_buf(curr));
738 	}
739 	for (i = 2; i < 5; i++) {
740 		char *curr = &ptr[i * page_size];
741 
742 		ASSERT_FALSE(try_read_write_buf(curr));
743 	}
744 	for (i = 6; i < 8; i++) {
745 		char *curr = &ptr[i * page_size];
746 
747 		ASSERT_FALSE(try_read_write_buf(curr));
748 	}
749 	for (i = 9; i < 10; i++) {
750 		char *curr = &ptr[i * page_size];
751 
752 		ASSERT_FALSE(try_read_write_buf(curr));
753 	}
754 
755 	/* Now map them again - the unmap will have cleared the guards. */
756 	ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
757 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
758 	ASSERT_NE(ptr_new, MAP_FAILED);
759 	ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
760 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
761 	ASSERT_NE(ptr_new, MAP_FAILED);
762 	ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
763 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
764 	ASSERT_NE(ptr_new, MAP_FAILED);
765 
766 	/* Now make sure guard pages are established. */
767 	for (i = 0; i < 10; i++) {
768 		char *curr = &ptr[i * page_size];
769 		bool result = try_read_write_buf(curr);
770 		bool expect_true = i == 2 || i == 5 || i == 8;
771 
772 		ASSERT_TRUE(expect_true ? result : !result);
773 	}
774 
775 	/* Now guard everything again. */
776 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
777 
778 	/* Make sure the whole range is guarded. */
779 	for (i = 0; i < 10; i++) {
780 		char *curr = &ptr[i * page_size];
781 
782 		ASSERT_FALSE(try_read_write_buf(curr));
783 	}
784 
785 	/* Now split the range into three. */
786 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
787 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
788 
789 	/* Make sure the whole range is guarded for read. */
790 	for (i = 0; i < 10; i++) {
791 		char *curr = &ptr[i * page_size];
792 
793 		ASSERT_FALSE(try_read_buf(curr));
794 	}
795 
796 	/* Now reset protection bits so we merge the whole thing. */
797 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
798 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
799 			   PROT_READ | PROT_WRITE), 0);
800 
801 	/* Make sure the whole range is still guarded. */
802 	for (i = 0; i < 10; i++) {
803 		char *curr = &ptr[i * page_size];
804 
805 		ASSERT_FALSE(try_read_write_buf(curr));
806 	}
807 
808 	/* Split range into 3 again... */
809 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
810 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
811 
812 	/* ...and unguard the whole range. */
813 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
814 
815 	/* Make sure the whole range is remedied for read. */
816 	for (i = 0; i < 10; i++) {
817 		char *curr = &ptr[i * page_size];
818 
819 		ASSERT_TRUE(try_read_buf(curr));
820 	}
821 
822 	/* Merge them again. */
823 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
824 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
825 			   PROT_READ | PROT_WRITE), 0);
826 
827 	/* Now ensure the merged range is remedied for read/write. */
828 	for (i = 0; i < 10; i++) {
829 		char *curr = &ptr[i * page_size];
830 
831 		ASSERT_TRUE(try_read_write_buf(curr));
832 	}
833 
834 	/* Cleanup. */
835 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
836 }
837 
838 /* Assert that MADV_DONTNEED does not remove guard markers. */
839 TEST_F(guard_regions, dontneed)
840 {
841 	const unsigned long page_size = self->page_size;
842 	char *ptr;
843 	int i;
844 
845 	ptr = mmap_(self, variant, NULL, 10 * page_size,
846 		    PROT_READ | PROT_WRITE, 0, 0);
847 	ASSERT_NE(ptr, MAP_FAILED);
848 
849 	/* Back the whole range. */
850 	for (i = 0; i < 10; i++) {
851 		char *curr = &ptr[i * page_size];
852 
853 		*curr = 'y';
854 	}
855 
856 	/* Guard every other page. */
857 	for (i = 0; i < 10; i += 2) {
858 		char *curr = &ptr[i * page_size];
859 		int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
860 
861 		ASSERT_EQ(res, 0);
862 	}
863 
864 	/* Indicate that we don't need any of the range. */
865 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
866 
867 	/* Check to ensure guard markers are still in place. */
868 	for (i = 0; i < 10; i++) {
869 		char *curr = &ptr[i * page_size];
870 		bool result = try_read_buf(curr);
871 
872 		if (i % 2 == 0) {
873 			ASSERT_FALSE(result);
874 		} else {
875 			ASSERT_TRUE(result);
876 			switch (variant->backing) {
877 			case ANON_BACKED:
878 				/* If anon, then we get a zero page. */
879 				ASSERT_EQ(*curr, '\0');
880 				break;
881 			default:
882 				/* Otherwise, we get the file data. */
883 				ASSERT_EQ(*curr, 'y');
884 				break;
885 			}
886 		}
887 
888 		/* Now write... */
889 		result = try_write_buf(&ptr[i * page_size]);
890 
891 		/* ...and make sure same result. */
892 		ASSERT_TRUE(i % 2 != 0 ? result : !result);
893 	}
894 
895 	/* Cleanup. */
896 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
897 }
898 
899 /* Assert that mlock()'ed pages work correctly with guard markers. */
900 TEST_F(guard_regions, mlock)
901 {
902 	const unsigned long page_size = self->page_size;
903 	char *ptr;
904 	int i;
905 
906 	ptr = mmap_(self, variant, NULL, 10 * page_size,
907 		    PROT_READ | PROT_WRITE, 0, 0);
908 	ASSERT_NE(ptr, MAP_FAILED);
909 
910 	/* Populate. */
911 	for (i = 0; i < 10; i++) {
912 		char *curr = &ptr[i * page_size];
913 
914 		*curr = 'y';
915 	}
916 
917 	/* Lock. */
918 	ASSERT_EQ(mlock(ptr, 10 * page_size), 0);
919 
920 	/* Now try to guard, should fail with EINVAL. */
921 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
922 	ASSERT_EQ(errno, EINVAL);
923 
924 	/* OK unlock. */
925 	ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
926 
927 	/* Guard first half of range, should now succeed. */
928 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
929 
930 	/* Make sure guard works. */
931 	for (i = 0; i < 10; i++) {
932 		char *curr = &ptr[i * page_size];
933 		bool result = try_read_write_buf(curr);
934 
935 		if (i < 5) {
936 			ASSERT_FALSE(result);
937 		} else {
938 			ASSERT_TRUE(result);
939 			ASSERT_EQ(*curr, 'x');
940 		}
941 	}
942 
943 	/*
944 	 * Now lock the latter part of the range. We can't lock the guard pages,
945 	 * as this would result in the pages being populated and the guarding
946 	 * would cause this to error out.
947 	 */
948 	ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
949 
950 	/*
951 	 * Now remove guard pages, we permit mlock()'d ranges to have guard
952 	 * pages removed as it is a non-destructive operation.
953 	 */
954 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
955 
956 	/* Now check that no guard pages remain. */
957 	for (i = 0; i < 10; i++) {
958 		char *curr = &ptr[i * page_size];
959 
960 		ASSERT_TRUE(try_read_write_buf(curr));
961 	}
962 
963 	/* Cleanup. */
964 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
965 }
966 
967 /*
968  * Assert that moving, extending and shrinking memory via mremap() retains
969  * guard markers where possible.
970  *
971  * - Moving a mapping alone should retain markers as they are.
972  */
973 TEST_F(guard_regions, mremap_move)
974 {
975 	const unsigned long page_size = self->page_size;
976 	char *ptr, *ptr_new;
977 
978 	/* Map 5 pages. */
979 	ptr = mmap_(self, variant, NULL, 5 * page_size,
980 		    PROT_READ | PROT_WRITE, 0, 0);
981 	ASSERT_NE(ptr, MAP_FAILED);
982 
983 	/* Place guard markers at both ends of the 5 page span. */
984 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
985 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
986 
987 	/* Make sure the guard pages are in effect. */
988 	ASSERT_FALSE(try_read_write_buf(ptr));
989 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
990 
991 	/* Map a new region we will move this range into. Doing this ensures
992 	 * that we have reserved a range to map into.
993 	 */
994 	ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
995 	ASSERT_NE(ptr_new, MAP_FAILED);
996 
997 	ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
998 			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);
999 
1000 	/* Make sure the guard markers are retained. */
1001 	ASSERT_FALSE(try_read_write_buf(ptr_new));
1002 	ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));
1003 
1004 	/*
1005 	 * Clean up - we only need to reference the new pointer as we
1006 	 * overwrote the PROT_NONE range and moved the existing one.
1007 	 */
1008 	munmap(ptr_new, 5 * page_size);
1009 }
1010 
1011 /*
1012  * Assert that moving, extending and shrinking memory via mremap() retains
1013  * guard markers where possible.
1014  *
1015  * Expanding should retain guard pages at their original positions, no longer
1016  * at the end of the mapping. The user will have to remove guard pages
1017  * manually to fix up (they'd have to do the same with a PROT_NONE mapping).
1018  */
1019 TEST_F(guard_regions, mremap_expand)
1020 {
1021 	const unsigned long page_size = self->page_size;
1022 	char *ptr, *ptr_new;
1023 
1024 	/* Map 10 pages... */
1025 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1026 		    PROT_READ | PROT_WRITE, 0, 0);
1027 	ASSERT_NE(ptr, MAP_FAILED);
1028 	/* ...But unmap the last 5 so we can ensure we can expand into them. */
1029 	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
1030 
1031 	/* Place guard markers at both ends of the 5 page span. */
1032 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1033 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1034 
1035 	/* Make sure the guarding is in effect. */
1036 	ASSERT_FALSE(try_read_write_buf(ptr));
1037 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1038 
1039 	/* Now expand to 10 pages. */
1040 	ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
1041 	ASSERT_NE(ptr, MAP_FAILED);
1042 
1043 	/*
1044 	 * Make sure the guard markers are retained in their original positions.
1045 	 */
1046 	ASSERT_FALSE(try_read_write_buf(ptr));
1047 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1048 
1049 	/* Reserve a region which we can move to and expand into. */
1050 	ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
1051 	ASSERT_NE(ptr_new, MAP_FAILED);
1052 
1053 	/* Now move and expand into it. */
1054 	ptr = mremap(ptr, 10 * page_size, 20 * page_size,
1055 		     MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
1056 	ASSERT_EQ(ptr, ptr_new);
1057 
1058 	/*
1059 	 * Again, make sure the guard markers are retained in their original positions.
1060 	 */
1061 	ASSERT_FALSE(try_read_write_buf(ptr));
1062 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1063 
1064 	/*
1065 	 * A real user would have to remove guard markers, but would reasonably
1066 	 * expect all characteristics of the mapping to be retained, including
1067 	 * guard markers.
1068 	 */
1069 
1070 	/* Cleanup. */
1071 	munmap(ptr, 20 * page_size);
1072 }

1073 /*
1074  * Assert that moving, extending and shrinking memory via mremap() retains
1075  * guard markers where possible.
1076  *
1077  * Shrinking will result in markers that are shrunk over being removed. Again,
1078  * if the user were using a PROT_NONE mapping they'd have to manually fix this
1079  * up also so this is OK.
1080  */
1081 TEST_F(guard_regions, mremap_shrink)
1082 {
1083 	const unsigned long page_size = self->page_size;
1084 	char *ptr;
1085 	int i;
1086 
1087 	/* Map 5 pages. */
1088 	ptr = mmap_(self, variant, NULL, 5 * page_size,
1089 		    PROT_READ | PROT_WRITE, 0, 0);
1090 	ASSERT_NE(ptr, MAP_FAILED);
1091 
1092 	/* Place guard markers at both ends of the 5 page span. */
1093 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1094 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1095 
1096 	/* Make sure the guarding is in effect. */
1097 	ASSERT_FALSE(try_read_write_buf(ptr));
1098 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1099 
1100 	/* Now shrink to 3 pages. */
1101 	ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
1102 	ASSERT_NE(ptr, MAP_FAILED);
1103 
1104 	/* We expect the guard marker at the start to be retained... */
1105 	ASSERT_FALSE(try_read_write_buf(ptr));
1106 
1107 	/* ...But remaining pages will not have guard markers. */
1108 	for (i = 1; i < 3; i++) {
1109 		char *curr = &ptr[i * page_size];
1110 
1111 		ASSERT_TRUE(try_read_write_buf(curr));
1112 	}
1113 
1114 	/*
1115 	 * As with expansion, a real user would have to remove guard pages and
1116 	 * fixup. But you'd have to do similar manual things with PROT_NONE
1117 	 * mappings too.
1118 	 */
1119 
1120 	/*
1121 	 * If we expand back to the original size, the end marker will, of
1122 	 * course, no longer be present.
1123 	 */
1124 	ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
1125 	ASSERT_NE(ptr, MAP_FAILED);
1126 
1127 	/* Again, we expect the guard marker at the start to be retained... */
1128 	ASSERT_FALSE(try_read_write_buf(ptr));
1129 
1130 	/* ...But remaining pages will not have guard markers. */
1131 	for (i = 1; i < 5; i++) {
1132 		char *curr = &ptr[i * page_size];
1133 
1134 		ASSERT_TRUE(try_read_write_buf(curr));
1135 	}
1136 
1137 	/* Cleanup. */
1138 	munmap(ptr, 5 * page_size);
1139 }
1140 
1141 /*
1142  * Assert that forking a process whose VMAs do not have VM_WIPEONFORK set
1143  * retains guard pages in the child.
1144  */
1145 TEST_F(guard_regions, fork)
1146 {
1147 	const unsigned long page_size = self->page_size;
1148 	char *ptr;
1149 	pid_t pid;
1150 	int i;
1151 
1152 	/* Map 10 pages. */
1153 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1154 		    PROT_READ | PROT_WRITE, 0, 0);
1155 	ASSERT_NE(ptr, MAP_FAILED);
1156 
1157 	/* Establish guard pages in the first 5 pages. */
1158 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1159 
1160 	pid = fork();
1161 	ASSERT_NE(pid, -1);
1162 	if (!pid) {
1163 		/* This is the child process now. */
1164 
1165 		/* Assert that the guarding is in effect. */
1166 		for (i = 0; i < 10; i++) {
1167 			char *curr = &ptr[i * page_size];
1168 			bool result = try_read_write_buf(curr);
1169 
1170 			ASSERT_TRUE(i >= 5 ? result : !result);
1171 		}
1172 
1173 		/* Now unguard the range. */
1174 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1175 
1176 		exit(0);
1177 	}
1178 
1179 	/* Parent process. */
1180 
1181 	/* Parent simply waits on child. */
1182 	waitpid(pid, NULL, 0);
1183 
1184 	/* Child unguard does not impact parent page table state. */
1185 	for (i = 0; i < 10; i++) {
1186 		char *curr = &ptr[i * page_size];
1187 		bool result = try_read_write_buf(curr);
1188 
1189 		ASSERT_TRUE(i >= 5 ? result : !result);
1190 	}
1191 
1192 	/* Cleanup. */
1193 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1194 }
1195 
1196 /*
1197  * Assert expected behaviour after we fork populated ranges of anonymous memory
1198  * and then guard and unguard the range.
1199  */
1200 TEST_F(guard_regions, fork_cow)
1201 {
1202 	const unsigned long page_size = self->page_size;
1203 	char *ptr;
1204 	pid_t pid;
1205 	int i;
1206 
1207 	if (variant->backing != ANON_BACKED)
1208 		SKIP(return, "CoW only supported on anon mappings");
1209 
1210 	/* Map 10 pages. */
1211 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1212 		    PROT_READ | PROT_WRITE, 0, 0);
1213 	ASSERT_NE(ptr, MAP_FAILED);
1214 
1215 	/* Populate range. */
1216 	for (i = 0; i < 10 * page_size; i++) {
1217 		char chr = 'a' + (i % 26);
1218 
1219 		ptr[i] = chr;
1220 	}
1221 
1222 	pid = fork();
1223 	ASSERT_NE(pid, -1);
1224 	if (!pid) {
1225 		/* This is the child process now. */
1226 
1227 		/* Ensure the range is as expected. */
1228 		for (i = 0; i < 10 * page_size; i++) {
1229 			char expected = 'a' + (i % 26);
1230 			char actual = ptr[i];
1231 
1232 			ASSERT_EQ(actual, expected);
1233 		}
1234 
1235 		/* Establish guard pages across the whole range. */
1236 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1237 		/* Remove it. */
1238 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1239 
1240 		/*
1241 		 * Installing the guard regions zapped the child's entries, so
1242 		 * assert that we are now looking at the zero page.
1243 		 */
1244 		for (i = 0; i < 10 * page_size; i++) {
1245 			char actual = ptr[i];
1246 
1247 			ASSERT_EQ(actual, '\0');
1248 		}
1249 
1250 		exit(0);
1251 	}
1252 
1253 	/* Parent process. */
1254 
1255 	/* Parent simply waits on child. */
1256 	waitpid(pid, NULL, 0);
1257 
1258 	/* Ensure the range is unchanged in parent anon range. */
1259 	for (i = 0; i < 10 * page_size; i++) {
1260 		char expected = 'a' + (i % 26);
1261 		char actual = ptr[i];
1262 
1263 		ASSERT_EQ(actual, expected);
1264 	}
1265 
1266 	/* Cleanup. */
1267 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1268 }
1269 
1270 /*
1271  * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
1272  * behaves as expected.
1273  */
1274 TEST_F(guard_regions, fork_wipeonfork)
1275 {
1276 	const unsigned long page_size = self->page_size;
1277 	char *ptr;
1278 	pid_t pid;
1279 	int i;
1280 
1281 	if (variant->backing != ANON_BACKED)
1282 		SKIP(return, "Wipe on fork only supported on anon mappings");
1283 
1284 	/* Map 10 pages. */
1285 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1286 		    PROT_READ | PROT_WRITE, 0, 0);
1287 	ASSERT_NE(ptr, MAP_FAILED);
1288 
1289 	/* Mark wipe on fork. */
1290 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
1291 
1292 	/* Guard the first 5 pages. */
1293 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1294 
1295 	pid = fork();
1296 	ASSERT_NE(pid, -1);
1297 	if (!pid) {
1298 		/* This is the child process now. */
1299 
1300 		/* Guard will have been wiped. */
1301 		for (i = 0; i < 10; i++) {
1302 			char *curr = &ptr[i * page_size];
1303 
1304 			ASSERT_TRUE(try_read_write_buf(curr));
1305 		}
1306 
1307 		exit(0);
1308 	}
1309 
1310 	/* Parent process. */
1311 
1312 	waitpid(pid, NULL, 0);
1313 
1314 	/* Guard markers should be in effect. */
1315 	for (i = 0; i < 10; i++) {
1316 		char *curr = &ptr[i * page_size];
1317 		bool result = try_read_write_buf(curr);
1318 
1319 		ASSERT_TRUE(i >= 5 ? result : !result);
1320 	}
1321 
1322 	/* Cleanup. */
1323 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1324 }
1325 
1326 /* Ensure that MADV_FREE retains guard entries as expected. */
1327 TEST_F(guard_regions, lazyfree)
1328 {
1329 	const unsigned long page_size = self->page_size;
1330 	char *ptr;
1331 	int i;
1332 
1333 	if (variant->backing != ANON_BACKED)
1334 		SKIP(return, "MADV_FREE only supported on anon mappings");
1335 
1336 	/* Map 10 pages. */
1337 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1338 		    PROT_READ | PROT_WRITE, 0, 0);
1339 	ASSERT_NE(ptr, MAP_FAILED);
1340 
1341 	/* Guard range. */
1342 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1343 
1344 	/* Ensure guarded. */
1345 	for (i = 0; i < 10; i++) {
1346 		char *curr = &ptr[i * page_size];
1347 
1348 		ASSERT_FALSE(try_read_write_buf(curr));
1349 	}
1350 
1351 	/* Lazyfree range. */
1352 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);
1353 
1354 	/* This should leave the guard markers in place. */
1355 	for (i = 0; i < 10; i++) {
1356 		char *curr = &ptr[i * page_size];
1357 
1358 		ASSERT_FALSE(try_read_write_buf(curr));
1359 	}
1360 
1361 	/* Cleanup. */
1362 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1363 }
1364 
1365 /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
1366 TEST_F(guard_regions, populate)
1367 {
1368 	const unsigned long page_size = self->page_size;
1369 	char *ptr;
1370 
1371 	/* Map 10 pages. */
1372 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1373 		    PROT_READ | PROT_WRITE, 0, 0);
1374 	ASSERT_NE(ptr, MAP_FAILED);
1375 
1376 	/* Guard range. */
1377 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1378 
1379 	/* Populate read should error out... */
1380 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
1381 	ASSERT_EQ(errno, EFAULT);
1382 
1383 	/* ...as should populate write. */
1384 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
1385 	ASSERT_EQ(errno, EFAULT);
1386 
1387 	/* Cleanup. */
1388 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1389 }
1390 
1391 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1392 TEST_F(guard_regions, cold_pageout)
1393 {
1394 	const unsigned long page_size = self->page_size;
1395 	char *ptr;
1396 	int i;
1397 
1398 	/* Map 10 pages. */
1399 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1400 		    PROT_READ | PROT_WRITE, 0, 0);
1401 	ASSERT_NE(ptr, MAP_FAILED);
1402 
1403 	/* Guard range. */
1404 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1405 
1406 	/* Ensure guarded. */
1407 	for (i = 0; i < 10; i++) {
1408 		char *curr = &ptr[i * page_size];
1409 
1410 		ASSERT_FALSE(try_read_write_buf(curr));
1411 	}
1412 
1413 	/* Now mark cold. This should have no impact on guard markers. */
1414 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
1415 
1416 	/* Should remain guarded. */
1417 	for (i = 0; i < 10; i++) {
1418 		char *curr = &ptr[i * page_size];
1419 
1420 		ASSERT_FALSE(try_read_write_buf(curr));
1421 	}
1422 
1423 	/* OK, now page out. This should equally have no effect on markers. */
1424 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1425 
1426 	/* Should remain guarded. */
1427 	for (i = 0; i < 10; i++) {
1428 		char *curr = &ptr[i * page_size];
1429 
1430 		ASSERT_FALSE(try_read_write_buf(curr));
1431 	}
1432 
1433 	/* Cleanup. */
1434 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1435 }
1436 
1437 /* Ensure that guard pages do not break userfaultfd. */
1438 TEST_F(guard_regions, uffd)
1439 {
1440 	const unsigned long page_size = self->page_size;
1441 	int uffd;
1442 	char *ptr;
1443 	int i;
1444 	struct uffdio_api api = {
1445 		.api = UFFD_API,
1446 		.features = 0,
1447 	};
1448 	struct uffdio_register reg;
1449 	struct uffdio_range range;
1450 
1451 	if (!is_anon_backed(variant))
1452 		SKIP(return, "uffd only works on anon backing");
1453 
1454 	/* Set up uffd. */
1455 	uffd = userfaultfd(0);
1456 	if (uffd == -1 && errno == EPERM)
1457 		ksft_exit_skip("No userfaultfd permissions, try running as root.\n");
1458 	ASSERT_NE(uffd, -1);
1459 
1460 	ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
1461 
1462 	/* Map 10 pages. */
1463 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1464 		    PROT_READ | PROT_WRITE, 0, 0);
1465 	ASSERT_NE(ptr, MAP_FAILED);
1466 
1467 	/* Register the range with uffd. */
1468 	range.start = (unsigned long)ptr;
1469 	range.len = 10 * page_size;
1470 	reg.range = range;
1471 	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
1472 	ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);
1473 
1474 	/* Guard the range. This should not trigger the uffd. */
1475 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1476 
1477 	/* The guarding should behave as usual with no uffd intervention. */
1478 	for (i = 0; i < 10; i++) {
1479 		char *curr = &ptr[i * page_size];
1480 
1481 		ASSERT_FALSE(try_read_write_buf(curr));
1482 	}
1483 
1484 	/* Cleanup. */
1485 	ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
1486 	close(uffd);
1487 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1488 }
1489 
1490 /*
1491  * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1492  * read ahead aggressively, then install guard regions and assert that it
1493  * behaves correctly.
1494  *
1495  * We page out using MADV_PAGEOUT before checking guard regions so we drop page
1496  * cache folios, meaning we maximise the possibility of some broken readahead.
1497  */
1498 TEST_F(guard_regions, madvise_sequential)
1499 {
1500 	char *ptr;
1501 	int i;
1502 	const unsigned long page_size = self->page_size;
1503 
1504 	if (variant->backing == ANON_BACKED)
1505 		SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
1506 
1507 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1508 		    PROT_READ | PROT_WRITE, 0, 0);
1509 	ASSERT_NE(ptr, MAP_FAILED);
1510 
1511 	/* Establish a pattern of data in the file. */
1512 	set_pattern(ptr, 10, page_size);
1513 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1514 
1515 	/* Mark it as being accessed sequentially. */
1516 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
1517 
1518 	/* Mark every other page a guard page. */
1519 	for (i = 0; i < 10; i += 2) {
1520 		char *ptr2 = &ptr[i * page_size];
1521 
1522 		ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
1523 	}
1524 
1525 	/* Now page it out. */
1526 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1527 
1528 	/* Now make sure pages are as expected. */
1529 	for (i = 0; i < 10; i++) {
1530 		char *chrp = &ptr[i * page_size];
1531 
1532 		if (i % 2 == 0) {
1533 			bool result = try_read_write_buf(chrp);
1534 
1535 			ASSERT_FALSE(result);
1536 		} else {
1537 			ASSERT_EQ(*chrp, 'a' + i);
1538 		}
1539 	}
1540 
1541 	/* Now remove guard pages. */
1542 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1543 
1544 	/* Now make sure all data is as expected. */
1545 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1547 
1548 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1549 }
1550 
1551 /*
1552  * Check that file-backed mappings implement guard regions with MAP_PRIVATE
1553  * correctly.
1554  */
1555 TEST_F(guard_regions, map_private)
1556 {
1557 	const unsigned long page_size = self->page_size;
1558 	char *ptr_shared, *ptr_private;
1559 	int i;
1560 
1561 	if (variant->backing == ANON_BACKED)
1562 		SKIP(return, "MAP_PRIVATE test specific to file-backed");
1563 
1564 	ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1565 	ASSERT_NE(ptr_shared, MAP_FAILED);
1566 
1567 	/* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */
1568 	ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
1569 	ASSERT_NE(ptr_private, MAP_FAILED);
1570 
1571 	/* Set pattern in shared mapping. */
1572 	set_pattern(ptr_shared, 10, page_size);
1573 
1574 	/* Install guard regions in every other page in the shared mapping. */
1575 	for (i = 0; i < 10; i += 2) {
1576 		char *ptr = &ptr_shared[i * page_size];
1577 
1578 		ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1579 	}
1580 
1581 	for (i = 0; i < 10; i++) {
1582 		/* Every even shared page should be guarded. */
1583 		ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1584 		/* Private mappings should always be readable. */
1585 		ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1586 	}
1587 
1588 	/* Install guard regions in every other page in the private mapping. */
1589 	for (i = 0; i < 10; i += 2) {
1590 		char *ptr = &ptr_private[i * page_size];
1591 
1592 		ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1593 	}
1594 
1595 	for (i = 0; i < 10; i++) {
1596 		/* Every even shared page should be guarded. */
1597 		ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1598 		/* Every even private page should be guarded. */
1599 		ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1600 	}
1601 
1602 	/* Remove guard regions from shared mapping. */
1603 	ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
1604 
1605 	for (i = 0; i < 10; i++) {
1606 		/* Shared mappings should always be readable. */
1607 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1608 		/* Every even private page should be guarded. */
1609 		ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1610 	}
1611 
1612 	/* Remove guard regions from private mapping. */
1613 	ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1614 
1615 	for (i = 0; i < 10; i++) {
1616 		/* Shared mappings should always be readable. */
1617 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1618 		/* Private mappings should always be readable. */
1619 		ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1620 	}
1621 
1622 	/* Ensure patterns are intact. */
1623 	ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
1624 	ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
1625 
1626 	/* Now write out every other page to MAP_PRIVATE. */
1627 	for (i = 0; i < 10; i += 2) {
1628 		char *ptr = &ptr_private[i * page_size];
1629 
1630 		memset(ptr, 'a' + i, page_size);
1631 	}
1632 
1633 	/*
1634 	 * At this point the mapping is:
1635 	 *
1636 	 * 0123456789
1637 	 * SPSPSPSPSP
1638 	 *
1639 	 * Where S = shared, P = private mappings.
1640 	 */
1641 
1642 	/* Now mark the beginning of the mapping guarded. */
1643 	ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
1644 
1645 	/*
1646 	 * This renders the mapping:
1647 	 *
1648 	 * 0123456789
1649 	 * xxxxxPSPSP
1650 	 */
1651 
1652 	for (i = 0; i < 10; i++) {
1653 		char *ptr = &ptr_private[i * page_size];
1654 
1655 		/* Ensure guard regions as expected. */
1656 		ASSERT_EQ(try_read_buf(ptr), i >= 5);
1657 		/* The shared mapping should always succeed. */
1658 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1659 	}
1660 
1661 	/* Remove the guard regions altogether. */
1662 	ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1663 
1664 	/*
1666 	 * We now expect the mapping to be:
1667 	 *
1668 	 * 0123456789
1669 	 * SSSSSPSPSP
1670 	 *
1671 	 * As we removed guard regions, the private pages from the first 5 will
1672 	 * have been zapped, so on fault will reestablish the shared mapping.
1673 	 */
1674 
1675 	for (i = 0; i < 10; i++) {
1676 		char *ptr = &ptr_private[i * page_size];
1677 
1678 		/*
1679 		 * Assert that shared mappings in the MAP_PRIVATE mapping match
1680 		 * the shared mapping.
1681 		 */
1682 		if (i < 5 || i % 2 == 0) {
1683 			char *ptr_s = &ptr_shared[i * page_size];
1684 
1685 			ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
1686 			continue;
1687 		}
1688 
1689 		/* Everything else is a private mapping. */
1690 		ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
1691 	}
1692 
1693 	ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
1694 	ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
1695 }
1696 
1697 /* Test that guard regions established over a read-only mapping function correctly. */
1698 TEST_F(guard_regions, readonly_file)
1699 {
1700 	const unsigned long page_size = self->page_size;
1701 	char *ptr;
1702 	int i;
1703 
1704 	if (variant->backing != LOCAL_FILE_BACKED)
1705 		SKIP(return, "Read-only test specific to file-backed");
1706 
1707 	/* Map shared so we can populate it with the pattern, then unmap. */
1708 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1709 		    PROT_READ | PROT_WRITE, 0, 0);
1710 	ASSERT_NE(ptr, MAP_FAILED);
1711 	set_pattern(ptr, 10, page_size);
1712 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1713 	/* Close the fd so we can re-open read-only. */
1714 	ASSERT_EQ(close(self->fd), 0);
1715 
1716 	/* Re-open read-only. */
1717 	self->fd = open(self->path, O_RDONLY);
1718 	ASSERT_NE(self->fd, -1);
1719 	/* Re-map read-only. */
1720 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1721 	ASSERT_NE(ptr, MAP_FAILED);
1722 
1723 	/* Mark every other page guarded. */
1724 	for (i = 0; i < 10; i += 2) {
1725 		char *ptr_pg = &ptr[i * page_size];
1726 
1727 		ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
1728 	}
1729 
1730 	/* Assert that the guard regions are in place. */
1731 	for (i = 0; i < 10; i++) {
1732 		char *ptr_pg = &ptr[i * page_size];
1733 
1734 		ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
1735 	}
1736 
1737 	/* Remove guard regions. */
1738 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1739 
1740 	/* Ensure the data is as expected. */
1741 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1742 
1743 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1744 }
1745 
1746 TEST_F(guard_regions, fault_around)
1747 {
1748 	const unsigned long page_size = self->page_size;
1749 	char *ptr;
1750 	int i;
1751 
1752 	if (variant->backing == ANON_BACKED)
1753 		SKIP(return, "Fault-around test specific to file-backed");
1754 
1755 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1756 		    PROT_READ | PROT_WRITE, 0, 0);
1757 	ASSERT_NE(ptr, MAP_FAILED);
1758 
1759 	/* Establish a pattern in the backing file. */
1760 	set_pattern(ptr, 10, page_size);
1761 
1762 	/*
1763 	 * Now drop it from the page cache so we get major faults when next we
1764 	 * map it.
1765 	 */
1766 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1767 
1768 	/* Unmap and remap 'to be sure'. */
1769 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1770 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1771 		    PROT_READ | PROT_WRITE, 0, 0);
1772 	ASSERT_NE(ptr, MAP_FAILED);
1773 
1774 	/* Now make every even page guarded. */
1775 	for (i = 0; i < 10; i += 2) {
1776 		char *ptr_p = &ptr[i * page_size];
1777 
1778 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1779 	}
1780 
1781 	/* Now fault in every odd page. This should trigger fault-around. */
1782 	for (i = 1; i < 10; i += 2) {
1783 		char *ptr_p = &ptr[i * page_size];
1784 
1785 		ASSERT_TRUE(try_read_buf(ptr_p));
1786 	}
1787 
1788 	/* Finally, ensure that guard regions are intact as expected. */
1789 	for (i = 0; i < 10; i++) {
1790 		char *ptr_p = &ptr[i * page_size];
1791 
1792 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1793 	}
1794 
1795 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1796 }
1797 
1798 TEST_F(guard_regions, truncation)
1799 {
1800 	const unsigned long page_size = self->page_size;
1801 	char *ptr;
1802 	int i;
1803 
1804 	if (variant->backing == ANON_BACKED)
1805 		SKIP(return, "Truncation test specific to file-backed");
1806 
1807 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1808 		    PROT_READ | PROT_WRITE, 0, 0);
1809 	ASSERT_NE(ptr, MAP_FAILED);
1810 
1811 	/*
1812 	 * Establish a pattern in the backing file, just so there is data
1813 	 * there.
1814 	 */
1815 	set_pattern(ptr, 10, page_size);
1816 
1817 	/* Now make every even page guarded. */
1818 	for (i = 0; i < 10; i += 2) {
1819 		char *ptr_p = &ptr[i * page_size];
1820 
1821 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1822 	}
1823 
1824 	/* Now assert things are as expected. */
1825 	for (i = 0; i < 10; i++) {
1826 		char *ptr_p = &ptr[i * page_size];
1827 
1828 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1829 	}
1830 
1831 	/* Now truncate to the size actually used (setup made it 100 pages). */
1832 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1833 
1834 	/* Here the guard regions will remain intact. */
1835 	for (i = 0; i < 10; i++) {
1836 		char *ptr_p = &ptr[i * page_size];
1837 
1838 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1839 	}
1840 
1841 	/* Now truncate to half the size, then truncate again to the full size. */
1842 	ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
1843 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1844 
1845 	/* Again, guard pages will remain intact. */
1846 	for (i = 0; i < 10; i++) {
1847 		char *ptr_p = &ptr[i * page_size];
1848 
1849 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1850 	}
1851 
1852 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1853 }
1854 
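/*
 * Assert that hole punching (MADV_REMOVE) leaves guard regions intact while
 * zeroing the punched range of the backing file.
 */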
1855 TEST_F(guard_regions, hole_punch)
1856 {
1857 	const unsigned long page_size = self->page_size;
1858 	char *ptr;
1859 	int i;
1860 
1861 	if (variant->backing == ANON_BACKED)
		SKIP(return, "Hole punch test specific to file-backed");
1863 
1864 	/* Establish pattern in mapping. */
1865 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1866 		    PROT_READ | PROT_WRITE, 0, 0);
1867 	ASSERT_NE(ptr, MAP_FAILED);
1868 	set_pattern(ptr, 10, page_size);
1869 
1870 	/* Install a guard region in the middle of the mapping. */
1871 	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1872 			  MADV_GUARD_INSTALL), 0);
1873 
1874 	/*
1875 	 * The buffer will now be:
1876 	 *
1877 	 * 0123456789
1878 	 * ***xxxx***
1879 	 *
1880 	 * Where * is data and x is the guard region.
1881 	 */
1882 
1883 	/* Ensure established. */
1884 	for (i = 0; i < 10; i++) {
1885 		char *ptr_p = &ptr[i * page_size];
1886 
1887 		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1888 	}
1889 
1890 	/* Now hole punch the guarded region. */
1891 	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1892 			  MADV_REMOVE), 0);
1893 
1894 	/* Ensure guard regions remain. */
1895 	for (i = 0; i < 10; i++) {
1896 		char *ptr_p = &ptr[i * page_size];
1897 
1898 		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1899 	}
1900 
1901 	/* Now remove guard region throughout. */
1902 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1903 
1904 	/* Check that the pattern exists in non-hole punched region. */
1905 	ASSERT_TRUE(check_pattern(ptr, 3, page_size));
1906 	/* Check that hole punched region is zeroed. */
1907 	ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
1908 	/* Check that the pattern exists in the remainder of the file. */
1909 	ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));
1910 
1911 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1912 }
1913 
1914 /*
 * Ensure that a memfd works correctly with guard regions: we can write-seal
 * it, then map it read-only and still establish guard regions within it,
 * remove those guard regions and have everything work correctly.
1918  */
1919 TEST_F(guard_regions, memfd_write_seal)
1920 {
1921 	const unsigned long page_size = self->page_size;
1922 	char *ptr;
1923 	int i;
1924 
1925 	if (variant->backing != SHMEM_BACKED)
1926 		SKIP(return, "memfd write seal test specific to shmem");
1927 
1928 	/* OK, we need a memfd, so close existing one. */
1929 	ASSERT_EQ(close(self->fd), 0);
1930 
	/*
	 * Create and truncate memfd. MFD_ALLOW_SEALING is required so we can
	 * write-seal it later.
	 */
1932 	self->fd = memfd_create("guard_regions_memfd_seals_test",
1933 				MFD_ALLOW_SEALING);
1934 	ASSERT_NE(self->fd, -1);
1935 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1936 
1937 	/* Map, set pattern, unmap. */
1938 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1939 	ASSERT_NE(ptr, MAP_FAILED);
1940 	set_pattern(ptr, 10, page_size);
1941 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1942 
	/*
	 * Write-seal the memfd. This would fail if a writable shared mapping
	 * still existed, hence the unmap above.
	 */
1944 	ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);
1945 
1946 	/* Now map the memfd readonly. */
1947 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1948 	ASSERT_NE(ptr, MAP_FAILED);
1949 
1950 	/* Ensure pattern is as expected. */
1951 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1952 
1953 	/* Now make every even page guarded. */
1954 	for (i = 0; i < 10; i += 2) {
1955 		char *ptr_p = &ptr[i * page_size];
1956 
1957 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1958 	}
1959 
1960 	/* Now assert things are as expected. */
1961 	for (i = 0; i < 10; i++) {
1962 		char *ptr_p = &ptr[i * page_size];
1963 
1964 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1965 	}
1966 
1967 	/* Now remove guard regions. */
1968 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1969 
1970 	/* Ensure pattern is as expected. */
1971 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1972 
1973 	/* Ensure write seal intact. */
1974 	for (i = 0; i < 10; i++) {
1975 		char *ptr_p = &ptr[i * page_size];
1976 
1977 		ASSERT_FALSE(try_write_buf(ptr_p));
1978 	}
1979 
1980 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Guard regions are now permitted in read-only anonymous mappings. For the
 * sake of thoroughness, though it probably has no practical use, test that
 * guard regions function with a mapping of the anonymous zero page.
 */
1990 TEST_F(guard_regions, anon_zeropage)
1991 {
1992 	const unsigned long page_size = self->page_size;
1993 	char *ptr;
1994 	int i;
1995 
1996 	if (!is_anon_backed(variant))
1997 		SKIP(return, "anon zero page test specific to anon/shmem");
1998 
	/* Obtain a read-only mapping, i.e. one backed by the anon zero page. */
2000 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
2001 	ASSERT_NE(ptr, MAP_FAILED);
2002 
2003 	/* Now make every even page guarded. */
2004 	for (i = 0; i < 10; i += 2) {
2005 		char *ptr_p = &ptr[i * page_size];
2006 
2007 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2008 	}
2009 
2010 	/* Now assert things are as expected. */
2011 	for (i = 0; i < 10; i++) {
2012 		char *ptr_p = &ptr[i * page_size];
2013 
2014 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
2015 	}
2016 
2017 	/* Now remove all guard regions. */
2018 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
2019 
2020 	/* Now assert things are as expected. */
2021 	for (i = 0; i < 10; i++) {
2022 		char *ptr_p = &ptr[i * page_size];
2023 
2024 		ASSERT_TRUE(try_read_buf(ptr_p));
2025 	}
2026 
	/* Ensure zero page, i.e. the mapping reads back as all zeroes. */
2028 	ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));
2029 
2030 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2031 }
2032 
2033 /*
2034  * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
2035  */
2036 TEST_F(guard_regions, pagemap)
2037 {
2038 	const unsigned long page_size = self->page_size;
2039 	int proc_fd;
2040 	char *ptr;
2041 	int i;
2042 
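	/*
	 * Each /proc/self/pagemap entry reports guard region markers via the
	 * PM_GUARD_REGION bit, which is what we check below.
	 */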
2043 	proc_fd = open("/proc/self/pagemap", O_RDONLY);
2044 	ASSERT_NE(proc_fd, -1);
2045 
2046 	ptr = mmap_(self, variant, NULL, 10 * page_size,
2047 		    PROT_READ | PROT_WRITE, 0, 0);
2048 	ASSERT_NE(ptr, MAP_FAILED);
2049 
2050 	/* Read from pagemap, and assert no guard regions are detected. */
2051 	for (i = 0; i < 10; i++) {
2052 		char *ptr_p = &ptr[i * page_size];
2053 		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2054 		unsigned long masked = entry & PM_GUARD_REGION;
2055 
2056 		ASSERT_EQ(masked, 0);
2057 	}
2058 
2059 	/* Install a guard region in every other page. */
2060 	for (i = 0; i < 10; i += 2) {
2061 		char *ptr_p = &ptr[i * page_size];
2062 
2063 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2064 	}
2065 
2066 	/* Re-read from pagemap, and assert guard regions are detected. */
2067 	for (i = 0; i < 10; i++) {
2068 		char *ptr_p = &ptr[i * page_size];
2069 		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2070 		unsigned long masked = entry & PM_GUARD_REGION;
2071 
2072 		ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0);
2073 	}
2074 
2075 	ASSERT_EQ(close(proc_fd), 0);
2076 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2077 }
2078 
2079 /*
2080  * Assert that PAGEMAP_SCAN correctly reports guard region ranges.
2081  */
2082 TEST_F(guard_regions, pagemap_scan)
2083 {
2084 	const unsigned long page_size = self->page_size;
2085 	struct page_region pm_regs[10];
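	/*
	 * Ask PAGEMAP_SCAN to report only guard region pages, returning any
	 * matching ranges in pm_regs.
	 */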
2086 	struct pm_scan_arg pm_scan_args = {
2087 		.size = sizeof(struct pm_scan_arg),
2088 		.category_anyof_mask = PAGE_IS_GUARD,
2089 		.return_mask = PAGE_IS_GUARD,
2090 		.vec = (long)&pm_regs,
2091 		.vec_len = ARRAY_SIZE(pm_regs),
2092 	};
2093 	int proc_fd, i;
2094 	char *ptr;
2095 
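	/* PAGEMAP_SCAN is issued against the pagemap file descriptor. */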
2096 	proc_fd = open("/proc/self/pagemap", O_RDONLY);
2097 	ASSERT_NE(proc_fd, -1);
2098 
2099 	ptr = mmap_(self, variant, NULL, 10 * page_size,
2100 		    PROT_READ | PROT_WRITE, 0, 0);
2101 	ASSERT_NE(ptr, MAP_FAILED);
2102 
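	/* Scan before installing any guard regions - expect no matches. */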
2103 	pm_scan_args.start = (long)ptr;
2104 	pm_scan_args.end = (long)ptr + 10 * page_size;
2105 	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 0);
2106 	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);
2107 
2108 	/* Install a guard region in every other page. */
2109 	for (i = 0; i < 10; i += 2) {
2110 		char *ptr_p = &ptr[i * page_size];
2111 
		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2113 	}
2114 
2115 	/*
2116 	 * Assert ioctl() returns the count of located regions, where each
2117 	 * region spans every other page within the range of 10 pages.
2118 	 */
2119 	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 5);
2120 	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);
2121 
	/* Assert that the returned regions match the guarded (even) pages. */
2123 	for (i = 0; i < 5; i++) {
2124 		long ptr_p = (long)&ptr[2 * i * page_size];
2125 
2126 		ASSERT_EQ(pm_regs[i].start, ptr_p);
2127 		ASSERT_EQ(pm_regs[i].end, ptr_p + page_size);
2128 		ASSERT_EQ(pm_regs[i].categories, PAGE_IS_GUARD);
2129 	}
2130 
2131 	ASSERT_EQ(close(proc_fd), 0);
2132 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2133 }
2134 
2135 TEST_HARNESS_MAIN
2136